chore: code format (#72)

* chore: code format

* chore: remove fetch-depth

* chore: add format and lint

* chore: add pr_check

* fix: lint with config

* chore: run only unit tests in this PR

* fix: code format error
Hardy 2025-01-14 14:29:31 +08:00, committed by GitHub
parent fb4dafecb3
commit 8da176bea8
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
88 changed files with 3497 additions and 3365 deletions

.github/workflows/pr_check.yml (new file, 307 lines)

@@ -0,0 +1,307 @@
name: Unit Test
on:
pull_request:
branches: [ "main" ]
defaults:
run:
shell: bash
env:
GO_VERSION: 1.23.4
NODEJS_VERSION: 16.20.2
PNAME: console
jobs:
format_check:
runs-on: ubuntu-latest
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
path: ${{ env.PNAME }}
- name: Checkout framework repository
uses: actions/checkout@v4
with:
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Code format
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/console
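# note: the infini.sh import paths used by this project resolve through a
# GOPATH-style layout, so the symlink below maps this checkout into $WORKBASE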
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for code format
cd $WORK
echo Formatting code at $PWD ...
make format
if [ $? -ne 0 ]; then
echo "make format failed, please check make output"
exit 1
fi
- name: Check for changes after format
id: check-changes
shell: bash
run: |
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/$PNAME
# for format check
cd $WORK
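# "git status --porcelain" prefixes modified tracked files with " M", so any
# modified *.go file here means "make format" rewrote code that was not committed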
if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then
echo "go format detected formatting changes"
echo "changes=true" >> $GITHUB_OUTPUT
else
echo "go format no changes found"
echo "changes=false" >> $GITHUB_OUTPUT
fi
- name: Fail workflow if changes after format
if: steps.check-changes.outputs.changes == 'true'
run: exit 1
unit_test:
runs-on: ubuntu-latest
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
path: ${{ env.PNAME }}
- name: Checkout framework repository
uses: actions/checkout@v4
with:
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Unit test
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/$PNAME
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for unit test
cd $WORK
echo Testing code at $PWD ...
make test
code_lint:
runs-on: ubuntu-latest
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
path: ${{ env.PNAME }}
- name: Checkout framework repository
uses: actions/checkout@v4
with:
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Code lint
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/$PNAME
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for code lint
cd $WORK
echo Linting code at $PWD ...
# make lint


@@ -1,105 +0,0 @@
name: Unit Test
on:
pull_request:
branches: [ "main" ]
defaults:
run:
shell: bash
jobs:
build:
runs-on: ubuntu-latest
env:
GO_VERSION: 1.23.4
NODEJS_VERSION: 16.20.2
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
fetch-depth: 0
path: console
- name: Checkout framework repository
uses: actions/checkout@v4
with:
fetch-depth: 0
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Unit test
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/console
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for unit test
cd $WORK
echo Testing code at $PWD ...
make test

.gitignore (2 changes)

@@ -32,5 +32,7 @@ appveyor.yml
log/
.env
generated_*.go
+config/generated.go
+config/generat*.go
config/initialization.dsl
config/system_config.yml


@@ -46,7 +46,7 @@ func GetMapStringValue(m util.MapStr, key string) string {
func MapLabel(labelName, indexName, keyField, valueField string, client elastic.API, cacheLabels map[string]string) string {
if len(cacheLabels) > 0 {
-if v, ok := cacheLabels[labelName]; ok{
+if v, ok := cacheLabels[labelName]; ok {
return v
}
}
@@ -58,7 +58,7 @@ func MapLabel(labelName, indexName, keyField, valueField string, client elastic.
return labelMaps[labelName]
}
-func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, keyFieldValues []string, cacheSize int) (map[string]string, error){
+func GetLabelMaps(indexName, keyField, valueField string, client elastic.API, keyFieldValues []string, cacheSize int) (map[string]string, error) {
if client == nil {
return nil, fmt.Errorf("cluster client must not be empty")
}
@@ -89,7 +89,7 @@ func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, k
var key string
if keyField == "_id" {
key = hit.ID
-}else{
+} else {
key = GetMapStringValue(sourceM, keyField)
}
if key != "" {
@@ -99,7 +99,7 @@ func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, k
return labelMaps, nil
}
-func ExecuteTemplate( tpl *template.Template, ctx map[string]interface{}) ([]byte, error){
+func ExecuteTemplate(tpl *template.Template, ctx map[string]interface{}) ([]byte, error) {
msgBuffer := &bytes.Buffer{}
err := tpl.Execute(msgBuffer, ctx)
return msgBuffer.Bytes(), err
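ExecuteTemplate above is a thin wrapper that renders a text/template into a byte buffer. A self-contained sketch of the same pattern (the helper is re-declared locally; the template text and context keys are illustrative only, not from this diff):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// executeTemplate mirrors the helper above: render tpl with ctx, return the bytes.
func executeTemplate(tpl *template.Template, ctx map[string]interface{}) ([]byte, error) {
	msgBuffer := &bytes.Buffer{}
	err := tpl.Execute(msgBuffer, ctx)
	return msgBuffer.Bytes(), err
}

func main() {
	tpl := template.Must(template.New("msg").Parse("cluster {{.cluster_id}} is {{.status}}"))
	out, err := executeTemplate(tpl, map[string]interface{}{"cluster_id": "c1", "status": "red"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // cluster c1 is red
}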


@@ -1,10 +0,0 @@
package config
const LastCommitLog = "N/A"
const BuildDate = "N/A"
const EOLDate = "N/A"
const Version = "0.0.1-SNAPSHOT"
const BuildNumber = "001"


@@ -29,24 +29,24 @@ import (
)
const (
ErrTypeRequestParams = "request_params_error"
ErrTypeApplication = "application_error"
ErrTypeAlreadyExists = "already_exists_error"
ErrTypeNotExists = "not_exists_error"
ErrTypeIncorrectPassword = "incorrect_password_error"
ErrTypeDomainPrefixMismatch = "domain_prefix_mismatch_error"
ErrTypeDisabled = "disabled_error"
ErrTypeRequestTimeout = "request_timeout_error"
)
var (
ErrPasswordIncorrect = errors.New("incorrect password")
ErrNotExistsErr = errors.New("not exists")
)
type Error struct {
typ string
msg interface{}
field string
}
@@ -54,22 +54,22 @@ func (err Error) Error() string {
return fmt.Sprintf("%s:%v: %v", err.typ, err.field, err.msg)
}
-//NewAppError returns an application error
+// NewAppError returns an application error
func NewAppError(msg any) *Error {
return New(ErrTypeApplication, "", msg)
}
-//NewParamsError returns a request params error
+// NewParamsError returns a request params error
func NewParamsError(field string, msg any) *Error {
return New(ErrTypeRequestParams, field, msg)
}
-//NewAlreadyExistsError returns an already exists error
+// NewAlreadyExistsError returns an already exists error
func NewAlreadyExistsError(field string, msg any) *Error {
return New(ErrTypeAlreadyExists, field, msg)
}
-//NewNotExistsError returns a not exists error
+// NewNotExistsError returns a not exists error
func NewNotExistsError(field string, msg any) *Error {
return New(ErrTypeNotExists, field, msg)
}
@@ -80,4 +80,4 @@ func New(typ string, field string, msg any) *Error {
msg,
field,
}
}
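All of the constructors above funnel into New, and Error() formats type, field, and message. A compressed, self-contained sketch (the Error type and format string are copied from the hunks; NewParamsError is inlined here for brevity):

package main

import "fmt"

// Trimmed-down copy of the Error type from the hunk above, for illustration.
type Error struct {
	typ   string
	msg   interface{}
	field string
}

func (err Error) Error() string {
	return fmt.Sprintf("%s:%v: %v", err.typ, err.field, err.msg)
}

func NewParamsError(field string, msg any) *Error {
	return &Error{typ: "request_params_error", msg: msg, field: field}
}

func main() {
	fmt.Println(NewParamsError("name", "must not be empty").Error())
	// request_params_error:name: must not be empty
}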


@@ -144,8 +144,8 @@ const (
PermissionMigrationTaskWrite = "task:write"
PermissionComparisonTaskRead = "comparison_task:read"
PermissionComparisonTaskWrite = "comparison_task:write"
PermissionSmtpServerRead = "smtp_server:read"
PermissionSmtpServerWrite = "smtp_server:write"
)
var (
@@ -221,8 +221,8 @@ var (
DashboardAllPermission = []string{PermissionLayoutRead, PermissionLayoutWrite}
WorkbenchReadPermission = []string{PermissionElasticsearchClusterRead, PermissionActivityRead, PermissionAlertMessageRead, PermissionElasticsearchMetricRead}
WorkbenchAllPermission = WorkbenchReadPermission
SmtpServerReadPermission = []string{PermissionSmtpServerRead}
SmtpServerAllPermission = []string{PermissionSmtpServerRead, PermissionSmtpServerWrite}
)
var AdminPrivilege = []string{
@@ -304,8 +304,8 @@ func init() {
SubscriptionRead: SubscriptionReadPermission,
SubscriptionAll: SubscriptionAllPermission,
SmtpServerRead: SmtpServerReadPermission,
SmtpServerAll: SmtpServerAllPermission,
}
}


@@ -32,72 +32,72 @@ import (
)
type Alert struct {
ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword }"`
RuleName string `json:"rule_name" elastic_mapping:"rule_name: { type: keyword }"`
ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword }"`
ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword }"`
Expression string `json:"expression" elastic_mapping:"expression: { type: keyword, copy_to:search_text }"`
Objects []string `json:"objects" elastic_mapping:"objects: { type:keyword,copy_to:search_text }"`
Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"`
Title string `json:"title" elastic_mapping:"title: { type: keyword }"`
Message string `json:"message" elastic_mapping:"context: { type: keyword, copy_to:search_text }"`
AcknowledgedTime interface{} `json:"acknowledged_time,omitempty"`
ActionExecutionResults []ActionExecutionResult `json:"action_execution_results,omitempty"`
RecoverActionResults []ActionExecutionResult `json:"recover_action_results,omitempty"`
EscalationActionResults []ActionExecutionResult `json:"escalation_action_results,omitempty"`
Users []string `json:"users,omitempty"`
State string `json:"state"`
Error string `json:"error,omitempty"`
IsNotified bool `json:"is_notified" elastic_mapping:"is_notified: { type: boolean }"` // whether an alert notification was sent for this check
IsEscalated bool `json:"is_escalated" elastic_mapping:"is_escalated: { type: boolean }"` // whether an escalated alert notification was sent for this check
Conditions Condition `json:"condition"`
ConditionResult *ConditionResult `json:"condition_result,omitempty" elastic_mapping:"condition_result: { type: object,enabled:false }"`
SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
}
type ActionExecutionResult struct {
ExecutionTime int `json:"execution_time"`
Error string `json:"error"`
Result string `json:"result"`
Message string `json:"message"`
ChannelName string `json:"channel_name"`
ChannelType string `json:"channel_type"`
ChannelID string `json:"channel_id"`
}
const (
AlertStateAlerting string = "alerting"
AlertStateOK = "ok"
AlertStateError = "error"
AlertStateNodata = "nodata"
)
const (
MessageStateAlerting = "alerting"
MessageStateIgnored = "ignored"
MessageStateRecovered = "recovered"
)
type AlertMessage struct {
ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword,copy_to:search_text }"`
ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword,copy_to:search_text }"`
ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword,copy_to:search_text }"`
Title string `json:"title" elastic_mapping:"title: { type: keyword,copy_to:search_text }"`
Message string `json:"message" elastic_mapping:"content: { type: keyword,copy_to:search_text }"`
Status string `json:"status" elastic_mapping:"status: { type: keyword,copy_to:search_text }"`
IgnoredTime time.Time `json:"ignored_time,omitempty" elastic_mapping:"ignored_time: { type: date }"`
IgnoredReason string `json:"ignored_reason,omitempty" elastic_mapping:"ignored_reason: { type: keyword,copy_to:search_text }"`
IgnoredUser string `json:"ignored_user,omitempty" elastic_mapping:"ignored_user: { type: keyword,copy_to:search_text }"`
Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"`
SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"`
Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"`
}
/*
@@ -109,4 +109,4 @@ type AlertMessage struct {
*/
//message status (Active, Ignore, Recover)
//rule status (Active, Error, OK)


@@ -30,10 +30,11 @@ package alerting
import "fmt"
type Condition struct {
Operator string `json:"operator"`
Items []ConditionItem `json:"items"`
}
-func (cond *Condition) GetMinimumPeriodMatch() int{
+func (cond *Condition) GetMinimumPeriodMatch() int {
var minPeriodMatch = 0
for _, citem := range cond.Items {
if citem.MinimumPeriodMatch > minPeriodMatch {
@@ -45,14 +46,14 @@ func (cond *Condition) GetMinimumPeriodMatch() int{
type ConditionItem struct {
//MetricName string `json:"metric"`
MinimumPeriodMatch int `json:"minimum_period_match"`
Operator string `json:"operator"`
Values []string `json:"values"`
Priority string `json:"priority"`
Expression string `json:"expression,omitempty"`
}
-func (cond *ConditionItem) GenerateConditionExpression()(conditionExpression string, err error){
+func (cond *ConditionItem) GenerateConditionExpression() (conditionExpression string, err error) {
valueLength := len(cond.Values)
if valueLength == 0 {
return conditionExpression, fmt.Errorf("condition values: %v should not be empty", cond.Values)
@@ -81,20 +82,20 @@ func (cond *ConditionItem) GenerateConditionExpression()(conditionExpression str
type ConditionResult struct {
ResultItems []ConditionResultItem `json:"result_items"`
QueryResult *QueryResult `json:"query_result"`
}
type ConditionResultItem struct {
GroupValues []string `json:"group_values"`
ConditionItem *ConditionItem `json:"condition_item"`
IssueTimestamp interface{} `json:"issue_timestamp"`
ResultValue interface{} `json:"result_value"` // the last value that satisfied the condition
RelationValues map[string]interface{} `json:"relation_values"`
}
var PriorityWeights = map[string]int{
"info": 1,
"low": 2,
"medium": 3,
"high": 4,
"critical": 5,
}
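The PriorityWeights map above ranks priorities so they can be compared numerically. A self-contained sketch of such a comparison (the map values are copied from the hunk; maxPriority is a hypothetical helper, not part of this diff):

package main

import "fmt"

// copied from the alerting package above
var PriorityWeights = map[string]int{
	"info": 1, "low": 2, "medium": 3, "high": 4, "critical": 5,
}

// maxPriority returns whichever of the two priorities ranks higher.
func maxPriority(a, b string) string {
	if PriorityWeights[a] >= PriorityWeights[b] {
		return a
	}
	return b
}

func main() {
	fmt.Println(maxPriority("medium", "critical")) // critical
}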


@@ -33,18 +33,17 @@ import (
)
type Channel struct {
orm.ORMObjectBase
Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"`
Type string `json:"type" elastic_mapping:"type:{type:keyword,copy_to:search_text}"` // email or webhook
Priority int `json:"priority,omitempty"`
Webhook *CustomWebhook `json:"webhook,omitempty" elastic_mapping:"webhook:{type:object}"`
SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
SubType string `json:"sub_type" elastic_mapping:"sub_type:{type:keyword,copy_to:search_text}"`
Email *Email `json:"email,omitempty" elastic_mapping:"email:{type:object}"`
Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"`
}
const (
ChannelEmail = "email"
ChannelWebhook = "webhook"
)


@@ -29,11 +29,11 @@ package alerting
type Filter struct {
And []FilterQuery `json:"and,omitempty"`
Or []FilterQuery `json:"or,omitempty"`
Not []FilterQuery `json:"not,omitempty"`
//MinimumShouldMatch int `json:"minimum_should_match"`
}
func (f Filter) IsEmpty() bool {
return len(f.And) == 0 && len(f.Or) == 0 && len(f.Not) == 0
}


@@ -28,12 +28,12 @@
package alerting
type FilterQuery struct {
Field string `json:"field,omitempty"`
Operator string `json:"operator,omitempty"`
Values []string `json:"values,omitempty"`
And []FilterQuery `json:"and,omitempty"`
Or []FilterQuery `json:"or,omitempty"`
Not []FilterQuery `json:"not,omitempty"`
}
func (fq FilterQuery) IsComplex() bool {
@@ -42,4 +42,4 @@ func (fq FilterQuery) IsComplex() bool {
func (f FilterQuery) IsEmpty() bool {
return !f.IsComplex() && f.Operator == ""
}


@@ -36,13 +36,12 @@ import (
)
type Metric struct {
insight.Metric
Title string `json:"title,omitempty"` //text template
Message string `json:"message,omitempty"` // text template
Expression string `json:"expression,omitempty" elastic_mapping:"expression:{type:keyword,copy_to:search_text}"` // alert expression, auto-generated, e.g. avg(cpu) > 80
}
-func (m *Metric) GenerateExpression() (string, error){
+func (m *Metric) GenerateExpression() (string, error) {
if len(m.Items) == 1 {
return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil
}
@@ -50,12 +49,12 @@ func (m *Metric) GenerateExpression() (string, error){
return "", fmt.Errorf("formula should not be empty since there are %d metrics", len(m.Items))
}
var (
expressionBytes = []byte(m.Formula)
metricExpression string
)
for _, item := range m.Items {
metricExpression = fmt.Sprintf("%s(%s)", item.Statistic, item.Field)
-reg, err := regexp.Compile(item.Name+`([^\w]|$)`)
+reg, err := regexp.Compile(item.Name + `([^\w]|$)`)
if err != nil {
return "", err
}
@@ -66,23 +65,23 @@ func (m *Metric) GenerateExpression() (string, error){
}
type MetricItem struct {
Name string `json:"name"`
Field string `json:"field"`
Statistic string `json:"statistic"`
}
type QueryResult struct {
Query string `json:"query"`
Raw string `json:"raw"`
MetricData []MetricData `json:"metric_data"`
Nodata bool `json:"nodata"`
Min interface{} `json:"-"`
Max interface{} `json:"-"`
}
type MetricData struct {
GroupValues []string `json:"group_values"`
Data map[string][]TimeMetricData `json:"data"`
}
type TimeMetricData []interface{}
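GenerateExpression rewrites each item name in the formula into its statistic(field) form via the regexp shown above; the replacement call itself falls outside these hunks, so the following self-contained sketch reconstructs the likely behavior (ReplaceAll with a $1 back-reference is an assumption; the names, fields, and expected output are taken from the test file further down):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	formula := "a/b*100"
	items := []struct{ Name, Field, Statistic string }{
		{"a", "fs.free_in_bytes", "min"},
		{"b", "fs.total_in_bytes", "max"},
	}
	expressionBytes := []byte(formula)
	for _, item := range items {
		metricExpression := fmt.Sprintf("%s(%s)", item.Statistic, item.Field)
		// the trailing group stops "a" from matching inside longer names like "max"
		reg, err := regexp.Compile(item.Name + `([^\w]|$)`)
		if err != nil {
			panic(err)
		}
		// "$1" re-emits whatever delimiter the group matched after the name
		expressionBytes = reg.ReplaceAll(expressionBytes, []byte(metricExpression+"$1"))
	}
	fmt.Println(string(expressionBytes)) // min(fs.free_in_bytes)/max(fs.total_in_bytes)*100
}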


@@ -32,20 +32,19 @@ import (
)
type Resource struct {
ID string `json:"resource_id" elastic_mapping:"resource_id:{type:keyword}"`
Name string `json:"resource_name" elastic_mapping:"resource_name:{type:keyword}"`
Type string `json:"type" elastic_mapping:"type:{type:keyword}"`
Objects []string `json:"objects" elastic_mapping:"objects:{type:keyword,copy_to:search_text}"`
Filter FilterQuery `json:"filter,omitempty" elastic_mapping:"-"`
RawFilter map[string]interface{} `json:"raw_filter,omitempty"`
TimeField string `json:"time_field,omitempty" elastic_mapping:"id:{type:keyword}"`
Context Context `json:"context"`
}
-func (r Resource) Validate() error{
+func (r Resource) Validate() error {
if r.TimeField == "" {
return fmt.Errorf("TimeField can not be empty")
}
return nil
}


@@ -33,33 +33,33 @@ import (
)
type Rule struct {
ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"`
Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:keyword}"`
Resource Resource `json:"resource" elastic_mapping:"resource:{type:object}"`
Metrics Metric `json:"metrics" elastic_mapping:"metrics:{type:object}"`
Conditions Condition `json:"conditions" elastic_mapping:"conditions:{type:object}"`
Channels *NotificationConfig `json:"channels,omitempty" elastic_mapping:"channels:{type:object}"`
NotificationConfig *NotificationConfig `json:"notification_config,omitempty" elastic_mapping:"notification_config:{type:object}"`
RecoveryNotificationConfig *RecoveryNotificationConfig `json:"recovery_notification_config,omitempty" elastic_mapping:"recovery_notification_config:{type:object}"`
Schedule Schedule `json:"schedule" elastic_mapping:"schedule:{type:object}"`
LastNotificationTime time.Time `json:"-" elastic_mapping:"last_notification_time:{type:date}"`
LastTermStartTime time.Time `json:"-"` // start time of the latest alerting round
LastEscalationTime time.Time `json:"-"` // time the latest escalation notification was sent
SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
Expression string `json:"-"`
Creator struct {
Name string `json:"name" elastic_mapping:"name: { type: keyword }"`
Id string `json:"id" elastic_mapping:"id: { type: keyword }"`
} `json:"creator" elastic_mapping:"creator:{type:object}"`
Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"`
Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"`
}
-func (rule *Rule) GetOrInitExpression() (string, error){
-if rule.Expression != ""{
+func (rule *Rule) GetOrInitExpression() (string, error) {
+if rule.Expression != "" {
return rule.Expression, nil
}
sb := strings.Builder{}
@@ -81,7 +81,8 @@ func (rule *Rule) GetOrInitExpression() (string, error){
rule.Expression = strings.ReplaceAll(sb.String(), "result", metricExp)
return rule.Expression, nil
}
-//GetNotificationConfig for adapter old version config
+// GetNotificationConfig adapts old-version config
func (rule *Rule) GetNotificationConfig() *NotificationConfig {
if rule.NotificationConfig != nil {
return rule.NotificationConfig
@@ -96,37 +97,37 @@ func (rule *Rule) GetNotificationTitleAndMessage() (string, string) {
}
type NotificationConfig struct {
Enabled bool `json:"enabled"`
Title string `json:"title,omitempty"` //text template
Message string `json:"message,omitempty"` // text template
Normal []Channel `json:"normal,omitempty"`
Escalation []Channel `json:"escalation,omitempty"`
ThrottlePeriod string `json:"throttle_period,omitempty"` // silence period
AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"`
EscalationThrottlePeriod string `json:"escalation_throttle_period,omitempty"`
EscalationEnabled bool `json:"escalation_enabled,omitempty"`
}
type RecoveryNotificationConfig struct {
Enabled bool `json:"enabled"` // channel enabled
Title string `json:"title"` //text template
Message string `json:"message"` // text template
AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"`
Normal []Channel `json:"normal,omitempty"`
EventEnabled bool `json:"event_enabled"`
}
-type MessageTemplate struct{
+type MessageTemplate struct {
Type string `json:"type"`
Source string `json:"source"`
}
type TimeRange struct {
Start string `json:"start"`
End string `json:"end"`
}
-func (tr *TimeRange) Include( t time.Time) bool {
+func (tr *TimeRange) Include(t time.Time) bool {
if tr.Start == "" || tr.End == "" {
return true
}
@@ -135,10 +136,11 @@ func (tr *TimeRange) Include( t time.Time) bool {
}
type FilterParam struct {
Start interface{} `json:"start"`
End interface{} `json:"end"`
BucketSize string `json:"bucket_size"`
}
//ctx
//rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
//check_status ,timestamp,


@@ -36,7 +36,7 @@ import (
"time"
)
-func TestCreateRule( t *testing.T) {
+func TestCreateRule(t *testing.T) {
rule := Rule{
//ORMObjectBase: orm.ORMObjectBase{
// ID: util.GetUUID(),
@@ -45,9 +45,9 @@ func TestCreateRule( t *testing.T) {
//},
Enabled: true,
Resource: Resource{
ID: "c8i18llath2blrusdjng",
Type: "elasticsearch",
Objects: []string{".infini_metrics*"},
TimeField: "timestamp",
Filter: FilterQuery{
And: []FilterQuery{
@@ -69,68 +69,68 @@ func TestCreateRule( t *testing.T) {
},
},
},
//Metrics: Metric{
// PeriodInterval: "1m",
// MaxPeriods: 15,
// Items: []MetricItem{
// {Name: "red_health", Field: "*", Statistic: "count", Group: []string{"metadata.labels.cluster_id"}},
// },
//},
//Conditions: Condition{
// Operator: "any",
// Items: []ConditionItem{
// { MinimumPeriodMatch: 1, Operator: "gte", Values: []string{"1"}, Priority: "error", AlertMessage: "集群健康状态为 Red"},
// },
//},
Metrics: Metric{
Metric: insight.Metric{
Groups: []insight.MetricGroupItem{{"metadata.labels.cluster_id", 10}, {"metadata.labels.node_id", 10}},
Items: []insight.MetricItem{
-{Name: "a", Field: "payload.elasticsearch.node_stats.fs.total.free_in_bytes", Statistic: "min" },
+{Name: "a", Field: "payload.elasticsearch.node_stats.fs.total.free_in_bytes", Statistic: "min"},
{Name: "b", Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes", Statistic: "max"},
},
BucketSize: "1m",
Formula: "a/b*100",
},
//Expression: "min(fs.free_in_bytes)/max(fs.total_in_bytes)*100",
},
Conditions: Condition{
Operator: "any",
Items: []ConditionItem{
{MinimumPeriodMatch: 1, Operator: "lte", Values: []string{"76"}, Priority: "error"},
},
},
Channels: &NotificationConfig{
Normal: []Channel{
{Name: "钉钉", Type: ChannelWebhook, Webhook: &CustomWebhook{
HeaderParams: map[string]string{
"Message-Type": "application/json",
},
Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`,
Method: http.MethodPost,
URL: "https://oapi.dingtalk.com/robot/send?access_token=XXXXXX",
}},
},
Escalation: []Channel{
{Type: ChannelWebhook, Name: "微信", Webhook: &CustomWebhook{
HeaderParams: map[string]string{
"Message-Type": "application/json",
},
Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`,
Method: http.MethodPost,
URL: "https://oapi.weixin.com/robot/send?access_token=6a5c7c9454ff74537a6de493153b1da68860942d4b0aeb33797cb68b5111b077",
}},
},
ThrottlePeriod: "1h",
AcceptTimeRange: TimeRange{
Start: "8:00",
End: "21:00",
},
EscalationEnabled: false,
EscalationThrottlePeriod: "30m",
},
}
//err := rule.Metrics.GenerateExpression()
//if err != nil {
@@ -145,15 +145,12 @@ func TestCreateRule( t *testing.T) {
fmt.Println(exp)
}
-func TestTimeRange_Include( t *testing.T) {
+func TestTimeRange_Include(t *testing.T) {
tr := TimeRange{
Start: "08:00",
End: "18:31",
}
fmt.Println(tr.Include(time.Now()))
-ti,_ := time.Parse(time.RFC3339, "2022-04-11T10:31:38.911000504Z")
+ti, _ := time.Parse(time.RFC3339, "2022-04-11T10:31:38.911000504Z")
fmt.Println(time.Now().Sub(ti))
}


@@ -28,14 +28,11 @@
package alerting
type Schedule struct {
Cron *Cron `json:"cron,omitempty" elastic_mapping:"cron:{type:object}"`
Interval string `json:"interval,omitempty" elastic_mapping:"interval:{type:keyword}"`
}
type Cron struct {
Expression string `json:"expression" elastic_mapping:"expression:{type:text}"`
Timezone string `json:"timezone" elastic_mapping:"timezone:{type:keyword}"`
}


@@ -29,19 +29,19 @@ package alerting
type CustomWebhook struct {
HeaderParams map[string]string `json:"header_params,omitempty" elastic_mapping:"header_params:{type:object,enabled:false}"`
Method string `json:"method" elastic_mapping:"method:{type:keyword}"`
URL string `json:"url,omitempty"`
Body string `json:"body" elastic_mapping:"body:{type:text}"`
}
type Email struct {
ServerID string `json:"server_id" elastic_mapping:"server_id:{type:keyword}"`
Recipients struct {
To []string `json:"to,omitempty" elastic_mapping:"to:{type:keyword}"`
CC []string `json:"cc,omitempty" elastic_mapping:"cc:{type:keyword}"`
BCC []string `json:"bcc,omitempty" elastic_mapping:"bcc:{type:keyword}"`
} `json:"recipients" elastic_mapping:"recipients:{type:object}"`
Subject string `json:"subject" elastic_mapping:"subject:{type:text}"`
Body string `json:"body" elastic_mapping:"body:{type:text}"`
ContentType string `json:"content_type" elastic_mapping:"content_type:{type:keyword}"`
}


@@ -35,13 +35,13 @@ import (
type EmailServer struct {
orm.ORMObjectBase
Name string `json:"name" elastic_mapping:"name:{type:text}"`
Host string `json:"host" elastic_mapping:"host:{type:keyword}"`
Port int `json:"port" elastic_mapping:"port:{type:keyword}"`
TLS bool `json:"tls" elastic_mapping:"tls:{type:keyword}"`
Auth *model.BasicAuth `json:"auth" elastic_mapping:"auth:{type:object}"`
Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"`
CredentialID string `json:"credential_id" elastic_mapping:"credential_id:{type:keyword}"`
}
func (serv *EmailServer) Validate(requireName bool) error {
@@ -55,4 +55,4 @@ func (serv *EmailServer) Validate(requireName bool) error {
return fmt.Errorf("name can not be empty")
}
return nil
}


@@ -30,19 +30,19 @@ package insight
import "time"
type Dashboard struct {
ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
ClusterId string `json:"cluster_id" elastic_mapping:"cluster_id: { type: keyword }"`
IndexPattern string `json:"index_pattern" elastic_mapping:"index_pattern: { type: keyword }"`
TimeField string `json:"time_field,omitempty" elastic_mapping:"time_field: { type: keyword }"`
Filter interface{} `json:"filter,omitempty" elastic_mapping:"filter: { type: object, enabled:false }"`
BucketSize string `json:"bucket_size" elastic_mapping:"bucket_size: { type: keyword }"`
Title string `json:"title" elastic_mapping:"title: { type: keyword }"`
Description string `json:"description" elastic_mapping:"description: { type: keyword }"`
Visualizations interface{} `json:"visualizations" elastic_mapping:"visualizations: { type: object, enabled:false }"`
Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword }"`
User string `json:"user" elastic_mapping:"user: { type: keyword }"`
Query interface{} `json:"query,omitempty" elastic_mapping:"query: { type: object, enabled:false }"`
TimeFilter interface{} `json:"time_filter,omitempty" elastic_mapping:"time_filter: { type: object, enabled:false }"`
}


@@ -27,9 +27,8 @@
package insight
type SeriesItem struct {
Type string `json:"type"`
Options map[string]interface{} `json:"options"`
Metric Metric `json:"metric"`
}


@@ -29,39 +29,40 @@ package insight
import (
"fmt"
-"regexp"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
+"regexp"
)
type Metric struct {
AggTypes []string `json:"agg_types,omitempty"`
IndexPattern string `json:"index_pattern,omitempty"`
TimeField string `json:"time_field,omitempty"`
BucketSize string `json:"bucket_size,omitempty"`
Filter interface{} `json:"filter,omitempty"`
Groups []MetricGroupItem `json:"groups,omitempty"` //bucket group
Sort []GroupSort `json:"sort,omitempty"`
ClusterId string `json:"cluster_id,omitempty"`
Formula string `json:"formula,omitempty"`
//array of formula for new version
Formulas []string `json:"formulas,omitempty"`
Items []MetricItem `json:"items"`
FormatType string `json:"format,omitempty"`
TimeFilter interface{} `json:"time_filter,omitempty"`
TimeBeforeGroup bool `json:"time_before_group,omitempty"`
BucketLabel *BucketLabel `json:"bucket_label,omitempty"`
// number of buckets to return, used for aggregation auto_date_histogram when bucket size equals 'auto'
Buckets uint `json:"buckets,omitempty"`
Unit string `json:"unit,omitempty"`
}
type MetricBase struct {
orm.ORMObjectBase
//display name of the metric
Name string `json:"name"`
//metric identifier
Key string `json:"key"`
//optional values : "node", "indices", "shard"
Level string `json:"level"`
//metric calculation formula
@@ -76,16 +77,16 @@ type MetricBase struct {
}
type GroupSort struct {
Key string `json:"key"`
Direction string `json:"direction"`
}
type MetricGroupItem struct {
Field string `json:"field"`
Limit int `json:"limit"`
}
-func (m *Metric) GenerateExpression() (string, error){
+func (m *Metric) GenerateExpression() (string, error) {
if len(m.Items) == 1 {
return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil
}
@@ -93,12 +94,12 @@ func (m *Metric) GenerateExpression() (string, error){
return "", fmt.Errorf("formula should not be empty since there are %d metrics", len(m.Items))
}
var (
expressionBytes = []byte(m.Formula)
metricExpression string
)
for _, item := range m.Items {
metricExpression = fmt.Sprintf("%s(%s)", item.Statistic, item.Field)
-reg, err := regexp.Compile(item.Name+`([^\w]|$)`)
+reg, err := regexp.Compile(item.Name + `([^\w]|$)`)
if err != nil {
return "", err
}
@@ -127,10 +128,10 @@ func (m *Metric) ValidateSortKey() error {
mm[item.Name] = &item
}
for _, sortItem := range m.Sort {
-if !util.StringInArray([]string{"desc", "asc"}, sortItem.Direction){
+if !util.StringInArray([]string{"desc", "asc"}, sortItem.Direction) {
return fmt.Errorf("unknown sort direction [%s]", sortItem.Direction)
}
-if _, ok := mm[sortItem.Key]; !ok && !util.StringInArray([]string{"_key", "_count"}, sortItem.Key){
+if _, ok := mm[sortItem.Key]; !ok && !util.StringInArray([]string{"_key", "_count"}, sortItem.Key) {
return fmt.Errorf("unknown sort key [%s]", sortItem.Key)
}
}
@@ -138,26 +139,26 @@ func (m *Metric) ValidateSortKey() error {
type MetricItem struct {
Name string `json:"name,omitempty"`
Field string `json:"field"`
FieldType string `json:"field_type,omitempty"`
Statistic string `json:"statistic,omitempty"`
}
type MetricDataItem struct {
Timestamp interface{} `json:"timestamp,omitempty"`
Value interface{} `json:"value"`
Groups []string `json:"groups,omitempty"`
GroupLabel string `json:"group_label,omitempty"`
}
type MetricData struct { type MetricData struct {
Groups []string `json:"groups,omitempty"` Groups []string `json:"groups,omitempty"`
Data map[string][]MetricDataItem Data map[string][]MetricDataItem
GroupLabel string `json:"group_label,omitempty"` GroupLabel string `json:"group_label,omitempty"`
} }
type BucketLabel struct { type BucketLabel struct {
Enabled bool `json:"enabled"` Enabled bool `json:"enabled"`
Template string `json:"template,omitempty"` Template string `json:"template,omitempty"`
} }
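Note: a minimal usage sketch of the two helpers above. The Metric literal, the metric fields, and the import path infini.sh/console/model/insight are illustrative assumptions, not taken from a real dashboard.

	package main

	import (
		"fmt"

		"infini.sh/console/model/insight"
	)

	func main() {
		m := insight.Metric{
			Formula: "a/b*100",
			Items: []insight.MetricItem{
				{Name: "a", Field: "payload.cache.hits", Statistic: "max"},
				{Name: "b", Field: "payload.cache.total", Statistic: "max"},
			},
			Sort: []insight.GroupSort{{Key: "_count", Direction: "desc"}},
		}
		// "_key" and "_count" are always accepted as sort keys
		if err := m.ValidateSortKey(); err != nil {
			panic(err)
		}
		// the loop above substitutes each item name with statistic(field),
		// so this should yield: max(payload.cache.hits)/max(payload.cache.total)*100
		expr, err := m.GenerateExpression()
		if err != nil {
			panic(err)
		}
		fmt.Println(expr)
	}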


@@ -30,15 +30,15 @@ package insight

import "time"

type Visualization struct {
	ID           string       `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
	Created      *time.Time   `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
	Updated      *time.Time   `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
	Title        string       `json:"title,omitempty" elastic_mapping:"title: { type: keyword }"`
	IndexPattern string       `json:"index_pattern,omitempty" elastic_mapping:"index_pattern: { type: keyword }"`
	ClusterId    string       `json:"cluster_id,omitempty" elastic_mapping:"cluster_id: { type: keyword }"`
	Series       []SeriesItem `json:"series" elastic_mapping:"series: { type: object,enabled:false }"`
	Position     *Position    `json:"position,omitempty" elastic_mapping:"position: { type: object,enabled:false }"`
	Description  string       `json:"description,omitempty" elastic_mapping:"description: { type: keyword }"`
}

type Position struct {


@@ -31,6 +31,6 @@ import "infini.sh/framework/core/orm"

type Widget struct {
	orm.ORMObjectBase
	Title  string      `json:"title" elastic_mapping:"title: { type: text }"`
	Config interface{} `json:"config" elastic_mapping:"config: { type: object,enabled:false }"`
}


@@ -31,20 +31,21 @@ import "infini.sh/framework/core/orm"

type Layout struct {
	orm.ORMObjectBase
	Name        string `json:"name" elastic_mapping:"name: { type: text }"`
	Description string `json:"description" elastic_mapping:"description: { type: text }"`
	Creator     struct {
		Name string `json:"name"`
		Id   string `json:"id"`
	} `json:"creator"`
	ViewID   string      `json:"view_id" elastic_mapping:"view_id: { type: keyword }"`
	Config   interface{} `json:"config" elastic_mapping:"config: { type: object, enabled:false }"`
	Reserved bool        `json:"reserved,omitempty" elastic_mapping:"reserved:{type:boolean}"`
	Type     LayoutType  `json:"type" elastic_mapping:"type: { type: keyword }"`
	IsFixed  bool        `json:"is_fixed" elastic_mapping:"is_fixed: { type: boolean }"`
}

type LayoutType string

const (
	LayoutTypeWorkspace LayoutType = "workspace"
)


@@ -46,9 +46,9 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
		HostName string `json:"host_name"`
		IP       string `json:"ip"`
		Source   string `json:"source"`
		OSName   string `json:"os_name"`
		OSArch   string `json:"os_arch"`
		NodeID   string `json:"node_uuid"`
	}
	err := h.DecodeJSON(req, &reqBody)
	if err != nil {
@@ -84,7 +84,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
			hostInfo = &host.HostInfo{
				IP: hi.IP,
				OSInfo: host.OS{
					Platform:   hi.OSName,
					KernelArch: hi.OSArch,
				},
				NodeID: hi.NodeID,
@@ -97,7 +97,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
		}
		hostInfo.Timestamp = time.Now()
		var ctx *orm.Context
		if i == len(reqBody)-1 {
			ctx = &orm.Context{
				Refresh: "wait_for",
			}
@@ -112,7 +112,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
			continue
		}
	}
	resBody := util.MapStr{
		"success": true,
	}
	if len(errors) > 0 {
@@ -168,15 +168,15 @@ func (h *APIHandler) GetHostAgentInfo(w http.ResponseWriter, req *http.Request,
	}
	h.WriteJSON(w, util.MapStr{
		"host_id":  hostID,
		"agent_id": obj.ID,
		"version":  obj.Application.Version,
		"status":   hostInfo.AgentStatus,
		"endpoint": obj.GetEndpoint(),
	}, http.StatusOK)
}

func getHost(hostID string) (*host.HostInfo, error) {
	hostInfo := &host.HostInfo{}
	hostInfo.ID = hostID
	exists, err := orm.Get(hostInfo)
@@ -241,4 +241,4 @@ func (h *APIHandler) GetHostElasticProcess(w http.ResponseWriter, req *http.Requ
	h.WriteJSON(w, util.MapStr{
		//"elastic_processes": processes,
	}, http.StatusOK)
}
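Note: judging from the struct tags and the len(reqBody)-1 loop above, enrollHost decodes an array of host records. A hedged sketch of a matching payload; the route path is hypothetical and error handling is elided:

	package main

	import (
		"bytes"
		"encoding/json"
		"net/http"
	)

	func main() {
		// one element per host to enroll; keys mirror the json tags above
		payload := []map[string]string{
			{
				"host_name": "web-01",
				"ip":        "192.168.1.10",
				"source":    "agent",
				"os_name":   "linux",
				"os_arch":   "x86_64",
				"node_uuid": "n1-uuid",
			},
		}
		body, _ := json.Marshal(payload)
		// hypothetical endpoint; consult the router registration for the real path
		http.Post("http://localhost:9000/host/_enroll", "application/json", bytes.NewReader(body))
	}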


@@ -31,13 +31,13 @@ import (
	"bytes"
	"fmt"
	log "github.com/cihub/seelog"
	"infini.sh/framework/core/elastic"
	"infini.sh/framework/core/global"
	"infini.sh/framework/core/kv"
	"infini.sh/framework/core/model"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	"infini.sh/framework/modules/configs/common"
	common2 "infini.sh/framework/modules/elastic/common"
	metadata2 "infini.sh/framework/modules/elastic/metadata"
	"time"


@@ -37,15 +37,15 @@ import (
	"path"
)

func GenerateClientCert(caFile, caKey string) (caCert, clientCertPEM, clientKeyPEM []byte, err error) {
	return generateCert(caFile, caKey, false)
}

func GenerateServerCert(caFile, caKey string) (caCert, serverCertPEM, serverKeyPEM []byte, err error) {
	return generateCert(caFile, caKey, true)
}

func generateCert(caFile, caKey string, isServer bool) (caCert, instanceCertPEM, instanceKeyPEM []byte, err error) {
	pool := x509.NewCertPool()
	caCert, err = os.ReadFile(caFile)
	if err != nil {
@@ -69,11 +69,11 @@ func generateCert(caFile, caKey string, isServer bool) (caCert, instanceCertPEM,
	if err != nil {
		return
	}
	if isServer {
		b = &pem.Block{Type: "CERTIFICATE", Bytes: caCertBytes}
		certPEM := pem.EncodeToMemory(b)
		instanceCertPEM, instanceKeyPEM, err = util.GenerateServerCert(rootCert, certKey.(*rsa.PrivateKey), certPEM, nil)
	} else {
		_, instanceCertPEM, instanceKeyPEM = util.GetClientCert(rootCert, certKey)
	}
	return caCert, instanceCertPEM, instanceKeyPEM, nil
@@ -84,9 +84,9 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) {
	instanceCrt := path.Join(dataDir, "certs/agent/instance.crt")
	instanceKey := path.Join(dataDir, "certs/agent/instance.key")
	var (
		err           error
		clientCertPEM []byte
		clientKeyPEM  []byte
	)
	if util.FileExists(instanceCrt) && util.FileExists(instanceKey) {
		return instanceCrt, instanceKey, nil
@@ -96,7 +96,7 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) {
		return "", "", err
	}
	baseDir := path.Join(dataDir, "certs/agent")
	if !util.IsExist(baseDir) {
		err = os.MkdirAll(baseDir, 0775)
		if err != nil {
			return "", "", err
@@ -111,4 +111,4 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) {
		return "", "", err
	}
	return instanceCrt, instanceKey, nil
}
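Note: a sketch of how the exported helpers above might be called, assuming the caller lives in the same package (so GenerateClientCert is in scope) and imports "os"; the file paths are placeholders:

	// writeClientPair issues a client certificate from the CA pair and
	// persists it; 0600 keeps the private key readable by the owner only.
	func writeClientPair() error {
		caCert, certPEM, keyPEM, err := GenerateClientCert("ca.crt", "ca.key")
		if err != nil {
			return err
		}
		_ = caCert // the raw CA certificate bytes, if the caller wants to pin them
		if err := os.WriteFile("instance.crt", certPEM, 0600); err != nil {
			return err
		}
		return os.WriteFile("instance.key", keyPEM, 0600)
	}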


@@ -30,8 +30,8 @@ package common

import (
	log "github.com/cihub/seelog"
	"infini.sh/console/modules/agent/model"
	"infini.sh/framework/core/env"
	"infini.sh/framework/modules/configs/common"
)

func GetAgentConfig() *model.AgentConfig {


@@ -28,14 +28,14 @@
package model

type AgentConfig struct {
	Enabled bool         `config:"enabled"`
	Setup   *SetupConfig `config:"setup"`
}

type SetupConfig struct {
	DownloadURL     string `config:"download_url"`
	CACertFile      string `config:"ca_cert"`
	CAKeyFile       string `config:"ca_key"`
	ConsoleEndpoint string `config:"console_endpoint"`
	Port            string `config:"port"`
}


@@ -38,36 +38,36 @@ import (
	"strings"
)

func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := util.MapStr{}
	reqBody := struct {
		Keyword      string                       `json:"keyword"`
		Size         int                          `json:"size"`
		From         int                          `json:"from"`
		Aggregations []elastic.SearchAggParam     `json:"aggs"`
		Highlight    elastic.SearchHighlightParam `json:"highlight"`
		Filter       elastic.SearchFilterParam    `json:"filter"`
		Sort         []string                     `json:"sort"`
		StartTime    interface{}                  `json:"start_time"`
		EndTime      interface{}                  `json:"end_time"`
	}{}
	err := h.DecodeJSON(req, &reqBody)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
	aggs["term_cluster_id"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.labels.cluster_id",
			"size":  1000,
		},
		"aggs": util.MapStr{
			"term_cluster_name": util.MapStr{
				"terms": util.MapStr{
					"field": "metadata.labels.cluster_name",
					"size":  1,
				},
			},
		},
@@ -86,9 +86,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
	clusterFilter, hasAllPrivilege := h.GetClusterFilter(req, "metadata.labels.cluster_id")
	if !hasAllPrivilege && clusterFilter == nil {
		h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
		return
	}
	if !hasAllPrivilege && clusterFilter != nil {
@@ -97,9 +95,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
	hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req)
	if !hasAllPrivilege && len(indexPrivilege) == 0 {
		h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
		return
	}
	if !hasAllPrivilege {
@@ -107,10 +103,10 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		for clusterID, indices := range indexPrivilege {
			var (
				wildcardIndices []string
				normalIndices   []string
			)
			for _, index := range indices {
				if strings.Contains(index, "*") {
					wildcardIndices = append(wildcardIndices, index)
					continue
				}
@@ -120,8 +116,8 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
			if len(wildcardIndices) > 0 {
				subShould = append(subShould, util.MapStr{
					"query_string": util.MapStr{
						"query":            strings.Join(wildcardIndices, " "),
						"fields":           []string{"metadata.labels.index_name"},
						"default_operator": "OR",
					},
				})
@@ -146,7 +142,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
					{
						"bool": util.MapStr{
							"minimum_should_match": 1,
							"should":               subShould,
						},
					},
				},
@@ -156,7 +152,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		indexFilter := util.MapStr{
			"bool": util.MapStr{
				"minimum_should_match": 1,
				"should":               indexShould,
			},
		}
		filter = append(filter, indexFilter)
@@ -168,7 +164,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
			{
				"query_string": util.MapStr{
					"default_field": "*",
					"query":         reqBody.Keyword,
				},
			},
		}
@@ -176,15 +172,15 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
	var boolQuery = util.MapStr{
		"filter": filter,
	}
	if len(should) > 0 {
		boolQuery["should"] = should
		boolQuery["minimum_should_match"] = 1
	}
	query := util.MapStr{
		"aggs":      aggs,
		"size":      reqBody.Size,
		"from":      reqBody.From,
		"_source":   []string{"changelog", "id", "metadata", "timestamp"},
		"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
		"query": util.MapStr{
			"bool": boolQuery,
@@ -194,7 +190,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		reqBody.Sort = []string{"timestamp", "desc"}
	}
	query["sort"] = []util.MapStr{
		{
			reqBody.Sort[0]: util.MapStr{
				"order": reqBody.Sort[1],
@@ -206,8 +202,8 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetWildcardIndexName(event.Activity{}), dsl)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	w.Write(response.RawResult.Body)
}
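Note: the handler above expects sort as a [field, direction] pair and falls back to timestamp desc. A self-contained sketch of just that convention, isolated from the handler:

	package main

	import "fmt"

	// buildSort mirrors the pattern above: sort arrives as
	// []string{field, direction}, defaulting to timestamp desc.
	func buildSort(sort []string) []map[string]interface{} {
		if len(sort) != 2 {
			sort = []string{"timestamp", "desc"}
		}
		return []map[string]interface{}{
			{sort[0]: map[string]interface{}{"order": sort[1]}},
		}
	}

	func main() {
		fmt.Println(buildSort(nil)) // [map[timestamp:map[order:desc]]]
	}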


@@ -33,9 +33,9 @@ import (
	"net/http"
)

func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)
	if err != nil {
		log.Error(err)
@@ -43,8 +43,8 @@ func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request,
		return
	}
	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		log.Error(errStr)
		h.WriteError(w, errStr, http.StatusInternalServerError)
		return
@@ -105,4 +105,4 @@ func (h *APIHandler) HandleGetAliasAction(w http.ResponseWriter, req *http.Reque
		return
	}
	h.WriteJSON(w, res, http.StatusOK)
}


@@ -118,7 +118,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	var (
		// cluster_id => cluster_uuid
		clustersM    = map[string]string{}
		clusterUUIDs []string
	)
	for _, cid := range clusterIDs {
@@ -145,28 +145,27 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	indexMetricItems := []GroupMetricItem{}
	metricItem := newMetricItem("cluster_indexing", 2, "cluster")
	metricItem.OnlyPrimary = true
	indexMetricItems = append(indexMetricItems, GroupMetricItem{
		Key:          "cluster_indexing",
		Field:        "payload.elasticsearch.node_stats.indices.indexing.index_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   metricItem,
		FormatType:   "num",
		Units:        "doc/s",
	})
	metricItem = newMetricItem("cluster_search", 2, "cluster")
	indexMetricItems = append(indexMetricItems, GroupMetricItem{
		Key:          "cluster_search",
		Field:        "payload.elasticsearch.node_stats.indices.search.query_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   metricItem,
		FormatType:   "num",
		Units:        "query/s",
	})
	clusterID := global.MustLookupString(elastic.GlobalSystemElasticsearchID)
	intervalField, err := getDateHistogramIntervalField(clusterID, bucketSizeStr)
	if err != nil {
@@ -200,23 +199,23 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
				{
					"range": util.MapStr{
						"timestamp": util.MapStr{
							"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
						},
					},
				},
			},
		},
	}
	aggs := map[string]interface{}{}
	sumAggs := util.MapStr{}
	for _, metricItem := range indexMetricItems {
		leafAgg := util.MapStr{
			"max": util.MapStr{
				"field": metricItem.Field,
			},
		}
		var sumBucketPath = "term_node>" + metricItem.ID
		aggs[metricItem.ID] = leafAgg
		sumAggs[metricItem.ID] = util.MapStr{
@@ -224,22 +223,22 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
				"buckets_path": sumBucketPath,
			},
		}
		if metricItem.IsDerivative {
			sumAggs[metricItem.ID+"_deriv"] = util.MapStr{
				"derivative": util.MapStr{
					"buckets_path": metricItem.ID,
				},
			}
		}
	}
	sumAggs["term_node"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.labels.node_id",
			"size":  1000,
		},
		"aggs": aggs,
	}
	query["aggs"] = util.MapStr{
		"group_by_level": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.labels.cluster_uuid",
@@ -247,11 +246,11 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
			},
			"aggs": util.MapStr{
				"dates": util.MapStr{
					"date_histogram": util.MapStr{
						"field":       "timestamp",
						intervalField: bucketSizeStr,
					},
					"aggs": sumAggs,
				},
			},
		},
@@ -279,12 +278,12 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	for _, line := range indexMetrics["cluster_indexing"].Lines {
		// remove first metric dot
		data := line.Data
		if v, ok := data.([][]interface{}); ok && len(v) > 0 {
			// remove first metric dot
			temp := v[1:]
			// // remove first last dot
			if len(temp) > 0 {
				temp = temp[0 : len(temp)-1]
			}
			data = temp
		}
@@ -293,12 +292,12 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	searchMetricData := util.MapStr{}
	for _, line := range indexMetrics["cluster_search"].Lines {
		data := line.Data
		if v, ok := data.([][]interface{}); ok && len(v) > 0 {
			// remove first metric dot
			temp := v[1:]
			// // remove first last dot
			if len(temp) > 0 {
				temp = temp[0 : len(temp)-1]
			}
			data = temp
		}
@@ -633,7 +632,6 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p
		}
	}
	if v, ok := nodeID.(string); ok {
		nodeInfos[v] = util.MapStr{
			"timestamp": hitM["timestamp"],
@@ -642,7 +640,7 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p
			"load_1m":      load,
			"heap.percent": heapUsage,
			"disk.avail":   availDisk,
			"disk.used":    usedDisk,
			"uptime":       uptime,
		}
@@ -865,14 +863,14 @@ type RealtimeNodeInfo struct {
	IndexQPS      interface{} `json:"index_qps"`
	QueryQPS      interface{} `json:"query_qps"`
	IndexBytesQPS interface{} `json:"index_bytes_qps"`
	Timestamp     uint64      `json:"timestamp"`
	CatNodeResponse
}

func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -891,18 +889,18 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map
	"aggs": util.MapStr{
		"date": util.MapStr{
			"date_histogram": util.MapStr{
				"field":       "timestamp",
				intervalField: bucketSizeStr,
			},
			"aggs": util.MapStr{
				"term_shard": util.MapStr{
					"terms": util.MapStr{
						"field": "metadata.labels.shard_id",
						"size":  1000,
					},
					"aggs": util.MapStr{
						"filter_pri": util.MapStr{
							"filter": util.MapStr{"term": util.MapStr{"payload.elasticsearch.shard_stats.routing.primary": true}},
							"aggs": util.MapStr{
								"index_total": util.MapStr{
									"max": util.MapStr{
@@ -994,8 +992,8 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map

func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -1048,7 +1046,7 @@ func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName st
	"aggs": util.MapStr{
		"date": util.MapStr{
			"date_histogram": util.MapStr{
				"field":       "timestamp",
				intervalField: bucketSizeStr,
			},
			"aggs": util.MapStr{
@@ -1108,8 +1106,8 @@ func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName st

func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -1128,7 +1126,7 @@ func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[
	"aggs": util.MapStr{
		"date": util.MapStr{
			"date_histogram": util.MapStr{
				"field":       "timestamp",
				intervalField: bucketSizeStr,
			},
			"aggs": util.MapStr{
@@ -1238,11 +1236,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
			{
				"match": util.MapStr{
					reqBody.SearchField: util.MapStr{
						"query":          reqBody.Keyword,
						"fuzziness":      "AUTO",
						"max_expansions": 10,
						"prefix_length":  2,
						"boost":          2,
					},
				},
			},
@@ -1284,11 +1282,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
			{
				"match": util.MapStr{
					"search_text": util.MapStr{
						"query":          reqBody.Keyword,
						"fuzziness":      "AUTO",
						"max_expansions": 10,
						"prefix_length":  2,
						"boost":          2,
					},
				},
			},
@@ -1357,7 +1355,7 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
	id := ps.ByName("id")
	collectionMode := GetMonitorState(id)
	ret := util.MapStr{
		"cluster_id":             id,
		"metric_collection_mode": collectionMode,
	}
	queryDSL := util.MapStr{
@@ -1382,7 +1380,7 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
		"grp_name": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.name",
				"size":  10,
			},
			"aggs": util.MapStr{
				"max_timestamp": util.MapStr{
@@ -1405,11 +1403,11 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
		key := bk["key"].(string)
		if tv, ok := bk["max_timestamp"].(map[string]interface{}); ok {
			if collectionMode == elastic.ModeAgentless {
				if util.StringInArray([]string{"index_stats", "cluster_health", "cluster_stats", "node_stats"}, key) {
					ret[key] = getCollectionStats(tv["value"])
				}
			} else {
				if util.StringInArray([]string{"shard_stats", "cluster_health", "cluster_stats", "node_stats"}, key) {
					ret[key] = getCollectionStats(tv["value"])
				}
			}
@@ -1422,15 +1420,15 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req

func getCollectionStats(lastActiveAt interface{}) util.MapStr {
	stats := util.MapStr{
		"last_active_at": lastActiveAt,
		"status":         "active",
	}
	if timestamp, ok := lastActiveAt.(float64); ok {
		t := time.Unix(int64(timestamp/1000), 0)
		if time.Now().Sub(t) > 5*time.Minute {
			stats["status"] = "warning"
		} else {
			stats["status"] = "ok"
		}
	}
	return stats
}
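Note: getCollectionStats treats the max_timestamp aggregation value as epoch milliseconds. A self-contained sketch of the same staleness rule, separated from the handler:

	package main

	import (
		"fmt"
		"time"
	)

	// status mirrors getCollectionStats: epoch-millisecond timestamps older
	// than five minutes are flagged "warning", fresher ones "ok".
	func status(lastActiveAt float64) string {
		t := time.Unix(int64(lastActiveAt/1000), 0)
		if time.Since(t) > 5*time.Minute {
			return "warning"
		}
		return "ok"
	}

	func main() {
		now := float64(time.Now().UnixMilli())
		fmt.Println(status(now))              // ok
		fmt.Println(status(now - 10*60*1000)) // warning
	}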


@@ -39,7 +39,7 @@ import (

func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)
	if err != nil {
		log.Error(err)
@@ -47,16 +47,16 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
		return
	}
	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		log.Error(errStr)
		h.WriteError(w, errStr, http.StatusNotFound)
		return
	}
	var reqParams = struct {
		Index           string                 `json:"index"`
		Body            map[string]interface{} `json:"body"`
		DistinctByField map[string]interface{} `json:"distinct_by_field"`
	}{}
@@ -101,12 +101,12 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
	if qm, ok := query.(map[string]interface{}); ok {
		filter, _ := util.MapStr(qm).GetValue("bool.filter")
		if fv, ok := filter.([]interface{}); ok {
			fv = append(fv, util.MapStr{
				"script": util.MapStr{
					"script": util.MapStr{
						"source": "distinct_by_field",
						"lang":   "infini",
						"params": reqParams.DistinctByField,
					},
				},
@@ -173,7 +173,7 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
	if timeout != "" {
		queryArgs = &[]util.KV{
			{
				Key:   "timeout",
				Value: timeout,
			},
		}
@@ -184,7 +184,7 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
		}
		var cancel context.CancelFunc
		// here add one second for network delay
		ctx, cancel = context.WithTimeout(context.Background(), du+time.Second)
		defer cancel()
	}
@@ -207,12 +207,10 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
	h.Write(w, searchRes.RawResult.Body)
}

func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)
	if err != nil {
		log.Error(err)
@@ -221,16 +219,16 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
		return
	}
	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		h.WriteError(w, errStr, http.StatusNotFound)
		return
	}
	var reqParams = struct {
		BoolFilter interface{} `json:"boolFilter"`
		FieldName  string      `json:"field"`
		Query      string      `json:"query"`
	}{}
	err = h.DecodeJSON(req, &reqParams)
	if err != nil {
@@ -246,7 +244,7 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
	indices, hasAll := h.GetAllowedIndices(req, targetClusterID)
	if !hasAll {
		if len(indices) == 0 {
			h.WriteJSON(w, values, http.StatusOK)
			return
		}
		boolQ["must"] = []util.MapStr{
@@ -265,15 +263,15 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
		"aggs": util.MapStr{
			"suggestions": util.MapStr{
				"terms": util.MapStr{
					"field":          reqParams.FieldName,
					"include":        reqParams.Query + ".*",
					"execution_hint": "map",
					"shard_size":     10,
				},
			},
		},
	}
	var queryBodyBytes = util.MustToJSONBytes(queryBody)
	searchRes, err := client.SearchWithRawQueryDSL(indexName, queryBodyBytes)
	if err != nil {
@@ -285,7 +283,7 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
	for _, bucket := range searchRes.Aggregations["suggestions"].Buckets {
		values = append(values, bucket["key"])
	}
	h.WriteJSON(w, values, http.StatusOK)
}

func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -293,7 +291,7 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
	traceIndex := h.GetParameterOrDefault(req, "traceIndex", orm.GetIndexName(elastic.TraceMeta{}))
	traceField := h.GetParameterOrDefault(req, "traceField", "trace_id")
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)
	if err != nil {
		log.Error(err)
@@ -301,8 +299,8 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
		return
	}
	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		h.WriteError(w, errStr, http.StatusNotFound)
		return
	}
@@ -340,4 +338,3 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
	}
	h.WriteJSON(w, indexNames, http.StatusOK)
}
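Note: the timeout handling above both forwards the timeout to Elasticsearch as a query argument and bounds the client call with a context one second longer, to absorb network delay. A sketch of that pattern; time.ParseDuration stands in for however du is derived upstream, which is not visible in this hunk:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	func main() {
		timeout := "30s"
		du, err := time.ParseDuration(timeout)
		if err != nil {
			panic(err)
		}
		// give the server-side timeout a head start: add one second for network delay
		ctx, cancel := context.WithTimeout(context.Background(), du+time.Second)
		defer cancel()
		fmt.Println(ctx.Err()) // <nil> until the deadline passes
	}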


@@ -211,8 +211,7 @@ func (h *APIHandler) getDiscoverHosts(w http.ResponseWriter, req *http.Request,

func getHostSummary(agentIDs []string, metricName string, summary map[string]util.MapStr) error {
	if summary == nil {
		summary = map[string]util.MapStr{}
	}
	if len(agentIDs) == 0 {
@@ -506,8 +505,7 @@ func (h *APIHandler) FetchHostInfo(w http.ResponseWriter, req *http.Request, ps
	for key, item := range hostMetrics {
		for _, line := range item.Lines {
			if _, ok := networkMetrics[line.Metric.Label]; !ok {
				networkMetrics[line.Metric.Label] = util.MapStr{}
			}
			networkMetrics[line.Metric.Label][key] = line.Data
		}
@@ -682,20 +680,20 @@ func (h *APIHandler) getSingleHostMetricFromNode(ctx context.Context, nodeID str
}

const (
	OSCPUUsedPercentMetricKey           = "cpu_used_percent"
	MemoryUsedPercentMetricKey          = "memory_used_percent"
	DiskUsedPercentMetricKey            = "disk_used_percent"
	SystemLoadMetricKey                 = "system_load"
	CPUIowaitMetricKey                  = "cpu_iowait"
	SwapMemoryUsedPercentMetricKey      = "swap_memory_used_percent"
	NetworkSummaryMetricKey             = "network_summary"
	NetworkPacketsSummaryMetricKey      = "network_packets_summary"
	DiskReadRateMetricKey               = "disk_read_rate"
	DiskWriteRateMetricKey              = "disk_write_rate"
	DiskPartitionUsageMetricKey         = "disk_partition_usage"
	NetworkInterfaceOutputRateMetricKey = "network_interface_output_rate"
)

func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	hostID := ps.MustGetParameter("host_id")
	hostInfo := &host.HostInfo{}
@@ -798,7 +796,7 @@ func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Reque
		metricItem.AddLine("Disk Write Rate", "Disk Write Rate", "network write rate of host.", "group1", "payload.host.diskio_summary.write.bytes", "max", bucketSizeStr, "%", "bytes", "0,0.[00]", "0,0.[00]", false, true)
		metricItems = append(metricItems, metricItem)
	case DiskPartitionUsageMetricKey, NetworkInterfaceOutputRateMetricKey:
		resBody["metrics"], err = h.getGroupHostMetrics(ctx, hostInfo.AgentID, min, max, bucketSize, key)
		if err != nil {
			log.Error(err)
			h.WriteError(w, err, http.StatusInternalServerError)


@@ -35,7 +35,7 @@ import (
	"net/http"
)

func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	esClient := elastic.GetClient(clusterID)
	policies, err := esClient.GetILMPolicy("")
@@ -47,7 +47,7 @@ func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.R
	h.WriteJSON(w, policies, http.StatusOK)
}

func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	policy := ps.MustGetParameter("policy")
	esClient := elastic.GetClient(clusterID)
@@ -66,7 +66,7 @@ func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.
	h.WriteAckOKJSON(w)
}

func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	policy := ps.MustGetParameter("policy")
	esClient := elastic.GetClient(clusterID)
@@ -77,4 +77,4 @@ func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *htt
		return
	}
	h.WriteAckOKJSON(w)
}

File diff suppressed because it is too large


@ -46,41 +46,41 @@ import (
) )
func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody:=util.MapStr{} resBody := util.MapStr{}
reqBody := struct{ reqBody := struct {
Keyword string `json:"keyword"` Keyword string `json:"keyword"`
Size int `json:"size"` Size int `json:"size"`
From int `json:"from"` From int `json:"from"`
Aggregations []elastic.SearchAggParam `json:"aggs"` Aggregations []elastic.SearchAggParam `json:"aggs"`
Highlight elastic.SearchHighlightParam `json:"highlight"` Highlight elastic.SearchHighlightParam `json:"highlight"`
Filter elastic.SearchFilterParam `json:"filter"` Filter elastic.SearchFilterParam `json:"filter"`
Sort []string `json:"sort"` Sort []string `json:"sort"`
SearchField string `json:"search_field"` SearchField string `json:"search_field"`
}{} }{}
err := h.DecodeJSON(req, &reqBody) err := h.DecodeJSON(req, &reqBody)
if err != nil { if err != nil {
resBody["error"] = err.Error() resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError ) h.WriteJSON(w, resBody, http.StatusInternalServerError)
return return
} }
aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations) aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
aggs["term_cluster_id"] = util.MapStr{ aggs["term_cluster_id"] = util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "metadata.cluster_id", "field": "metadata.cluster_id",
"size": 1000, "size": 1000,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
"term_cluster_name": util.MapStr{ "term_cluster_name": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "metadata.cluster_name", "field": "metadata.cluster_name",
"size": 1, "size": 1,
}, },
}, },
}, },
} }
filter := elastic.BuildSearchTermFilter(reqBody.Filter) filter := elastic.BuildSearchTermFilter(reqBody.Filter)
var should []util.MapStr var should []util.MapStr
if reqBody.SearchField != ""{ if reqBody.SearchField != "" {
should = []util.MapStr{ should = []util.MapStr{
{ {
"prefix": util.MapStr{ "prefix": util.MapStr{
@ -103,8 +103,8 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
}, },
}, },
} }
}else{ } else {
if reqBody.Keyword != ""{ if reqBody.Keyword != "" {
should = []util.MapStr{ should = []util.MapStr{
{ {
"prefix": util.MapStr{ "prefix": util.MapStr{
@ -149,15 +149,13 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
} }
} }
must := []interface{}{ must := []interface{}{}
} if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri {
if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri { if indexFilter != nil {
if indexFilter != nil{
must = append(must, indexFilter) must = append(must, indexFilter)
} }
}else{ } else {
h.WriteJSON(w, elastic.SearchResponse{ h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
}, http.StatusOK)
return return
} }
 	boolQuery := util.MapStr{
@@ -169,7 +167,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
 			},
 		},
 		"filter": filter,
 		"must":   must,
 	}
 	if len(should) > 0 {
 		boolQuery["should"] = should
@@ -178,7 +176,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
 	query := util.MapStr{
 		"aggs":      aggs,
 		"size":      reqBody.Size,
 		"from":      reqBody.From,
 		"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
 		"query": util.MapStr{
 			"bool": boolQuery,
@@ -192,7 +190,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
 		},
 	}
 	if len(reqBody.Sort) > 1 {
 		query["sort"] = []util.MapStr{
 			{
 				reqBody.Sort[0]: util.MapStr{
 					"order": reqBody.Sort[1],
@@ -204,14 +202,14 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
 	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.IndexConfig{}), dsl)
 	if err != nil {
 		resBody["error"] = err.Error()
-		h.WriteJSON(w,resBody, http.StatusInternalServerError )
+		h.WriteJSON(w, resBody, http.StatusInternalServerError)
 		return
 	}
 	w.Write(util.MustToJSONBytes(response))
 }
-func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool){
+func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool) {
 	hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req)
 	if !hasAllPrivilege && len(indexPrivilege) == 0 {
 		return nil, false
@@ -221,10 +219,10 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
 	for clusterID, indices := range indexPrivilege {
 		var (
 			wildcardIndices []string
 			normalIndices   []string
 		)
 		for _, index := range indices {
-			if strings.Contains(index,"*") {
+			if strings.Contains(index, "*") {
 				wildcardIndices = append(wildcardIndices, index)
 				continue
 			}
@@ -234,8 +232,8 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
 		if len(wildcardIndices) > 0 {
 			subShould = append(subShould, util.MapStr{
 				"query_string": util.MapStr{
 					"query":            strings.Join(wildcardIndices, " "),
 					"fields":           []string{"metadata.index_name"},
 					"default_operator": "OR",
 				},
 			})
@@ -260,7 +258,7 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
 					{
 						"bool": util.MapStr{
 							"minimum_should_match": 1,
 							"should":               subShould,
 						},
 					},
 				},
@@ -270,14 +268,14 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
 		indexFilter := util.MapStr{
 			"bool": util.MapStr{
 				"minimum_should_match": 1,
 				"should":               indexShould,
 			},
 		}
 		return indexFilter, true
 	}
 	return nil, true
 }
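
For context on the filter this function returns: wildcard patterns are funneled into a query_string clause, exact names into a terms clause, and the per-cluster blocks are OR-ed together under minimum_should_match. A minimal standalone sketch of that composition (plain maps instead of util.MapStr; the terms clause and the cluster-id term are assumptions based on the visible shape, not the project's exact code):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type M = map[string]interface{}

// buildIndexFilter mirrors the shape produced by getAllowedIndexFilter:
// wildcard patterns go through query_string, exact names through terms,
// and each cluster block requires the cluster id plus one index clause.
func buildIndexFilter(privilege map[string][]string) M {
	var indexShould []M
	for clusterID, indices := range privilege {
		var wildcard, normal []string
		for _, index := range indices {
			if strings.Contains(index, "*") {
				wildcard = append(wildcard, index)
			} else {
				normal = append(normal, index)
			}
		}
		var subShould []M
		if len(wildcard) > 0 {
			subShould = append(subShould, M{"query_string": M{
				"query":            strings.Join(wildcard, " "),
				"fields":           []string{"metadata.index_name"},
				"default_operator": "OR",
			}})
		}
		if len(normal) > 0 {
			subShould = append(subShould, M{"terms": M{"metadata.index_name": normal}})
		}
		indexShould = append(indexShould, M{"bool": M{"must": []M{
			{"term": M{"metadata.cluster_id": M{"value": clusterID}}},
			{"bool": M{"minimum_should_match": 1, "should": subShould}},
		}}})
	}
	return M{"bool": M{"minimum_should_match": 1, "should": indexShould}}
}

func main() {
	f := buildIndexFilter(map[string][]string{"c1": {"logs-*", "orders"}})
	b, _ := json.MarshalIndent(f, "", "  ")
	fmt.Println(string(b))
}
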
 func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
 	var indexIDs []interface{}
 	h.DecodeJSON(req, &indexIDs)
@@ -288,8 +286,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 	indexIDs = indexIDs[0:1]
 	// map indexIDs(cluster_id:index_name => cluster_uuid:indexName)
 	var (
 		indexIDM          = map[string]string{}
 		newIndexIDs       []interface{}
 		clusterIndexNames = map[string][]string{}
 	)
 	indexID := indexIDs[0]
@@ -318,12 +316,12 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 			return
 		}
 		clusterIndexNames[firstClusterID] = append(clusterIndexNames[firstClusterID], firstIndexName)
-	}else{
+	} else {
 		h.WriteError(w, fmt.Sprintf("invalid index_id: %v", indexID), http.StatusInternalServerError)
 		return
 	}
 	for clusterID, indexNames := range clusterIndexNames {
 		clusterUUID, err := adapter.GetClusterUUID(clusterID)
 		if err != nil {
 			log.Warnf("get cluster uuid error: %v", err)
 			continue
@@ -382,7 +380,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 		}
 		if primary == true {
 			indexInfo.Shards++
-		}else{
+		} else {
 			indexInfo.Replicas++
 		}
 		indexInfo.Timestamp = hitM["timestamp"]
@@ -403,36 +401,36 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 	}
 	var metricLen = 15
 	// indexing rate
-	indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
+	indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
 	indexMetric.OnlyPrimary = true
-	indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
+	indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
 	nodeMetricItems := []GroupMetricItem{}
-	nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+	nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
 		Key:          "indexing",
 		Field:        "payload.elasticsearch.shard_stats.indexing.index_total",
 		ID:           util.GetUUID(),
 		IsDerivative: true,
 		MetricItem:   indexMetric,
 		FormatType:   "num",
 		Units:        "Indexing/s",
 	})
-	queryMetric:=newMetricItem("search", 2, OperationGroupKey)
-	queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
-	nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
+	queryMetric := newMetricItem("search", 2, OperationGroupKey)
+	queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
+	nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
 		Key:          "search",
 		Field:        "payload.elasticsearch.shard_stats.search.query_total",
 		ID:           util.GetUUID(),
 		IsDerivative: true,
 		MetricItem:   queryMetric,
 		FormatType:   "num",
 		Units:        "Search/s",
 	})
-	aggs:=map[string]interface{}{}
-	query :=map[string]interface{}{}
-	query["query"]=util.MapStr{
+	aggs := map[string]interface{}{}
+	query := map[string]interface{}{}
+	query["query"] = util.MapStr{
 		"bool": util.MapStr{
 			"must": []util.MapStr{
 				{
 					"term": util.MapStr{
 						"metadata.category": util.MapStr{
@@ -462,7 +460,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 				{
 					"range": util.MapStr{
 						"timestamp": util.MapStr{
-							"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
+							"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
 						},
 					},
 				},
@@ -471,18 +469,18 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 	}
 	sumAggs := util.MapStr{}
-	for _,metricItem:=range nodeMetricItems{
+	for _, metricItem := range nodeMetricItems {
 		leafAgg := util.MapStr{
-			"max":util.MapStr{
+			"max": util.MapStr{
 				"field": metricItem.Field,
 			},
 		}
-		var sumBucketPath = "term_shard>"+ metricItem.ID
+		var sumBucketPath = "term_shard>" + metricItem.ID
 		if metricItem.MetricItem.OnlyPrimary {
 			filterSubAggs := util.MapStr{
 				metricItem.ID: leafAgg,
 			}
-			aggs["filter_pri"]=util.MapStr{
+			aggs["filter_pri"] = util.MapStr{
 				"filter": util.MapStr{
 					"term": util.MapStr{
 						"payload.elasticsearch.shard_stats.routing.primary": util.MapStr{
@@ -492,8 +490,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 					},
 				},
 				"aggs": filterSubAggs,
 			}
-			sumBucketPath = "term_shard>filter_pri>"+ metricItem.ID
-		}else{
+			sumBucketPath = "term_shard>filter_pri>" + metricItem.ID
+		} else {
 			aggs[metricItem.ID] = leafAgg
 		}
@@ -502,18 +500,18 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 				"buckets_path": sumBucketPath,
 			},
 		}
-		if metricItem.IsDerivative{
-			sumAggs[metricItem.ID+"_deriv"]=util.MapStr{
-				"derivative":util.MapStr{
+		if metricItem.IsDerivative {
+			sumAggs[metricItem.ID+"_deriv"] = util.MapStr{
+				"derivative": util.MapStr{
 					"buckets_path": metricItem.ID,
 				},
 			}
 		}
 	}
-	sumAggs["term_shard"]= util.MapStr{
+	sumAggs["term_shard"] = util.MapStr{
 		"terms": util.MapStr{
 			"field": "metadata.labels.shard_id",
 			"size":  10000,
 		},
 		"aggs": aggs,
 	}
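
For reference, the aggregation this loop assembles has the shape date_histogram, then per-shard terms, then a max leaf, with a sum_bucket pipeline summing the per-shard maxima and a derivative turning cumulative counters into per-bucket rates. A standalone sketch of that shape (plain maps instead of util.MapStr; the interval key and the metric id are illustrative, the real code uses a generated UUID and a version-dependent interval field):

package main

import (
	"encoding/json"
	"fmt"
)

type M = map[string]interface{}

func main() {
	metricID := "indexing" // illustrative; the real code uses util.GetUUID()
	aggs := M{
		"dates": M{
			"date_histogram": M{"field": "timestamp", "fixed_interval": "60s"},
			"aggs": M{
				// per-shard leaf: latest counter value for each shard
				"term_shard": M{
					"terms": M{"field": "metadata.labels.shard_id", "size": 10000},
					"aggs":  M{metricID: M{"max": M{"field": "payload.elasticsearch.shard_stats.indexing.index_total"}}},
				},
				// sum the per-shard maxima into one value per time bucket
				metricID: M{"sum_bucket": M{"buckets_path": "term_shard>" + metricID}},
				// the difference between buckets turns a counter into a rate
				metricID + "_deriv": M{"derivative": M{"buckets_path": metricID}},
			},
		},
	}
	out, _ := json.MarshalIndent(aggs, "", "  ")
	fmt.Println(string(out))
}
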
@@ -523,8 +521,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 	if err != nil {
 		panic(err)
 	}
-	query["size"]=0
-	query["aggs"]= util.MapStr{
+	query["size"] = 0
+	query["aggs"] = util.MapStr{
 		"group_by_level": util.MapStr{
 			"terms": util.MapStr{
 				"field": "metadata.labels.index_id",
@@ -532,11 +530,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 			},
 			"aggs": util.MapStr{
 				"dates": util.MapStr{
-					"date_histogram":util.MapStr{
+					"date_histogram": util.MapStr{
 						"field":       "timestamp",
 						intervalField: bucketSizeStr,
 					},
-					"aggs":sumAggs,
+					"aggs": sumAggs,
 				},
 			},
 		},
@@ -549,9 +547,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
 	indexMetrics := map[string]util.MapStr{}
 	for key, item := range metrics {
 		for _, line := range item.Lines {
-			if _, ok := indexMetrics[line.Metric.Label]; !ok{
-				indexMetrics[line.Metric.Label] = util.MapStr{
-				}
+			if _, ok := indexMetrics[line.Metric.Label]; !ok {
+				indexMetrics[line.Metric.Label] = util.MapStr{}
 			}
 			indexMetrics[line.Metric.Label][key] = line.Data
 		}
@@ -601,11 +598,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
 	indexID := ps.MustGetParameter("index")
 	parts := strings.Split(indexID, ":")
 	if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) {
 		h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
 		return
 	}
 	if len(parts) < 2 {
-		h.WriteError(w, "invalid index id: "+ indexID, http.StatusInternalServerError)
+		h.WriteError(w, "invalid index id: "+indexID, http.StatusInternalServerError)
 		return
 	}
@@ -635,7 +632,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
 		return
 	}
 	q1 := orm.Query{
 		Size:          1000,
 		WildcardIndex: true,
 	}
 	q1.Conds = orm.And(
@@ -651,9 +648,9 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
 	summary := util.MapStr{}
 	hit := response.Hits.Hits[0].Source
 	var (
 		shardsNum   int
 		replicasNum int
 		indexInfo   = util.MapStr{
 			"index": parts[1],
 		}
 	)
@@ -683,7 +680,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
 		storeInBytes, _ := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "shard_stats", "store", "size_in_bytes"}, resultM)
 		if docs, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "shard_stats", "docs", "count"}, resultM); ok {
 			//summary["docs"] = docs
-			if v, ok := docs.(float64); ok && primary == true{
+			if v, ok := docs.(float64); ok && primary == true {
 				shardSum.DocsCount += int64(v)
 			}
 		}
@@ -695,7 +692,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
 		}
 		if primary == true {
 			shardSum.Shards++
-		}else{
+		} else {
 			shardSum.Replicas++
 		}
 	}
@@ -706,7 +703,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
 		indexInfo["store_size"] = util.FormatBytes(float64(shardSum.StoreInBytes), 1)
 		indexInfo["shards"] = shardSum.Shards + shardSum.Replicas
-		summary["unassigned_shards"] = (replicasNum + 1) * shardsNum - shardSum.Shards - shardSum.Replicas
+		summary["unassigned_shards"] = (replicasNum+1)*shardsNum - shardSum.Shards - shardSum.Replicas
 	}
 	summary["index_info"] = indexInfo
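
A quick sanity check on the unassigned_shards arithmetic above, with hypothetical numbers: an index with shardsNum = 3 primaries and replicasNum = 1 replica per primary should have (1+1)*3 = 6 shard copies in total; if the shard-stats events show 3 active primaries and 2 active replicas, the summary reports 6 - 3 - 2 = 1 unassigned shard.
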
@@ -721,7 +718,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
 	}
 	indexName := ps.MustGetParameter("index")
 	q1 := orm.Query{
 		Size:          1000,
 		WildcardIndex: true,
 	}
 	clusterUUID, err := adapter.GetClusterUUID(clusterID)
@@ -742,7 +739,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
 	err, result := orm.Search(&event.Event{}, &q1)
 	if err != nil {
 		log.Error(err)
-		h.WriteError(w,err.Error(), http.StatusInternalServerError )
+		h.WriteError(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 	var shards = []interface{}{}
@@ -756,7 +753,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
 	err, nodesResult := orm.Search(elastic.NodeConfig{}, q)
 	if err != nil {
 		log.Error(err)
-		h.WriteError(w,err.Error(), http.StatusInternalServerError )
+		h.WriteError(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 	nodeIDToName := util.MapStr{}
@@ -803,7 +800,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
 		primary, _ := shardM.GetValue("routing.primary")
 		if primary == true {
 			shardInfo["prirep"] = "p"
-		}else{
+		} else {
 			shardInfo["prirep"] = "r"
 		}
 		shardInfo["state"], _ = shardM.GetValue("routing.state")
@@ -880,11 +877,11 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
 	var metricType string
 	if metricKey == v1.IndexHealthMetricKey {
 		metricType = v1.MetricTypeClusterHealth
-	}else{
+	} else {
 		//for agent mode
 		metricType = v1.MetricTypeNodeStats
 	}
-	bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, metricType,60)
+	bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, metricType, 60)
 	if err != nil {
 		log.Error(err)
 		resBody["error"] = err
@@ -892,7 +889,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
 		return
 	}
 	if bucketSize <= 60 {
-		min = min - int64(2 * bucketSize * 1000)
+		min = min - int64(2*bucketSize*1000)
 	}
 	timeout := h.GetParameterOrDefault(req, "timeout", "60s")
 	du, err := time.ParseDuration(timeout)
@@ -947,14 +944,14 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
 			return
 		}
 		metrics["shard_state"] = shardStateMetric
-	}else if metricKey == v1.IndexHealthMetricKey {
+	} else if metricKey == v1.IndexHealthMetricKey {
 		healthMetric, err := h.GetIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize)
 		if err != nil {
 			log.Error(err)
 			h.WriteError(w, err, http.StatusInternalServerError)
 			return
 		}
 		metrics["index_health"] = healthMetric
 	} else {
 		switch metricKey {
 		case v1.IndexThroughputMetricKey:
@@ -1037,7 +1034,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
 		minBucketSize, err := v1.GetMetricMinBucketSize(clusterID, metricType)
 		if err != nil {
 			log.Error(err)
-		}else{
+		} else {
 			metrics[metricKey].MinBucketSize = int64(minBucketSize)
 		}
 	}
@@ -1047,8 +1044,8 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
 	h.WriteJSON(w, resBody, http.StatusOK)
 }
-func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
-	bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
+func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int) (*common.MetricItem, error) {
+	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
 	intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
 	if err != nil {
 		return nil, err
@@ -1101,14 +1098,14 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str
 		"aggs": util.MapStr{
 			"dates": util.MapStr{
 				"date_histogram": util.MapStr{
 					"field":       "timestamp",
 					intervalField: bucketSizeStr,
 				},
 				"aggs": util.MapStr{
 					"groups": util.MapStr{
 						"terms": util.MapStr{
 							"field": "payload.elasticsearch.shard_stats.routing.state",
 							"size":  10,
 						},
 					},
 				},
@@ -1122,8 +1119,8 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str
 		return nil, err
 	}
-	metricItem:=newMetricItem("shard_state", 0, "")
-	metricItem.AddLine("Shard State","Shard State","","group1","payload.elasticsearch.shard_stats.routing.state","max",bucketSizeStr,"","ratio","0.[00]","0.[00]",false,false)
+	metricItem := newMetricItem("shard_state", 0, "")
+	metricItem.AddLine("Shard State", "Shard State", "", "group1", "payload.elasticsearch.shard_stats.routing.state", "max", bucketSizeStr, "", "ratio", "0.[00]", "0.[00]", false, false)
 	metricData := []interface{}{}
 	if response.StatusCode == 200 {
@@ -1140,7 +1137,7 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str
 }
 func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody := map[string] interface{}{}
+	resBody := map[string]interface{}{}
 	id := ps.ByName("id")
 	indexName := ps.ByName("index")
 	if !h.IsIndexAllowed(req, id, indexName) {
@@ -1149,7 +1146,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
 		}, http.StatusForbidden)
 		return
 	}
-	q := &orm.Query{ Size: 1}
+	q := &orm.Query{Size: 1}
 	q.AddSort("timestamp", orm.DESC)
 	q.Conds = orm.And(
 		orm.Eq("metadata.category", "elasticsearch"),
@@ -1161,13 +1158,13 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
 	err, result := orm.Search(event.Event{}, q)
 	if err != nil {
 		resBody["error"] = err.Error()
-		h.WriteJSON(w,resBody, http.StatusInternalServerError )
+		h.WriteJSON(w, resBody, http.StatusInternalServerError)
 	}
 	namesM := util.MapStr{}
 	if len(result.Result) > 0 {
 		if data, ok := result.Result[0].(map[string]interface{}); ok {
 			if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_routing_table"}, data); exists {
-				if table, ok := routingTable.(map[string]interface{}); ok{
+				if table, ok := routingTable.(map[string]interface{}); ok {
 					if shardsM, ok := table["shards"].(map[string]interface{}); ok {
 						for _, rows := range shardsM {
 							if rowsArr, ok := rows.([]interface{}); ok {
@@ -1189,12 +1186,12 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
 	}
 	//node uuid
-	nodeIds := make([]interface{}, 0, len(namesM) )
+	nodeIds := make([]interface{}, 0, len(namesM))
 	for name, _ := range namesM {
 		nodeIds = append(nodeIds, name)
 	}
-	q1 := &orm.Query{ Size: 100}
+	q1 := &orm.Query{Size: 100}
 	q1.AddSort("timestamp", orm.DESC)
 	q1.Conds = orm.And(
 		orm.Eq("metadata.category", "elasticsearch"),
@@ -1204,7 +1201,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
 	err, result = orm.Search(elastic.NodeConfig{}, q1)
 	if err != nil {
 		resBody["error"] = err.Error()
-		h.WriteJSON(w,resBody, http.StatusInternalServerError )
+		h.WriteJSON(w, resBody, http.StatusInternalServerError)
 	}
 	nodes := []interface{}{}
 	for _, hit := range result.Result {
@@ -1224,11 +1221,11 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
 		if v, ok := nodeId.(string); ok {
 			ninfo := util.MapStr{
 				"id":        v,
 				"name":      nodeName,
 				"ip":        ip,
 				"port":      port,
 				"status":    status,
 				"timestamp": hitM["timestamp"],
 			}
 			nodes = append(nodes, ninfo)
@@ -1249,7 +1246,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
 	}
 	var must = []util.MapStr{}
-	if !util.StringInArray(ids, "*"){
+	if !util.StringInArray(ids, "*") {
 		must = append(must, util.MapStr{
 			"terms": util.MapStr{
@@ -1260,9 +1257,8 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
 	if keyword != "" {
 		must = append(must, util.MapStr{
-			"wildcard":util.MapStr{
-				"metadata.index_name":
-					util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
+			"wildcard": util.MapStr{
+				"metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
 			},
 		})
 	}
@@ -1288,7 +1284,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
 		},
 	}
 	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
 	indexName := orm.GetIndexName(elastic.IndexConfig{})
 	resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl))
@@ -1310,22 +1305,22 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
 		return
 	}
-//deleteIndexMetadata used to delete index metadata after index is deleted from cluster
+// deleteIndexMetadata used to delete index metadata after index is deleted from cluster
 func (h APIHandler) deleteIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
 	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
 	indexName := orm.GetIndexName(elastic.IndexConfig{})
 	must := []util.MapStr{
 		{
 			"term": util.MapStr{
 				"metadata.labels.state": "delete",
 			},
 		},
 	}
 	if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri {
 		if indexFilter != nil {
 			must = append(must, indexFilter)
 		}
-	}else{
+	} else {
 		//has no any index permission, just return
 		h.WriteAckOKJSON(w)
 		return
@@ -27,6 +27,13 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"math"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
 	log "github.com/cihub/seelog"
 	"infini.sh/console/core"
 	v1 "infini.sh/console/modules/elastic/api/v1"
@@ -39,12 +46,6 @@ import (
 	"infini.sh/framework/core/orm"
 	"infini.sh/framework/core/util"
 	"infini.sh/framework/modules/elastic/common"
-	"math"
-	"net/http"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
 )
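
The import moves in these two hunks are the goimports grouping convention: standard-library packages go into the first group, third-party and project packages follow after a blank line. A minimal illustration of the target layout (the file, package, and function here are made up):

package api

import (
	"fmt"
	"net/http"
	"time"

	log "github.com/cihub/seelog"
)

// logRequest exists only to use the imports; the point is the grouping above.
func logRequest(r *http.Request) {
	log.Info(fmt.Sprintf("%s %s at %s", r.Method, r.URL.Path, time.Now().Format(time.RFC3339)))
}
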
 type APIHandler struct {
@@ -534,13 +535,13 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
 	key := h.GetParameter(req, "key")
 	var metricType string
 	switch key {
-	case v1.IndexThroughputMetricKey, v1.SearchThroughputMetricKey, v1.IndexLatencyMetricKey, v1.SearchLatencyMetricKey, CircuitBreakerMetricKey,ShardStateMetricKey:
+	case v1.IndexThroughputMetricKey, v1.SearchThroughputMetricKey, v1.IndexLatencyMetricKey, v1.SearchLatencyMetricKey, CircuitBreakerMetricKey, ShardStateMetricKey:
 		metricType = v1.MetricTypeNodeStats
 	case ClusterDocumentsMetricKey,
 		ClusterStorageMetricKey,
 		ClusterIndicesMetricKey,
 		ClusterNodeCountMetricKey:
 		metricType = v1.MetricTypeClusterStats
 	case ClusterHealthMetricKey:
 		metricType = v1.MetricTypeClusterStats
 	case ShardCountMetricKey:
@@ -649,7 +650,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
 		minBucketSize, err := v1.GetMetricMinBucketSize(id, metricType)
 		if err != nil {
 			log.Error(err)
-		}else{
+		} else {
 			metrics[key].MinBucketSize = int64(minBucketSize)
 		}
 	}
@@ -700,7 +701,7 @@ func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Re
 		minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats)
 		if err != nil {
 			log.Error(err)
-		}else{
+		} else {
 			metrics[key].MinBucketSize = int64(minBucketSize)
 		}
 	}
@@ -817,7 +818,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
 			}
 		}
-	}else{
+	} else {
 		metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, key)
 		if err != nil {
 			log.Error(err)
@@ -830,7 +831,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
 		minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats)
 		if err != nil {
 			log.Error(err)
-		}else{
+		} else {
 			metrics[key].MinBucketSize = int64(minBucketSize)
 		}
 	}
@@ -888,7 +889,7 @@ func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.R
 		minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats)
 		if err != nil {
 			log.Error(err)
-		}else{
+		} else {
 			metrics[key].MinBucketSize = int64(minBucketSize)
 		}
 	}
@@ -1015,20 +1016,20 @@ const (
 )
 const (
 	ClusterStorageMetricKey   = "cluster_storage"
 	ClusterDocumentsMetricKey = "cluster_documents"
 	ClusterIndicesMetricKey   = "cluster_indices"
 	ClusterNodeCountMetricKey = "node_count"
 	ClusterHealthMetricKey    = "cluster_health"
 	ShardCountMetricKey       = "shard_count"
 	CircuitBreakerMetricKey   = "circuit_breaker"
 )
 func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {
 	var (
-		clusterMetricsResult = map[string]*common.MetricItem {}
+		clusterMetricsResult = map[string]*common.MetricItem{}
 		err                  error
 	)
 	switch metricKey {
 	case ClusterDocumentsMetricKey,
@@ -1343,7 +1344,7 @@ func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min
 		"query": util.MapStr{
 			"bool": util.MapStr{
 				"minimum_should_match": 1,
 				"should":               should,
 				"must": []util.MapStr{
 					{
 						"term": util.MapStr{
@@ -112,7 +112,7 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
 func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) (map[string]*common.MetricItem, error) {
 	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
 	queryDSL := util.MustToJSONBytes(query)
-	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(),nil, queryDSL)
+	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
 	if err != nil {
 		return nil, err
 	}
@@ -205,12 +205,12 @@ func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{
 				dataKey = dataKey + "_deriv"
 			}
 			line.Data = grpMetricData[dataKey][line.Metric.Label]
-			if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+			if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 {
 				// remove first metric dot
 				temp := v[1:]
 				// // remove first last dot
 				if len(temp) > 0 {
-					temp = temp[0: len(temp)-1]
+					temp = temp[0 : len(temp)-1]
 				}
 				line.Data = temp
 			}
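
This first/last-point trimming recurs several times below; both edge buckets of the histogram usually cover partial windows, so they would render as artificial dips in the chart. A self-contained sketch of the same slice logic (function name is ours, not the project's):

package main

import "fmt"

// trimEdges drops the first and last points of a metric series, mirroring
// the partial-bucket trimming above.
func trimEdges(points [][]interface{}) [][]interface{} {
	if len(points) == 0 {
		return points
	}
	trimmed := points[1:] // drop the first (partial) bucket
	if len(trimmed) > 0 {
		trimmed = trimmed[0 : len(trimmed)-1] // drop the last (still-filling) bucket
	}
	return trimmed
}

func main() {
	data := [][]interface{}{{1, 10.0}, {2, 12.0}, {3, 11.0}, {4, 3.0}}
	fmt.Println(trimEdges(data)) // [[2 12] [3 11]]
}
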
@@ -369,9 +369,9 @@ func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common
 		},
 	}
 	queryDSL := util.MustToJSONBytes(query)
 	response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
 	if err != nil {
 		return nil, err
 	}
 	var minDate, maxDate int64
@@ -429,12 +429,12 @@ func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common
 		for _, line := range metricItem.Lines {
 			line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
 			line.Data = metricData[line.Metric.GetDataKey()]
-			if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+			if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 {
 				// remove first metric dot
 				temp := v[1:]
 				// // remove first last dot
 				if len(temp) > 0 {
-					temp = temp[0: len(temp)-1]
+					temp = temp[0 : len(temp)-1]
 				}
 				line.Data = temp
 			}
@@ -912,13 +912,13 @@ func parseGroupMetricData(buckets []elastic.BucketBase, isPercent bool) ([]inter
 			if bkMap, ok := statusBk.(map[string]interface{}); ok {
 				statusKey := bkMap["key"].(string)
 				count := bkMap["doc_count"].(float64)
-				if isPercent{
+				if isPercent {
 					metricData = append(metricData, map[string]interface{}{
 						"x": dateTime,
 						"y": count / totalCount * 100,
 						"g": statusKey,
 					})
-				}else{
+				} else {
 					metricData = append(metricData, map[string]interface{}{
 						"x": dateTime,
 						"y": count,
@@ -950,12 +950,12 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
 				"field": line.Metric.Field,
 			},
 		}
-		var sumBucketPath = "term_node>"+ line.Metric.ID
+		var sumBucketPath = "term_node>" + line.Metric.ID
 		aggs[line.Metric.ID] = leafAgg
 		sumAggs[line.Metric.ID] = util.MapStr{
 			"sum_bucket": util.MapStr{
 				"buckets_path": sumBucketPath,
 			},
 		}
 		if line.Metric.Field2 != "" {
@@ -966,9 +966,9 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
 			}
 			aggs[line.Metric.ID+"_field2"] = leafAgg2
-			sumAggs[line.Metric.ID + "_field2"] = util.MapStr{
+			sumAggs[line.Metric.ID+"_field2"] = util.MapStr{
 				"sum_bucket": util.MapStr{
-					"buckets_path": sumBucketPath+"_field2",
+					"buckets_path": sumBucketPath + "_field2",
 				},
 			}
 		}
@@ -991,10 +991,10 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
 		}
 	}
-	sumAggs["term_node"]= util.MapStr{
+	sumAggs["term_node"] = util.MapStr{
 		"terms": util.MapStr{
 			"field": "metadata.labels.node_id",
 			"size":  1000,
 		},
 		"aggs": aggs,
 	}
@@ -1015,7 +1015,7 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
 			"aggs": sumAggs,
 		},
 	}
-	return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
+	return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap)
 }
 func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) (map[string]*common.MetricItem, error) {
@@ -1035,11 +1035,11 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
 				"field": line.Metric.Field,
 			},
 		}
-		var sumBucketPath = "term_shard>"+ line.Metric.ID
+		var sumBucketPath = "term_shard>" + line.Metric.ID
 		aggs[line.Metric.ID] = leafAgg
 		sumAggs[line.Metric.ID] = util.MapStr{
 			"sum_bucket": util.MapStr{
 				"buckets_path": sumBucketPath,
 			},
 		}
 		if line.Metric.Field2 != "" {
@@ -1050,9 +1050,9 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
 			}
 			aggs[line.Metric.ID+"_field2"] = leafAgg2
-			sumAggs[line.Metric.ID + "_field2"] = util.MapStr{
+			sumAggs[line.Metric.ID+"_field2"] = util.MapStr{
 				"sum_bucket": util.MapStr{
-					"buckets_path": sumBucketPath+"_field2",
+					"buckets_path": sumBucketPath + "_field2",
 				},
 			}
 		}
@@ -1075,10 +1075,10 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
 		}
 	}
-	sumAggs["term_shard"]= util.MapStr{
+	sumAggs["term_shard"] = util.MapStr{
 		"terms": util.MapStr{
 			"field": "metadata.labels.shard_id",
 			"size":  100000,
 		},
 		"aggs": aggs,
 	}
@@ -1092,7 +1092,7 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
 	if len(metricItems) > 0 && len(metricItems[0].Lines) > 0 && metricItems[0].Lines[0].Metric.OnlyPrimary {
 		query["query"] = util.MapStr{
 			"bool": util.MapStr{
 				"must": []util.MapStr{
 					query["query"].(util.MapStr),
 					{"term": util.MapStr{"payload.elasticsearch.shard_stats.routing.primary": true}},
 				},
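
The OnlyPrimary branch above wraps whatever query was already built into a new bool.must alongside a term filter on routing.primary, so only primary-shard documents are aggregated. The same wrapping in isolation (plain maps; the sample query is made up):

package main

import (
	"encoding/json"
	"fmt"
)

type M = map[string]interface{}

// wrapPrimaryOnly nests an existing query under bool.must and adds a
// primary-shard term filter, mirroring the OnlyPrimary branch above.
func wrapPrimaryOnly(query M) M {
	return M{
		"bool": M{
			"must": []M{
				query,
				{"term": M{"payload.elasticsearch.shard_stats.routing.primary": true}},
			},
		},
	}
}

func main() {
	q := M{"term": M{"metadata.labels.index_name": "test-index"}} // illustrative
	b, _ := json.MarshalIndent(wrapPrimaryOnly(q), "", "  ")
	fmt.Println(string(b))
}
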
@@ -1109,7 +1109,7 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
 			"aggs": sumAggs,
 		},
 	}
-	return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
+	return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap)
 }
 func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) (map[string]*common.MetricItem, error) {
@@ -1174,12 +1174,12 @@ func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems
 		for _, line := range metricItem.Lines {
 			line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
 			line.Data = metricData[line.Metric.GetDataKey()]
-			if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+			if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 {
 				// remove first metric dot
 				temp := v[1:]
 				// // remove first last dot
 				if len(temp) > 0 {
-					temp = temp[0: len(temp)-1]
+					temp = temp[0 : len(temp)-1]
 				}
 				line.Data = temp
 			}
@@ -1190,4 +1190,4 @@ func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems
 	}
 	return result, nil
 }
@@ -33,83 +33,81 @@ import (
 )
 func TestGetMetricParams(t *testing.T) {
-	handler:=APIHandler{}
-	req, err :=http.NewRequest("GET","https://infinilabs.com/api/?bucket_size=1m",nil)
+	handler := APIHandler{}
+	req, err := http.NewRequest("GET", "https://infinilabs.com/api/?bucket_size=1m", nil)
 	if err != nil {
 		t.Fatal(err)
 	}
-	bucketSize, min, max, err:=handler.GetMetricRangeAndBucketSize(req,"", "",15)
+	bucketSize, min, max, err := handler.GetMetricRangeAndBucketSize(req, "", "", 15)
 	fmt.Println(bucketSize)
-	fmt.Println(util.FormatUnixTimestamp(min/1000))//2022-01-27 15:28:57
-	fmt.Println(util.FormatUnixTimestamp(max/1000))//2022-01-27 15:28:57
-	fmt.Println(time.Now())//2022-01-27 15:28:57
+	fmt.Println(util.FormatUnixTimestamp(min / 1000)) //2022-01-27 15:28:57
+	fmt.Println(util.FormatUnixTimestamp(max / 1000)) //2022-01-27 15:28:57
+	fmt.Println(time.Now()) //2022-01-27 15:28:57
 	fmt.Println(bucketSize, min, max, err)
 }
 func TestConvertBucketItemsToAggQueryParams(t *testing.T) {
-	bucketItem:=common.BucketItem{}
-	bucketItem.Key="key1"
-	bucketItem.Type=common.TermsBucket
-	bucketItem.Parameters=map[string]interface{}{}
-	bucketItem.Parameters["field"]="metadata.labels.cluster_id"
-	bucketItem.Parameters["size"]=2
-	nestBucket:=common.BucketItem{}
-	nestBucket.Key="key2"
-	nestBucket.Type=common.DateHistogramBucket
-	nestBucket.Parameters=map[string]interface{}{}
-	nestBucket.Parameters["field"]="timestamp"
-	nestBucket.Parameters["calendar_interval"]="1d"
-	nestBucket.Parameters["time_zone"]="+08:00"
-	leafBucket:=common.NewBucketItem(common.TermsBucket,util.MapStr{
-		"size":5,
-		"field":"payload.elasticsearch.cluster_health.status",
-	})
-	leafBucket.Key="key3"
-	metricItems:=[]*common.MetricItem{}
-	var bucketSizeStr ="10s"
-	metricItem:=newMetricItem("cluster_summary", 2, "cluster")
-	metricItem.Key="key4"
-	metricItem.AddLine("Indexing","Total Indexing","Number of documents being indexed for primary and replica shards.","group1",
-		"payload.elasticsearch.index_stats.total.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
-	metricItem.AddLine("Search","Total Search","Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!","group1",
-		"payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
-	metricItems=append(metricItems,metricItem)
+	bucketItem := common.BucketItem{}
+	bucketItem.Key = "key1"
+	bucketItem.Type = common.TermsBucket
+	bucketItem.Parameters = map[string]interface{}{}
+	bucketItem.Parameters["field"] = "metadata.labels.cluster_id"
+	bucketItem.Parameters["size"] = 2
+	nestBucket := common.BucketItem{}
+	nestBucket.Key = "key2"
+	nestBucket.Type = common.DateHistogramBucket
+	nestBucket.Parameters = map[string]interface{}{}
+	nestBucket.Parameters["field"] = "timestamp"
+	nestBucket.Parameters["calendar_interval"] = "1d"
+	nestBucket.Parameters["time_zone"] = "+08:00"
+	leafBucket := common.NewBucketItem(common.TermsBucket, util.MapStr{
+		"size":  5,
+		"field": "payload.elasticsearch.cluster_health.status",
+	})
+	leafBucket.Key = "key3"
+	metricItems := []*common.MetricItem{}
+	var bucketSizeStr = "10s"
+	metricItem := newMetricItem("cluster_summary", 2, "cluster")
+	metricItem.Key = "key4"
+	metricItem.AddLine("Indexing", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1",
+		"payload.elasticsearch.index_stats.total.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+	metricItem.AddLine("Search", "Total Search", "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!", "group1",
+		"payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
+	metricItems = append(metricItems, metricItem)
 	nestBucket.AddNestBucket(leafBucket)
-	nestBucket.Metrics=metricItems
-	bucketItem.Buckets=[]*common.BucketItem{}
-	bucketItem.Buckets=append(bucketItem.Buckets,&nestBucket)
-	aggs:=ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem},nil)
+	nestBucket.Metrics = metricItems
+	bucketItem.Buckets = []*common.BucketItem{}
+	bucketItem.Buckets = append(bucketItem.Buckets, &nestBucket)
+	aggs := ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem}, nil)
 	fmt.Println(util.MustToJSON(aggs))
-	response:="{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }"
-	res:=SearchResponse{}
-	util.FromJSONBytes([]byte(response),&res)
+	response := "{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }"
+	res := SearchResponse{}
+	util.FromJSONBytes([]byte(response), &res)
 	fmt.Println(response)
-	groupKey:="key1"
-	metricLabelKey:="key2"
-	metricValueKey:="c7qi5hii4h935v9bs920"
-	data:=ParseAggregationResult(int(10),res.Aggregations,groupKey,metricLabelKey,metricValueKey)
+	groupKey := "key1"
+	metricLabelKey := "key2"
+	metricValueKey := "c7qi5hii4h935v9bs920"
+	data := ParseAggregationResult(int(10), res.Aggregations, groupKey, metricLabelKey, metricValueKey)
 	fmt.Println(data)
 }
 func TestConvertBucketItems(t *testing.T) {
-	response:="{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }"
-	res:=SearchResponse{}
-	util.FromJSONBytes([]byte(response),&res)
-	data:=ParseAggregationBucketResult(int(10),res.Aggregations,"c7v2gm3i7638vvo4pv80","c7v2gm3i7638vvo4pv8g","c7v2gm3i7638vvo4pv90", func() {
+	response := "{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }"
+	res := SearchResponse{}
+	util.FromJSONBytes([]byte(response), &res)
+	data := ParseAggregationBucketResult(int(10), res.Aggregations, "c7v2gm3i7638vvo4pv80", "c7v2gm3i7638vvo4pv8g", "c7v2gm3i7638vvo4pv90", func() {
 	})
(File diff suppressed because it is too large.)
@@ -45,40 +45,40 @@ import (
 )
 func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
-	resBody:=util.MapStr{}
-	reqBody := struct{
+	resBody := util.MapStr{}
+	reqBody := struct {
 		Keyword      string                       `json:"keyword"`
 		Size         int                          `json:"size"`
 		From         int                          `json:"from"`
 		Aggregations []elastic.SearchAggParam     `json:"aggs"`
 		Highlight    elastic.SearchHighlightParam `json:"highlight"`
 		Filter       elastic.SearchFilterParam    `json:"filter"`
 		Sort         []string                     `json:"sort"`
 		SearchField  string                       `json:"search_field"`
 	}{}
 	err := h.DecodeJSON(req, &reqBody)
 	if err != nil {
 		resBody["error"] = err.Error()
-		h.WriteJSON(w,resBody, http.StatusInternalServerError )
+		h.WriteJSON(w, resBody, http.StatusInternalServerError)
 		return
 	}
 	aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
 	aggs["term_cluster_id"] = util.MapStr{
 		"terms": util.MapStr{
 			"field": "metadata.cluster_id",
 			"size":  1000,
 		},
 		"aggs": util.MapStr{
 			"term_cluster_name": util.MapStr{
 				"terms": util.MapStr{
 					"field": "metadata.cluster_name",
 					"size":  1,
 				},
 			},
 		},
 	}
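
The term_cluster_id aggregation above nests a size-1 terms aggregation on metadata.cluster_name, which is a cheap way to return each cluster's display name next to its id in a single response. The same shape in isolation (plain maps instead of util.MapStr):

package main

import (
	"encoding/json"
	"fmt"
)

type M = map[string]interface{}

func main() {
	// One bucket per cluster id, each carrying its (single) cluster name.
	aggs := M{
		"term_cluster_id": M{
			"terms": M{"field": "metadata.cluster_id", "size": 1000},
			"aggs": M{
				"term_cluster_name": M{
					"terms": M{"field": "metadata.cluster_name", "size": 1},
				},
			},
		},
	}
	b, _ := json.MarshalIndent(aggs, "", "  ")
	fmt.Println(string(b))
}
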
var should =[]util.MapStr{} var should = []util.MapStr{}
if reqBody.SearchField != ""{ if reqBody.SearchField != "" {
should = []util.MapStr{ should = []util.MapStr{
{ {
"prefix": util.MapStr{ "prefix": util.MapStr{
@ -101,7 +101,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
}, },
}, },
} }
}else{ } else {
should = []util.MapStr{ should = []util.MapStr{
{ {
"prefix": util.MapStr{ "prefix": util.MapStr{
@ -143,30 +143,25 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
} }
clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id") clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id")
if !hasPrivilege && clusterFilter == nil { if !hasPrivilege && clusterFilter == nil {
h.WriteJSON(w, elastic.SearchResponse{ h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
}, http.StatusOK)
return return
} }
must := []interface{}{ must := []interface{}{}
}
if !hasPrivilege && clusterFilter != nil { if !hasPrivilege && clusterFilter != nil {
must = append(must, clusterFilter) must = append(must, clusterFilter)
} }
query := util.MapStr{
"aggs": aggs,
"size": reqBody.Size,
"from": reqBody.From,
"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
"query": util.MapStr{
"bool": util.MapStr{
"minimum_should_match": 1,
"filter": elastic.BuildSearchTermFilter(reqBody.Filter),
"should": should,
"must": must,
},
},
"sort": []util.MapStr{
@@ -178,7 +173,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
},
}
if len(reqBody.Sort) > 1 {
query["sort"] = []util.MapStr{
{
reqBody.Sort[0]: util.MapStr{
"order": reqBody.Sort[1],
@@ -190,7 +185,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.NodeConfig{}), dsl)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
w.Write(util.MustToJSONBytes(response))
@@ -299,7 +294,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
source["shard_info"] = shardInfo
}
if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok {
if clusterID, ok := tempClusterID.(string); ok {
if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil {
source["is_master_node"] = meta.ClusterState.MasterNode == nodeID
}
@@ -317,28 +312,28 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
return
}
// indexing rate
indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems := []GroupMetricItem{}
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "indexing",
Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexMetric,
FormatType: "num",
Units: "Indexing/s",
})
queryMetric := newMetricItem("search", 2, OperationGroupKey)
queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "search",
Field: "payload.elasticsearch.node_stats.indices.search.query_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryMetric,
FormatType: "num",
Units: "Search/s",
})
bucketSize := GetMinBucketSize()
@@ -346,11 +341,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
bucketSize = 60
}
var metricLen = 15
aggs := map[string]interface{}{}
query = map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": []util.MapStr{
{
"term": util.MapStr{
"metadata.category": util.MapStr{
@@ -375,7 +370,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
},
},
},
@@ -383,15 +378,15 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
}
for _, metricItem := range nodeMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}
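The loop above pairs every cumulative counter with a max sub-aggregation and, for derivative metrics, a sibling pipeline aggregation whose buckets_path points back at it; nested inside a date_histogram, this turns a monotonically increasing total into a per-bucket rate. A minimal self-contained sketch of what one iteration emits, with "indexing" standing in for the generated UUID:

package main

import "fmt"

// Sketch of the pair the loop above emits for one derivative metric;
// "indexing" stands in for the generated UUID used as metricItem.ID.
func main() {
	aggs := map[string]interface{}{
		"indexing": map[string]interface{}{
			"max": map[string]interface{}{
				"field": "payload.elasticsearch.node_stats.indices.indexing.index_total",
			},
		},
		"indexing_deriv": map[string]interface{}{
			"derivative": map[string]interface{}{
				"buckets_path": "indexing", // points at the sibling max agg
			},
		},
	}
	fmt.Println(len(aggs)) // 2: one value agg plus its derivative
}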
@@ -403,8 +398,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
if err != nil {
panic(err)
}
query["size"] = 0
query["aggs"] = util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.node_id",
@@ -412,11 +407,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": aggs,
},
},
},
@@ -430,9 +425,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
@@ -493,7 +487,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
// return
//}
q1 := orm.Query{
Size: 1,
WildcardIndex: true,
}
q1.Conds = orm.And(
@@ -518,7 +512,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
tt, _ := time.Parse(time.RFC3339, ts)
if time.Now().Sub(tt).Seconds() > 30 {
kvs["status"] = "unavailable"
} else {
kvs["status"] = "available"
}
}
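The staleness check above marks a node unavailable when its latest timestamp is more than 30 seconds old. The same rule can be written with time.Since, as in the sketch below; unlike the original, this version also treats an unparseable timestamp as unavailable instead of silently ignoring the parse error.

package main

import (
	"fmt"
	"time"
)

// Sketch only: the 30-second rule above via time.Since; unlike the
// original, a timestamp that fails to parse also counts as unavailable.
func nodeStatus(ts string) string {
	tt, err := time.Parse(time.RFC3339, ts)
	if err != nil || time.Since(tt) > 30*time.Second {
		return "unavailable"
	}
	return "available"
}

func main() {
	fmt.Println(nodeStatus(time.Now().Format(time.RFC3339))) // available
}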
@@ -536,9 +530,9 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
jvm, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_stats", "jvm"}, vresult)
if ok {
if jvmVal, ok := jvm.(map[string]interface{}); ok {
kvs["jvm"] = util.MapStr{
"mem": jvmVal["mem"],
"uptime": jvmVal["uptime_in_millis"],
}
}
@@ -559,7 +553,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
}
}
}
if len(response.Hits.Hits) > 0 {
hit := response.Hits.Hits[0]
innerMetaData, _ := util.GetMapValueByKeys([]string{"metadata", "labels"}, hit.Source)
if mp, ok := innerMetaData.(map[string]interface{}); ok {
@@ -593,15 +587,15 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
}
should := []util.MapStr{
{
"term": util.MapStr{
"metadata.labels.cluster_id": util.MapStr{
"value": clusterID,
},
},
},
{
"term": util.MapStr{
"metadata.labels.cluster_uuid": util.MapStr{
"value": clusterUUID,
},
},
@@ -632,19 +626,19 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
},
}
resBody := map[string]interface{}{}
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, v1.MetricTypeNodeStats, 60)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": must,
"minimum_should_match": 1,
"should": should,
"filter": []util.MapStr{
{
"range": util.MapStr{
@@ -658,8 +652,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
},
}
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
metricKey := h.GetParameter(req, "key")
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
du, err := time.ParseDuration(timeout)
@@ -679,13 +673,13 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
return
}
metrics["node_health"] = healthMetric
} else if metricKey == ShardStateMetricKey {
query = util.MapStr{
"size": 0,
"query": util.MapStr{
"bool": util.MapStr{
"minimum_should_match": 1,
"should": should,
"must": []util.MapStr{
{
"term": util.MapStr{
@@ -729,74 +723,74 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
return
}
metrics["shard_state"] = shardStateMetric
} else {
switch metricKey {
case NodeProcessCPUMetricKey:
metricItem := newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Process CPU", "Process CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.process.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("OS CPU", "OS CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.os.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItems = append(metricItems, metricItem)
case NodeCPUJVMMetricKey:
metricItem := newMetricItem("jvm", 2, SystemGroupKey)
metricItem.AddAxi("JVM Heap", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Max Heap", "Max Heap", "JVM max Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Used Heap", "Used Heap", "JVM used Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItems = append(metricItems, metricItem)
case v1.IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 3, OperationGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Rate", "Total Shards", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case v1.SearchThroughputMetricKey:
metricItem := newMetricItem("search_throughput", 4, OperationGroupKey)
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Total Shards",
"Number of search requests being executed.",
"group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case v1.IndexLatencyMetricKey:
metricItem := newMetricItem("index_latency", 5, LatencyGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItems = append(metricItems, metricItem)
case v1.SearchLatencyMetricKey:
metricItem := newMetricItem("search_latency", 6, LatencyGroupKey)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value / value2
}
metricItems = append(metricItems, metricItem)
case ParentBreakerMetricKey:
metricItem := newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped", "Parent Breaker Tripped", "Rate of the circuit breaker has been triggered and prevented an out of memory error.", "group1", "payload.elasticsearch.node_stats.breakers.parent.tripped", "max", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
}
metrics, err = h.getSingleMetrics(ctx, metricItems, query, bucketSize)
if err != nil {
log.Error(err)
h.WriteError(w, err, http.StatusInternalServerError)
@@ -808,7 +802,7 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
minBucketSize, err := v1.GetMetricMinBucketSize(clusterID, v1.MetricTypeNodeStats)
if err != nil {
log.Error(err)
} else {
metrics[metricKey].MinBucketSize = int64(minBucketSize)
}
}
@@ -818,8 +812,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
h.WriteJSON(w, resBody, http.StatusOK)
}
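A note on the latency cases above: each one pairs a time-in-millis counter (Field) with its matching operation counter (Field2) and divides one by the other in Calc; since both arrive as per-bucket rates after the derivative step, the quotient is the average latency per operation in that bucket. The closures divide unconditionally, so a bucket with zero operations yields NaN. The sketch below is an illustration of a guarded variant, not what the code above does:

package main

import "fmt"

// Sketch: the same Calc shape as the latency closures above, with a
// zero-denominator guard the originals do not have.
func guardedCalc(timeDelta, countDelta float64) float64 {
	if countDelta == 0 {
		return 0 // no operations in this bucket
	}
	return timeDelta / countDelta
}

func main() {
	fmt.Println(guardedCalc(120, 40)) // 3: average ms per operation
	fmt.Println(guardedCalc(0, 0))    // 0 instead of NaN
}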
func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err
@@ -828,14 +822,14 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize
query["aggs"] = util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"groups": util.MapStr{
"terms": util.MapStr{
"field": "payload.elasticsearch.shard_stats.routing.state",
"size": 10,
},
},
},
@@ -848,8 +842,8 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize
return nil, err
}
metricItem := newMetricItem("shard_state", 0, "")
metricItem.AddLine("Shard State", "Shard State", "", "group1", "payload.elasticsearch.shard_stats.routing.state", "count", bucketSizeStr, "", "ratio", "0.[00]", "0.[00]", false, false)
metricData := []interface{}{}
if response.StatusCode == 200 {
@@ -864,8 +858,8 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize
return metricItem, nil
}
func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err
@@ -873,7 +867,7 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)
query["aggs"] = util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
@@ -886,14 +880,14 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)
},
}
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
}
metricItem := newMetricItem("node_health", 0, "")
metricItem.AddLine("Node health", "Node Health", "", "group1", "payload.elasticsearch.node_stats.jvm.uptime_in_millis", "min", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false)
metricData := []interface{}{}
if response.StatusCode == 200 {
@@ -923,7 +917,7 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)
return metricItem, nil
}
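getNodeHealthMetric charts the per-bucket minimum of payload.elasticsearch.node_stats.jvm.uptime_in_millis. Taking the minimum (rather than the max or average) is what makes restarts visible: a JVM restart resets uptime to near zero, so any bucket containing a restart reports a small minimum. The sketch below is an interpretation of that idea under the assumption that bucket values arrive as float64 milliseconds, not code from this file:

package main

import "fmt"

// Interpretive sketch, not code from this file: a bucket whose
// minimum JVM uptime is shorter than the bucket itself must have
// seen a (re)start inside that bucket.
func restartedWithin(minUptimeMillis float64, bucketSizeSeconds int) bool {
	return minUptimeMillis < float64(bucketSizeSeconds)*1000
}

func main() {
	fmt.Println(restartedWithin(1500, 60)) // true: up for 1.5s in a 60s bucket
}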
func getNodeOnlineStatusOfRecentDay(nodeIDs []string) (map[string][]interface{}, error) {
q := orm.Query{
WildcardIndex: true,
}
@@ -932,64 +926,64 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
"group_by_node_id": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.node_id",
"size": 100,
},
"aggs": util.MapStr{
"uptime_histogram": util.MapStr{
"date_range": util.MapStr{
"field": "timestamp",
"format": "yyyy-MM-dd",
"time_zone": "+08:00",
"ranges": []util.MapStr{
{
"from": "now-13d/d",
"to": "now-12d/d",
}, {
"from": "now-12d/d",
"to": "now-11d/d",
},
{
"from": "now-11d/d",
"to": "now-10d/d",
},
{
"from": "now-10d/d",
"to": "now-9d/d",
}, {
"from": "now-9d/d",
"to": "now-8d/d",
},
{
"from": "now-8d/d",
"to": "now-7d/d",
},
{
"from": "now-7d/d",
"to": "now-6d/d",
},
{
"from": "now-6d/d",
"to": "now-5d/d",
}, {
"from": "now-5d/d",
"to": "now-4d/d",
},
{
"from": "now-4d/d",
"to": "now-3d/d",
}, {
"from": "now-3d/d",
"to": "now-2d/d",
}, {
"from": "now-2d/d",
"to": "now-1d/d",
}, {
"from": "now-1d/d",
"to": "now/d",
},
{
"from": "now/d",
"to": "now",
},
},
},
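The fourteen hand-written ranges cover the last 13 whole days plus a partial bucket for today, evaluated in the +08:00 time zone given above. For comparison, a loop can produce the same list; the sketch below is an equivalent construction, not the code the project uses:

package main

import "fmt"

// Sketch: generate the same "ranges" array programmatically.
// Emits now-13d/d..now-12d/d through now-1d/d..now/d, then the
// partial bucket now/d..now.
func uptimeRanges() []map[string]interface{} {
	ranges := make([]map[string]interface{}, 0, 14)
	for i := 13; i >= 1; i-- {
		to := fmt.Sprintf("now-%dd/d", i-1)
		if i == 1 {
			to = "now/d"
		}
		ranges = append(ranges, map[string]interface{}{
			"from": fmt.Sprintf("now-%dd/d", i),
			"to":   to,
		})
	}
	return append(ranges, map[string]interface{}{"from": "now/d", "to": "now"})
}

func main() { fmt.Println(len(uptimeRanges())) } // 14, matching the literal above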
@@ -1018,7 +1012,7 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": "now-15d",
"lte": "now",
},
},
@@ -1056,13 +1050,13 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
recentStatus[nodeKey] = []interface{}{}
if histogramAgg, ok := bk["uptime_histogram"].(map[string]interface{}); ok {
if bks, ok := histogramAgg["buckets"].([]interface{}); ok {
for _, bkItem := range bks {
if bkVal, ok := bkItem.(map[string]interface{}); ok {
if minUptime, ok := util.GetMapValueByKeys([]string{"min_uptime", "value"}, bkVal); ok {
// mark node status as offline when uptime is less than 10 minutes
if v, ok := minUptime.(float64); ok && v >= 600000 {
recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "online"})
} else {
recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "offline"})
}
}
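The literal 600000 above is ten minutes in milliseconds: a day bucket counts as online only when its minimum observed uptime is at least that long. The same rule with the magic number named, as a sketch:

package main

import "fmt"

// Sketch: the 600000 threshold in the check above, spelled out.
const onlineMinUptimeMillis = 10 * 60 * 1000 // 10 minutes

func dayStatus(minUptimeMillis float64) string {
	if minUptimeMillis >= onlineMinUptimeMillis {
		return "online"
	}
	return "offline"
}

func main() {
	fmt.Println(dayStatus(599999), dayStatus(600000)) // offline online
}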
@@ -1080,10 +1074,10 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
max = h.GetParameterOrDefault(req, "max", "now")
)
resBody := map[string]interface{}{}
id := ps.ByName("id")
nodeUUID := ps.ByName("node_id")
q := &orm.Query{Size: 1}
q.AddSort("timestamp", orm.DESC)
q.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),
@@ -1095,16 +1089,16 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(event.Event{}, q)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
namesM := util.MapStr{}
if len(result.Result) > 0 {
if data, ok := result.Result[0].(map[string]interface{}); ok {
if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_routing_table"}, data); exists {
if rows, ok := routingTable.([]interface{}); ok {
for _, row := range rows {
if v, ok := row.(map[string]interface{}); ok {
if indexName, ok := v["index"].(string); ok {
namesM[indexName] = true
}
}
@@ -1114,12 +1108,12 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
}
}
indexNames := make([]interface{}, 0, len(namesM))
for name, _ := range namesM {
indexNames = append(indexNames, name)
}
q1 := &orm.Query{Size: 100}
q1.AddSort("timestamp", orm.DESC)
q1.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),
@@ -1130,28 +1124,29 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
err, result = orm.Search(elastic.IndexConfig{}, q1)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
indices, err := h.getLatestIndices(req, min, max, id, &result)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
h.WriteJSON(w, indices, http.StatusOK)
}
type ShardsSummary struct {
Index string `json:"index"`
Shards int `json:"shards"`
Replicas int `json:"replicas"`
DocsCount int64 `json:"docs_count"`
DocsDeleted int64 `json:"docs_deleted"`
StoreInBytes int64 `json:"store_in_bytes"`
PriStoreInBytes int64 `json:"pri_store_in_bytes"`
Timestamp interface{} `json:"timestamp"`
}
func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, clusterID string, result *orm.Result) ([]interface{}, error) {
//filter indices
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
@@ -1165,7 +1160,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
query := util.MapStr{
"size": 10000,
"_source": []string{"metadata.labels.index_name", "payload.elasticsearch.shard_stats.docs", "payload.elasticsearch.shard_stats.store", "payload.elasticsearch.shard_stats.routing", "timestamp"},
"collapse": util.MapStr{
"field": "metadata.labels.shard_id",
},
@@ -1240,7 +1235,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
if primary == true {
indexInfo.Shards++
} else {
indexInfo.Replicas++
}
indexInfo.Timestamp = hitM["timestamp"]
@@ -1249,7 +1244,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
indices := []interface{}{}
var indexPattern *radix.Pattern
if !hasAllPrivilege {
indexPattern = radix.Compile(allowedIndices...)
}
@@ -1273,21 +1268,21 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
if indexInfos[v] != nil {
indices = append(indices, util.MapStr{
"index": v,
"status": state,
"health": health,
"timestamp": indexInfos[v].Timestamp,
"docs_count": indexInfos[v].DocsCount,
"shards": indexInfos[v].Shards,
"replicas": replicasNum,
"unassigned_shards": (replicasNum+1)*shardsNum - indexInfos[v].Shards - replicasNum,
"store_size": util.FormatBytes(float64(indexInfos[v].StoreInBytes), 1),
})
} else {
indices = append(indices, util.MapStr{
"index": v,
"status": state,
"health": health,
"timestamp": hitM["timestamp"],
})
}
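The unassigned_shards entry evaluates (replicasNum+1)*shardsNum - observed primaries - replicasNum: the first term is the total number of shard copies the index should have given its settings, from which the code subtracts what it has accounted for. The sketch below just evaluates the expression as written, with made-up numbers:

package main

import "fmt"

// Sketch: the unassigned-shards arithmetic exactly as it appears
// above; shardsNum/replicasNum come from index settings, observed
// is ShardsSummary.Shards (primaries seen in shard_stats).
func unassignedShards(shardsNum, replicasNum, observedPrimaries int) int {
	return (replicasNum+1)*shardsNum - observedPrimaries - replicasNum
}

func main() {
	// 3 primaries, 1 replica, all primaries reporting:
	fmt.Println(unassignedShards(3, 1, 3)) // (1+1)*3 - 3 - 1 = 2
}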
@@ -1297,7 +1292,6 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
return indices, nil
}
func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
if GetMonitorState(clusterID) == elastic.ModeAgentless {
@@ -1306,7 +1300,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
}
nodeID := ps.MustGetParameter("node_id")
q1 := orm.Query{
Size: 1000,
WildcardIndex: true,
CollapseField: "metadata.labels.shard_id",
}
@@ -1327,7 +1321,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(&event.Event{}, &q1)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
var shards = []interface{}{}
@@ -1360,7 +1354,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
primary, _ := shardM.GetValue("routing.primary")
if primary == true {
shardInfo["prirep"] = "p"
} else {
shardInfo["prirep"] = "r"
}
shardInfo["state"], _ = shardM.GetValue("routing.state")
@@ -1380,7 +1374,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
h.WriteJSON(w, shards, http.StatusOK)
}
// deleteNodeMetadata used to clean node metadata after node is offline and not active within 7 days
func (h APIHandler) deleteNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
indexName := orm.GetIndexName(elastic.NodeConfig{})
@@ -1409,4 +1403,4 @@ func (h APIHandler) deleteNodeMetadata(w http.ResponseWriter, req *http.Request,
h.WriteError(w, err, http.StatusInternalServerError)
}
h.WriteAckOKJSON(w)
}
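The body of deleteNodeMetadata is largely elided in this hunk. Going only by its doc comment, the cleanup would amount to a delete-by-query on the node-config index for nodes that have been offline and inactive for seven days; the sketch below is speculative, and both field names in it are assumptions for illustration, not taken from the elided code:

package main

// Hypothetical sketch only: "metadata.labels.status" and "timestamp"
// are guessed field names for illustration, not from the elided code.
var cleanupQuery = map[string]interface{}{
	"query": map[string]interface{}{
		"bool": map[string]interface{}{
			"must": []interface{}{
				map[string]interface{}{"term": map[string]interface{}{"metadata.labels.status": "unavailable"}},
				map[string]interface{}{"range": map[string]interface{}{"timestamp": map[string]interface{}{"lte": "now-7d"}}},
			},
		},
	},
}

func main() { _ = cleanupQuery }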

View File

@@ -278,5 +278,3 @@ func rewriteTableNamesOfSqlRequest(req *http.Request, distribution string) (stri
}
return strings.Join(unescapedTableNames, ","), nil
}

View File

@@ -38,11 +38,10 @@ import (
"time"
)
func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
@@ -51,8 +50,8 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
@@ -69,7 +68,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
}
var body = map[string]interface{}{
"script": map[string]interface{}{
"lang": "mustache",
"source": template.Source,
},
}
@@ -89,7 +88,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
template.Created = time.Now()
template.Updated = template.Created
template.ClusterID = targetClusterID
index := orm.GetIndexName(elastic.SearchTemplate{})
insertRes, err := esClient.Index(index, "", id, template, "wait_for")
if err != nil {
log.Error(err)
@@ -102,14 +101,13 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
resBody["_id"] = id
resBody["result"] = insertRes.Result
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
@@ -118,8 +116,8 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
@@ -136,8 +134,8 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
}
templateID := ps.ByName("template_id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
index := orm.GetIndexName(elastic.SearchTemplate{})
getRes, err := esClient.Get(index, "", templateID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
@@ -197,9 +195,9 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
ht := &elastic.SearchTemplateHistory{
TemplateID: templateID,
Action: "update",
Content: originTemplate,
Created: time.Now(),
}
esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "")
@@ -207,14 +205,13 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
resBody["_id"] = templateID
resBody["result"] = insertRes.Result
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
@@ -222,8 +219,8 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
@@ -231,7 +228,7 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req
templateID := ps.ByName("template_id")
index := orm.GetIndexName(elastic.SearchTemplate{})
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
res, err := esClient.Get(index, "", templateID)
if err != nil {
@@ -258,9 +255,9 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req
ht := &elastic.SearchTemplateHistory{
TemplateID: templateID,
Action: "delete",
Content: res.Source,
Created: time.Now(),
}
_, err = esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "wait_for")
if err != nil {
@@ -273,21 +270,20 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req
}
func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
var (
name = h.GetParameterOrDefault(req, "name", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
mustBuilder = &strings.Builder{}
)
from, _ := strconv.Atoi(strFrom)
size, _ := strconv.Atoi(strSize)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"match":{"cluster_id": "%s"}}`, targetClusterID))
if name != "" {
mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"name": "%s"}}`, name))
}
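The handler assembles raw query DSL by accumulating match clauses in mustBuilder and splicing them into the %s slot of queryDSL together with from and size. A runnable sketch of that assembly with illustrative values, printing the body that would be sent:

package main

import (
	"fmt"
	"strings"
)

func main() {
	queryDSL := `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
	mustBuilder := &strings.Builder{}
	mustBuilder.WriteString(fmt.Sprintf(`{"match":{"cluster_id": "%s"}}`, "c0")) // illustrative id
	name := "my-template"                                                        // illustrative name
	if name != "" {
		mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"name": "%s"}}`, name))
	}
	fmt.Printf(queryDSL+"\n", mustBuilder.String(), 0, 20)
	// {"query":{"bool":{"must":[{"match":{"cluster_id": "c0"}},{"match":{"name": "my-template"}}]}},"from": 0, "size": 20}
}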
@@ -305,8 +301,8 @@ func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req
h.WriteJSON(w, res, http.StatusOK)
}
func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("template_id")
indexName := orm.GetIndexName(elastic.SearchTemplate{})
@@ -314,31 +310,30 @@ func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *h
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
if getResponse != nil {
h.WriteJSON(w, resBody, getResponse.StatusCode)
} else {
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
return
}
h.WriteJSON(w, getResponse, 200)
}
func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
var (
templateID = h.GetParameterOrDefault(req, "template_id", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
mustBuilder = &strings.Builder{}
)
from, _ := strconv.Atoi(strFrom)
size, _ := strconv.Atoi(strSize)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"match":{"content.cluster_id": "%s"}}`, targetClusterID))
if templateID != "" {
mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"template_id": "%s"}}`, templateID))
}
@@ -356,11 +351,10 @@ func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWrit
h.WriteJSON(w, res, http.StatusOK)
}
func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
@@ -368,8 +362,8 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
@@ -394,11 +388,10 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http
h.WriteJSON(w, string(res), http.StatusOK)
}
func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
@@ -406,8 +399,8 @@ func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
@@ -430,4 +423,4 @@ func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http
}
h.WriteJSON(w, string(res), http.StatusOK)
}

View File

@@ -36,8 +36,7 @@ import (
)

func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	targetClusterID := ps.ByName("id")
	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
@@ -58,12 +57,11 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
	searchRes, err := esClient.SearchWithRawQueryDSL(indexName, []byte(queryDSL))
	if len(searchRes.Hits.Hits) > 0 {
		_, err = esClient.Index(indexName, "", searchRes.Hits.Hits[0].ID, reqParams, "wait_for")
	} else {
		reqParams.ID = util.GetUUID()
		_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
	}
	if err != nil {
		log.Error(err)
		resBody["error"] = err
@@ -71,12 +69,11 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
		return
	}
	resBody["acknowledged"] = true
	h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	targetClusterID := ps.ByName("id")
	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
@@ -94,8 +91,8 @@ func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Req
	var value interface{}
	if len(searchRes.Hits.Hits) > 0 {
		value = searchRes.Hits.Hits[0].Source["value"]
	} else {
		value = ""
	}
	h.WriteJSON(w, value, http.StatusOK)
}
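HandleSettingAction above is a search-then-upsert: if the query finds an existing settings document its ID is reused, otherwise a fresh UUID is minted, and both paths index with refresh set to "wait_for" so the write is visible to the next search before the handler acknowledges. A minimal sketch of just that ID-selection branch; Hit and newID are illustrative stand-ins, not the framework's types:

package main

import (
    "crypto/rand"
    "encoding/hex"
    "fmt"
)

// Hit is a hypothetical, minimal stand-in for a search hit.
type Hit struct{ ID string }

// newID mints a random hex ID; the real handler calls util.GetUUID().
func newID() string {
    b := make([]byte, 16)
    if _, err := rand.Read(b); err != nil {
        panic(err)
    }
    return hex.EncodeToString(b)
}

// upsertID reuses the existing document's ID when the lookup found one,
// otherwise generates a fresh one, mirroring the branch above.
func upsertID(hits []Hit) (id string, existed bool) {
    if len(hits) > 0 {
        return hits[0].ID, true
    }
    return newID(), false
}

func main() {
    id, existed := upsertID([]Hit{{ID: "doc-1"}})
    fmt.Println(id, existed) // doc-1 true
    id, existed = upsertID(nil)
    fmt.Println(id, existed) // random hex id, false
}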


@@ -28,12 +28,12 @@
package api

import (
	"infini.sh/framework/core/event"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/modules/elastic/adapter"
	"net/http"

	log "github.com/cihub/seelog"
	httprouter "infini.sh/framework/core/api/router"
)

func (h *APIHandler) GetShardInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {


@@ -36,7 +36,7 @@ import (
	"src/github.com/buger/jsonparser"
)

func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	esClient := elastic.GetClient(clusterID)
	templates, err := esClient.GetTemplate("")
@@ -48,7 +48,7 @@ func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Re
	h.WriteJSON(w, templates, http.StatusOK)
}

func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	templateName := ps.MustGetParameter("template_name")
	esClient := elastic.GetClient(clusterID)
@@ -66,10 +66,10 @@ func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.R
	}
	resErr, _, _, _ := jsonparser.Get(esResBody, "error")
	if resErr != nil {
		errStr := string(resErr)
		log.Errorf("put template error: %s", errStr)
		h.WriteError(w, errStr, http.StatusInternalServerError)
		return
	}
	h.WriteAckOKJSON(w)
}
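HandleSaveTemplateAction checks the raw Elasticsearch response for an "error" key with jsonparser instead of unmarshalling the whole body; jsonparser.Get returns a non-nil error when the key is absent, so a non-nil value means the PUT failed. A self-contained sketch of that check (the sample payloads are made up):

package main

import (
    "fmt"

    "github.com/buger/jsonparser"
)

// esError returns the raw "error" object from an Elasticsearch response
// body, or nil when the key is absent (i.e. the call succeeded).
func esError(body []byte) []byte {
    v, _, _, err := jsonparser.Get(body, "error")
    if err != nil {
        return nil // "error" key not found: treat as success
    }
    return v
}

func main() {
    ok := []byte(`{"acknowledged":true}`)
    bad := []byte(`{"error":{"type":"parse_exception","reason":"bad template"}}`)
    fmt.Printf("ok  -> %q\n", esError(ok))
    fmt.Printf("bad -> %s\n", esError(bad))
}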


@@ -35,57 +35,57 @@ import (
)

const (
	ThreadPoolGetGroupKey = "thread_pool_get"
	ThreadPoolSearchGroupKey = "thread_pool_search"
	ThreadPoolFlushGroupKey = "thread_pool_flush"
	ThreadPoolRefreshGroupKey = "thread_pool_refresh"
	ThreadPoolWriteGroupKey = "thread_pool_write"
	ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
	ThreadPoolIndexGroupKey = "thread_pool_index"
	ThreadPoolBulkGroupKey = "thread_pool_bulk"
)

const (
	SearchThreadsMetricKey = "search_threads"
	IndexThreadsMetricKey = "index_threads"
	BulkThreadsMetricKey = "bulk_threads"
	FlushThreadsMetricKey = "flush_threads"
	RefreshThreadsMetricKey = "refresh_threads"
	WriteThreadsMetricKey = "write_threads"
	ForceMergeThreadsMetricKey = "force_merge_threads"
	SearchQueueMetricKey = "search_queue"
	IndexQueueMetricKey = "index_queue"
	BulkQueueMetricKey = "bulk_queue"
	FlushQueueMetricKey = "flush_queue"
	RefreshQueueMetricKey = "refresh_queue"
	WriteQueueMetricKey = "write_queue"
	SearchActiveMetricKey = "search_active"
	IndexActiveMetricKey = "index_active"
	BulkActiveMetricKey = "bulk_active"
	FlushActiveMetricKey = "flush_active"
	WriteActiveMetricKey = "write_active"
	ForceMergeActiveMetricKey = "force_merge_active"
	SearchRejectedMetricKey = "search_rejected"
	IndexRejectedMetricKey = "index_rejected"
	BulkRejectedMetricKey = "bulk_rejected"
	FlushRejectedMetricKey = "flush_rejected"
	WriteRejectedMetricKey = "write_rejected"
	ForceMergeRejectedMetricKey = "force_merge_rejected"
	GetThreadsMetricKey = "get_threads"
	GetQueueMetricKey = "get_queue"
	GetActiveMetricKey = "get_active"
	GetRejectedMetricKey = "get_rejected"
	RefreshActiveMetricKey = "refresh_active"
	RefreshRejectedMetricKey = "refresh_rejected"
	ForceMergeQueueMetricKey = "force_merge_queue"
)

func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
	clusterUUID, err := h.getClusterUUID(clusterID)
	if err != nil {
		return nil, err
	}
	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
	var must = []util.MapStr{
		{
			"term": util.MapStr{
@@ -108,7 +107,7 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
	if nodeName != "" {
		nodeNames = strings.Split(nodeName, ",")
		top = len(nodeNames)
	} else {
		nodeNames, err = h.getTopNodeName(clusterID, top, 15)
		if err != nil {
			log.Error(err)
@@ -131,10 +130,9 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
				},
			},
		},
		})
	}
	should := []util.MapStr{
		{
			"term": util.MapStr{
				"metadata.labels.cluster_id": util.MapStr{
@@ -143,20 +142,20 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
			},
		},
		{
			"term": util.MapStr{
				"metadata.labels.cluster_uuid": util.MapStr{
					"value": clusterUUID,
				},
			},
		},
	}
	query := map[string]interface{}{}
	query["query"] = util.MapStr{
		"bool": util.MapStr{
			"must": must,
			"minimum_should_match": 1,
			"should": should,
			"filter": []util.MapStr{
				{
					"range": util.MapStr{
@@ -173,159 +172,159 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
	switch metricKey {
	case SearchThreadsMetricKey:
		searchThreadsMetric := newMetricItem(SearchThreadsMetricKey, 1, ThreadPoolSearchGroupKey)
		searchThreadsMetric.AddAxi("Search Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "search_threads",
			Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: searchThreadsMetric,
			FormatType: "num",
			Units: "",
		})
	case SearchQueueMetricKey:
		searchQueueMetric := newMetricItem(SearchQueueMetricKey, 1, ThreadPoolSearchGroupKey)
		searchQueueMetric.AddAxi("Search Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "search_queue",
			Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: searchQueueMetric,
			FormatType: "num",
			Units: "",
		})
	case SearchActiveMetricKey:
		searchActiveMetric := newMetricItem(SearchActiveMetricKey, 1, ThreadPoolSearchGroupKey)
		searchActiveMetric.AddAxi("Search Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "search_active",
			Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: searchActiveMetric,
			FormatType: "num",
			Units: "",
		})
	case SearchRejectedMetricKey:
		searchRejectedMetric := newMetricItem(SearchRejectedMetricKey, 1, ThreadPoolSearchGroupKey)
		searchRejectedMetric.AddAxi("Search Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "search_rejected",
			Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: searchRejectedMetric,
			FormatType: "num",
			Units: "rejected/s",
		})
	case GetThreadsMetricKey:
		getThreadsMetric := newMetricItem(GetThreadsMetricKey, 1, ThreadPoolGetGroupKey)
		getThreadsMetric.AddAxi("Get Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "get_threads",
			Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: getThreadsMetric,
			FormatType: "num",
			Units: "",
		})
	case GetQueueMetricKey:
		getQueueMetric := newMetricItem(GetQueueMetricKey, 1, ThreadPoolGetGroupKey)
		getQueueMetric.AddAxi("Get Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "get_queue",
			Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: getQueueMetric,
			FormatType: "num",
			Units: "",
		})
	case GetActiveMetricKey:
		getActiveMetric := newMetricItem(GetActiveMetricKey, 1, ThreadPoolGetGroupKey)
		getActiveMetric.AddAxi("Get Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "get_active",
			Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: getActiveMetric,
			FormatType: "num",
			Units: "",
		})
	case GetRejectedMetricKey:
		getRejectedMetric := newMetricItem(GetRejectedMetricKey, 1, ThreadPoolGetGroupKey)
		getRejectedMetric.AddAxi("Get Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "get_rejected",
			Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: getRejectedMetric,
			FormatType: "num",
			Units: "rejected/s",
		})
	case FlushThreadsMetricKey:
		flushThreadsMetric := newMetricItem(FlushThreadsMetricKey, 1, ThreadPoolFlushGroupKey)
		flushThreadsMetric.AddAxi("Flush Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "flush_threads",
			Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: flushThreadsMetric,
			FormatType: "num",
			Units: "",
		})
	case FlushQueueMetricKey:
		flushQueueMetric := newMetricItem(FlushQueueMetricKey, 1, ThreadPoolFlushGroupKey)
		flushQueueMetric.AddAxi("Get Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "flush_queue",
			Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: flushQueueMetric,
			FormatType: "num",
			Units: "",
		})
	case FlushActiveMetricKey:
		flushActiveMetric := newMetricItem(FlushActiveMetricKey, 1, ThreadPoolFlushGroupKey)
		flushActiveMetric.AddAxi("Flush Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "flush_active",
			Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: flushActiveMetric,
			FormatType: "num",
			Units: "",
		})
	case FlushRejectedMetricKey:
		flushRejectedMetric := newMetricItem(FlushRejectedMetricKey, 1, ThreadPoolFlushGroupKey)
		flushRejectedMetric.AddAxi("Flush Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "flush_rejected",
			Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: flushRejectedMetric,
			FormatType: "num",
			Units: "rejected/s",
		})
	case IndexThreadsMetricKey:
		indexThreadsMetric := newMetricItem(IndexThreadsMetricKey, 1, ThreadPoolIndexGroupKey)
@@ -485,137 +484,136 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
		})
	case RefreshThreadsMetricKey:
		refreshThreadsMetric := newMetricItem(RefreshThreadsMetricKey, 1, ThreadPoolRefreshGroupKey)
		refreshThreadsMetric.AddAxi("Refresh Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "refresh_threads",
			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: refreshThreadsMetric,
			FormatType: "num",
			Units: "",
		})
	case RefreshQueueMetricKey:
		refreshQueueMetric := newMetricItem(RefreshQueueMetricKey, 1, ThreadPoolRefreshGroupKey)
		refreshQueueMetric.AddAxi("Refresh Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "refresh_queue",
			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: refreshQueueMetric,
			FormatType: "num",
			Units: "",
		})
	case RefreshActiveMetricKey:
		refreshActiveMetric := newMetricItem(RefreshActiveMetricKey, 1, ThreadPoolRefreshGroupKey)
		refreshActiveMetric.AddAxi("Refresh Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "refresh_active",
			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: refreshActiveMetric,
			FormatType: "num",
			Units: "",
		})
	case RefreshRejectedMetricKey:
		refreshRejectedMetric := newMetricItem(RefreshRejectedMetricKey, 1, ThreadPoolRefreshGroupKey)
		refreshRejectedMetric.AddAxi("Refresh Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "refresh_rejected",
			Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: refreshRejectedMetric,
			FormatType: "num",
			Units: "rejected/s",
		})
	case ForceMergeThreadsMetricKey:
		forceMergeThreadsMetric := newMetricItem(ForceMergeThreadsMetricKey, 1, ThreadPoolForceMergeGroupKey)
		forceMergeThreadsMetric.AddAxi("Force Merge Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "force_merge_threads",
			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: forceMergeThreadsMetric,
			FormatType: "num",
			Units: "",
		})
	case ForceMergeQueueMetricKey:
		forceMergeQueueMetric := newMetricItem(ForceMergeQueueMetricKey, 1, ThreadPoolForceMergeGroupKey)
		forceMergeQueueMetric.AddAxi("Force Merge Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "force_merge_queue",
			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: forceMergeQueueMetric,
			FormatType: "num",
			Units: "",
		})
	case ForceMergeActiveMetricKey:
		forceMergeActiveMetric := newMetricItem(ForceMergeActiveMetricKey, 1, ThreadPoolForceMergeGroupKey)
		forceMergeActiveMetric.AddAxi("Force Merge Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "force_merge_active",
			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
			ID: util.GetUUID(),
			IsDerivative: false,
			MetricItem: forceMergeActiveMetric,
			FormatType: "num",
			Units: "",
		})
	case ForceMergeRejectedMetricKey:
		forceMergeRejectedMetric := newMetricItem(ForceMergeRejectedMetricKey, 1, ThreadPoolForceMergeGroupKey)
		forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		queueMetricItems = append(queueMetricItems, GroupMetricItem{
			Key: "force_merge_rejected",
			Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: forceMergeRejectedMetric,
			FormatType: "num",
			Units: "rejected/s",
		})
	}
	//Get Thread Pool queue
	aggs := map[string]interface{}{}
	for _, metricItem := range queueMetricItems {
		aggs[metricItem.ID] = util.MapStr{
			"max": util.MapStr{
				"field": metricItem.Field,
			},
		}
		if metricItem.Field2 != "" {
			aggs[metricItem.ID+"_field2"] = util.MapStr{
				"max": util.MapStr{
					"field": metricItem.Field2,
				},
			}
		}
		if metricItem.IsDerivative {
			aggs[metricItem.ID+"_deriv"] = util.MapStr{
				"derivative": util.MapStr{
					"buckets_path": metricItem.ID,
				},
			}
			if metricItem.Field2 != "" {
				aggs[metricItem.ID+"_field2_deriv"] = util.MapStr{
					"derivative": util.MapStr{
						"buckets_path": metricItem.ID + "_field2",
					},
				}
@@ -628,8 +626,8 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
		panic(err)
	}
	query["size"] = 0
	query["aggs"] = util.MapStr{
		"group_by_level": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.labels.transport_address",
@@ -637,11 +635,11 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
			},
			"aggs": util.MapStr{
				"dates": util.MapStr{
					"date_histogram": util.MapStr{
						"field": "timestamp",
						intervalField: bucketSizeStr,
					},
					"aggs": aggs,
				},
			},
		},
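For each selected metric the code above registers a max aggregation keyed by the metric's ID inside a per-node date_histogram and, for cumulative counters such as the rejected totals (IsDerivative: true), a sibling derivative aggregation whose buckets_path points back at the max, which converts running totals into per-bucket deltas. A standalone sketch that assembles an equivalent body with plain maps; "fixed_interval" stands in for the version-dependent intervalField resolved elsewhere, and the field name is one of the examples above:

package main

import (
    "encoding/json"
    "fmt"
)

// M is shorthand for an untyped JSON object, like util.MapStr in the source.
type M = map[string]interface{}

func main() {
    metricID := "search_rejected"
    field := "payload.elasticsearch.node_stats.thread_pool.search.rejected"

    aggs := M{
        metricID: M{"max": M{"field": field}},
        // The derivative of the per-bucket max turns the cumulative rejected
        // counter into a per-bucket delta, i.e. the basis for "rejected/s".
        metricID + "_deriv": M{"derivative": M{"buckets_path": metricID}},
    }
    body := M{
        "size": 0,
        "aggs": M{
            "dates": M{
                "date_histogram": M{"field": "timestamp", "fixed_interval": "10s"},
                "aggs":           aggs,
            },
        },
    }
    out, _ := json.MarshalIndent(body, "", "  ")
    fmt.Println(string(out))
}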


@@ -38,10 +38,9 @@ import (
)

func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)
	if err != nil {
		log.Error(err)
@@ -50,16 +49,14 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
		return
	}
	if !exists {
		resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
		log.Error(resBody["error"])
		h.WriteJSON(w, resBody, http.StatusNotFound)
		return
	}
	var traceReq = &elastic.TraceTemplate{}
	err = h.DecodeJSON(req, traceReq)
	if err != nil {
@@ -84,22 +81,21 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
	resBody["_id"] = insertRes.ID
	resBody["result"] = insertRes.Result
	h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	var (
		name = h.GetParameterOrDefault(req, "name", "")
		queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
		strSize = h.GetParameterOrDefault(req, "size", "20")
		strFrom = h.GetParameterOrDefault(req, "from", "0")
		mustBuilder = &strings.Builder{}
	)
	targetClusterID := ps.ByName("id")
	mustBuilder.WriteString(fmt.Sprintf(`{"term":{"cluster_id":{"value": "%s"}}}`, targetClusterID))
	if name != "" {
		mustBuilder.WriteString(fmt.Sprintf(`,{"prefix":{"name": "%s"}}`, name))
	}
	size, _ := strconv.Atoi(strSize)
@@ -126,8 +122,7 @@ func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req
}

func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	reqParams := elastic.TraceTemplate{}
	err := h.DecodeJSON(req, &reqParams)
@@ -140,7 +135,7 @@ func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *h
	reqParams.ID = ps.ByName("template_id")
	reqParams.Updated = time.Now()
	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
	_, err = esClient.Index(orm.GetIndexName(reqParams), "", reqParams.ID, reqParams, "wait_for")
	if err != nil {
		log.Error(err)
		resBody["error"] = err.Error()
@@ -152,11 +147,11 @@ func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *h
	resBody["result"] = "updated"
	resBody["_source"] = reqParams
	h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	id := ps.ByName("template_id")
	indexName := orm.GetIndexName(elastic.TraceTemplate{})
@@ -166,7 +161,7 @@ func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *ht
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
	}
	h.WriteJSON(w, getResponse, getResponse.StatusCode)
}

func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -177,9 +172,9 @@ func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req
	if err != nil {
		log.Error(err)
		resBody["error"] = err.Error()
		if delRes != nil {
			h.WriteJSON(w, resBody, delRes.StatusCode)
		} else {
			h.WriteJSON(w, resBody, http.StatusInternalServerError)
		}
	}
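HandleSearchTraceTemplateAction builds its query DSL textually: a mandatory term clause on cluster_id, an optional comma-prefixed prefix clause on name, and the fragments are then spliced into a bool/must template together with paging parameters. A runnable sketch of the same composition with example values (note this style assumes trusted inputs, since values go into JSON via Sprintf):

package main

import (
    "fmt"
    "strings"
)

// buildQuery mirrors the string composition above: clauses accumulate in a
// strings.Builder and are spliced into the bool/must template at the end.
func buildQuery(clusterID, name string, size, from int) string {
    mustBuilder := &strings.Builder{}
    mustBuilder.WriteString(fmt.Sprintf(`{"term":{"cluster_id":{"value": "%s"}}}`, clusterID))
    if name != "" {
        // Extra clauses are comma-prefixed so the must array stays valid JSON.
        mustBuilder.WriteString(fmt.Sprintf(`,{"prefix":{"name": "%s"}}`, name))
    }
    queryDSL := `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
    return fmt.Sprintf(queryDSL, mustBuilder.String(), size, from)
}

func main() {
    fmt.Println(buildQuery("cluster-1", "trace-", 20, 0))
}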


@@ -178,7 +178,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	}
	histgram := common.NewBucketItem(
		common.DateHistogramBucket, util.MapStr{
			"field": "timestamp",
			intervalField: bucketSizeStr,
		})
	histgram.AddMetricItems(metricItems...)
@@ -669,8 +669,8 @@ type RealtimeNodeInfo struct {
func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -685,7 +685,7 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map
		"aggs": util.MapStr{
			"date": util.MapStr{
				"date_histogram": util.MapStr{
					"field": "timestamp",
					intervalField: "10s",
				},
				"aggs": util.MapStr{
@@ -775,9 +775,9 @@ func (h *APIHandler) QueryQPS(query util.MapStr, bucketSizeInSeconds int) (map[s
		maxIndexRate float64
		maxQueryRate float64
		maxIndexBytesRate float64
		preIndexTotal float64
		dropNext bool
		maxTimestamp float64
	)
	for _, dateBk := range bks {
		if dateBkVal, ok := dateBk.(map[string]interface{}); ok {
@@ -786,11 +786,11 @@ func (h *APIHandler) QueryQPS(query util.MapStr, bucketSizeInSeconds int) (map[s
			if preIndexTotal > 0 {
				//if value of indexTotal is decreasing, drop the next value,
				//and we will drop current and next qps value
				if indexTotalVal-preIndexTotal < 0 {
					dropNext = true
					preIndexTotal = indexTotalVal
					continue
				} else {
					dropNext = false
				}
			}
@@ -866,11 +866,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
				{
					"match": util.MapStr{
						reqBody.SearchField: util.MapStr{
							"query": reqBody.Keyword,
							"fuzziness": "AUTO",
							"max_expansions": 10,
							"prefix_length": 2,
							"boost": 2,
						},
					},
				},
@@ -912,11 +912,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
				{
					"match": util.MapStr{
						"search_text": util.MapStr{
							"query": reqBody.Keyword,
							"fuzziness": "AUTO",
							"max_expansions": 10,
							"prefix_length": 2,
							"boost": 2,
						},
					},
				},
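QueryQPS derives rates from cumulative totals, so a total that shrinks between buckets (typically a counter reset after a node restart) would otherwise yield a bogus negative rate; the dropNext flag above discards the sample where the decrease is detected and the one after it. A compact sketch of that guard over made-up samples:

package main

import "fmt"

// ratesFromTotals converts cumulative counter samples into per-interval
// rates, dropping the sample where the counter went backwards and the one
// after it, mirroring the dropNext guard above.
func ratesFromTotals(totals []float64, intervalSeconds float64) []float64 {
    var (
        rates    []float64
        preTotal float64
        dropNext bool
    )
    for _, t := range totals {
        if preTotal > 0 {
            if t-preTotal < 0 { // counter reset detected
                dropNext = true
                preTotal = t
                continue
            }
            if !dropNext {
                rates = append(rates, (t-preTotal)/intervalSeconds)
            }
            dropNext = false
        }
        preTotal = t
    }
    return rates
}

func main() {
    // The third sample resets (node restart); its rate and the next are dropped.
    fmt.Println(ratesFromTotals([]float64{100, 250, 40, 190, 340}, 10)) // [15 15]
}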


@@ -39,54 +39,53 @@ import (
)

const (
	IndexStorageMetricKey = "index_storage"
	SegmentCountMetricKey = "segment_count"
	DocCountMetricKey = "doc_count"
	DocsDeletedMetricKey = "docs_deleted"
	QueryTimesMetricKey = "query_times"
	FetchTimesMetricKey = "fetch_times"
	ScrollTimesMetricKey = "scroll_times"
	MergeTimesMetricKey = "merge_times"
	RefreshTimesMetricKey = "refresh_times"
	FlushTimesMetricKey = "flush_times"
	IndexingRateMetricKey = "indexing_rate"
	IndexingBytesMetricKey = "indexing_bytes"
	IndexingLatencyMetricKey = "indexing_latency"
	QueryLatencyMetricKey = "query_latency"
	FetchLatencyMetricKey = "fetch_latency"
	MergeLatencyMetricKey = "merge_latency"
	RefreshLatencyMetricKey = "refresh_latency"
	ScrollLatencyMetricKey = "scroll_latency"
	FlushLatencyMetricKey = "flush_latency"
	QueryCacheMetricKey = "query_cache"
	RequestCacheMetricKey = "request_cache"
	RequestCacheHitMetricKey = "request_cache_hit"
	RequestCacheMissMetricKey = "request_cache_miss"
	QueryCacheCountMetricKey = "query_cache_count"
	QueryCacheHitMetricKey = "query_cache_hit"
	QueryCacheMissMetricKey = "query_cache_miss"
	FielddataCacheMetricKey = "fielddata_cache"
	SegmentMemoryMetricKey = "segment_memory"
	SegmentDocValuesMemoryMetricKey = "segment_doc_values_memory"
	SegmentTermsMemoryMetricKey = "segment_terms_memory"
	SegmentFieldsMemoryMetricKey = "segment_fields_memory"
	SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
	SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
	DocPercentMetricKey = "doc_percent"
	SegmentNormsMetricKey = "segment_norms_memory"
	SegmentPointsMetricKey = "segment_points_memory"
	VersionMapMetricKey = "segment_version_map"
	FixedBitSetMetricKey = "segment_fixed_bit_set"
)

func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
	var must = []util.MapStr{
		{
			"term": util.MapStr{
				"metadata.labels.cluster_id": util.MapStr{
					"value": clusterID,
				},
			},
		},
@@ -108,7 +107,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
	}
	var (
		indexNames []string
		err error
	)
	if indexName != "" {
		indexNames = strings.Split(indexName, ",")
@@ -116,11 +115,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
		if !hasAllPrivilege && len(allowedIndices) == 0 {
			return nil, nil
		}
		if !hasAllPrivilege {
			namePattern := radix.Compile(allowedIndices...)
			var filterNames []string
			for _, name := range indexNames {
				if namePattern.Match(name) {
					filterNames = append(filterNames, name)
				}
			}
@@ -131,7 +130,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
		}
		top = len(indexNames)
	} else {
		indexNames, err = h.getTopIndexName(req, clusterID, top, 15)
		if err != nil {
			log.Error(err)
@@ -146,8 +145,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
		})
	}
	query := map[string]interface{}{}
	query["query"] = util.MapStr{
		"bool": util.MapStr{
			"must": must,
			"must_not": []util.MapStr{
@@ -295,32 +294,32 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
	case FlushTimesMetricKey:
		// flush count
		flushTimesMetric := newMetricItem(FlushTimesMetricKey, 6, OperationGroupKey)
		flushTimesMetric.AddAxi("flush times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		indexMetricItems = append(indexMetricItems, GroupMetricItem{
			Key: "flush_times",
			Field: "payload.elasticsearch.index_stats.total.flush.total",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: flushTimesMetric,
			FormatType: "num",
			Units: "requests/s",
		})
	case IndexingRateMetricKey:
		// indexing rate
		indexingRateMetric := newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
		indexingRateMetric.AddAxi("Indexing rate", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
		indexMetricItems = append(indexMetricItems, GroupMetricItem{
			Key: "indexing_rate",
			Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
			ID: util.GetUUID(),
			IsDerivative: true,
			MetricItem: indexingRateMetric,
			FormatType: "num",
			Units: "doc/s",
		})
	case IndexingBytesMetricKey:
		indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
		indexingBytesMetric.AddAxi("Indexing bytes", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
		indexMetricItems = append(indexMetricItems, GroupMetricItem{
			Key: "indexing_bytes",
			Field: "payload.elasticsearch.index_stats.primaries.store.size_in_bytes",
@ -333,382 +332,381 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
case IndexingLatencyMetricKey: case IndexingLatencyMetricKey:
//写入时延 //写入时延
indexingLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey) indexingLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) indexingLatencyMetric.AddAxi("Indexing latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_latency", Key: "indexing_latency",
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis", Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total", Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
Calc: func(value, value2 float64) float64 { Calc: func(value, value2 float64) float64 {
return value/value2 return value / value2
}, },
ID: util.GetUUID(), ID: util.GetUUID(),
IsDerivative: true, IsDerivative: true,
MetricItem: indexingLatencyMetric, MetricItem: indexingLatencyMetric,
FormatType: "num", FormatType: "num",
Units: "ms", Units: "ms",
}) })
case QueryLatencyMetricKey: case QueryLatencyMetricKey:
//查询时延 //查询时延
queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey) queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) queryLatencyMetric.AddAxi("Query latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_latency", Key: "query_latency",
Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis", Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.search.query_total", Field2: "payload.elasticsearch.index_stats.total.search.query_total",
Calc: func(value, value2 float64) float64 { Calc: func(value, value2 float64) float64 {
return value/value2 return value / value2
}, },
ID: util.GetUUID(), ID: util.GetUUID(),
IsDerivative: true, IsDerivative: true,
MetricItem: queryLatencyMetric, MetricItem: queryLatencyMetric,
FormatType: "num", FormatType: "num",
Units: "ms", Units: "ms",
}) })
case FetchLatencyMetricKey: case FetchLatencyMetricKey:
//fetch时延 //fetch时延
fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey) fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) fetchLatencyMetric.AddAxi("Fetch latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_latency", Key: "fetch_latency",
Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis", Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.search.fetch_total", Field2: "payload.elasticsearch.index_stats.total.search.fetch_total",
Calc: func(value, value2 float64) float64 { Calc: func(value, value2 float64) float64 {
return value/value2 return value / value2
}, },
ID: util.GetUUID(), ID: util.GetUUID(),
IsDerivative: true, IsDerivative: true,
MetricItem: fetchLatencyMetric, MetricItem: fetchLatencyMetric,
FormatType: "num", FormatType: "num",
Units: "ms", Units: "ms",
}) })
case MergeLatencyMetricKey: case MergeLatencyMetricKey:
//merge时延 //merge时延
mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey) mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) mergeLatencyMetric.AddAxi("Merge latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_latency", Key: "merge_latency",
Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis", Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.merges.total", Field2: "payload.elasticsearch.index_stats.total.merges.total",
Calc: func(value, value2 float64) float64 { Calc: func(value, value2 float64) float64 {
return value/value2 return value / value2
}, },
ID: util.GetUUID(), ID: util.GetUUID(),
IsDerivative: true, IsDerivative: true,
MetricItem: mergeLatencyMetric, MetricItem: mergeLatencyMetric,
FormatType: "num", FormatType: "num",
Units: "ms", Units: "ms",
}) })
case RefreshLatencyMetricKey: case RefreshLatencyMetricKey:
//refresh时延 //refresh时延
refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey) refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) refreshLatencyMetric.AddAxi("Refresh latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_latency", Key: "refresh_latency",
Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis", Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.refresh.total", Field2: "payload.elasticsearch.index_stats.total.refresh.total",
Calc: func(value, value2 float64) float64 { Calc: func(value, value2 float64) float64 {
return value/value2 return value / value2
}, },
ID: util.GetUUID(), ID: util.GetUUID(),
IsDerivative: true, IsDerivative: true,
MetricItem: refreshLatencyMetric, MetricItem: refreshLatencyMetric,
FormatType: "num", FormatType: "num",
Units: "ms", Units: "ms",
}) })
case ScrollLatencyMetricKey: case ScrollLatencyMetricKey:
//scroll时延 //scroll时延
scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey) scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true) scrollLatencyMetric.AddAxi("Scroll Latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{ indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_latency", Key: "scroll_latency",
Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis", Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.search.scroll_total", Field2: "payload.elasticsearch.index_stats.total.search.scroll_total",
Calc: func(value, value2 float64) float64 { Calc: func(value, value2 float64) float64 {
return value/value2 return value / value2
}, },
ID: util.GetUUID(), ID: util.GetUUID(),
IsDerivative: true, IsDerivative: true,
MetricItem: scrollLatencyMetric, MetricItem: scrollLatencyMetric,
FormatType: "num", FormatType: "num",
Units: "ms", Units: "ms",
}) })
case FlushLatencyMetricKey:
// flush latency
flushLatencyMetric := newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
flushLatencyMetric.AddAxi("Flush latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_latency",
Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.flush.total",
Calc: func(value, value2 float64) float64 {
return value / value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushLatencyMetric,
FormatType: "num",
Units: "ms",
})
case QueryCacheMetricKey:
//queryCache
queryCacheMetric := newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
queryCacheMetric.AddAxi("Query cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache",
Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: queryCacheMetric,
FormatType: "bytes",
Units: "",
})
case RequestCacheMetricKey:
//requestCache
requestCacheMetric := newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
requestCacheMetric.AddAxi("request cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache",
Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: requestCacheMetric,
FormatType: "bytes",
Units: "",
})
case RequestCacheHitMetricKey:
// Request Cache Hit
requestCacheHitMetric := newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
requestCacheHitMetric.AddAxi("request cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache_hit",
Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: requestCacheHitMetric,
FormatType: "num",
Units: "hits",
})
case RequestCacheMissMetricKey:
// Request Cache Miss
requestCacheMissMetric := newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
requestCacheMissMetric.AddAxi("request cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache_miss",
Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: requestCacheMissMetric,
FormatType: "num",
Units: "misses",
})
case QueryCacheCountMetricKey:
// Query Cache Count
queryCacheCountMetric := newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
queryCacheCountMetric.AddAxi("query cache count", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache_count",
Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheCountMetric,
FormatType: "num",
Units: "",
})
case QueryCacheHitMetricKey:
// Query Cache Hit
queryCacheHitMetric := newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
queryCacheHitMetric.AddAxi("query cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache_hit",
Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheHitMetric,
FormatType: "num",
Units: "hits",
})
case QueryCacheMissMetricKey:
// Query Cache Miss
queryCacheMissMetric := newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
queryCacheMissMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache_miss",
Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheMissMetric,
FormatType: "num",
Units: "misses",
})
case FielddataCacheMetricKey:
// fielddata cache memory size
fieldDataCacheMetric := newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
fieldDataCacheMetric.AddAxi("FieldData Cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fielddata_cache",
Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: fieldDataCacheMetric,
FormatType: "bytes",
Units: "",
})
case SegmentMemoryMetricKey:
//segment memory
segmentMemoryMetric := newMetricItem(SegmentMemoryMetricKey, 13, MemoryGroupKey)
segmentMemoryMetric.AddAxi("Segment memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_memory",
Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentMemoryMetric,
FormatType: "bytes",
Units: "",
})
case SegmentDocValuesMemoryMetricKey:
//segment doc values memory
docValuesMemoryMetric := newMetricItem(SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey)
docValuesMemoryMetric.AddAxi("Segment Doc values Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_doc_values_memory",
Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docValuesMemoryMetric,
FormatType: "bytes",
Units: "",
})
case SegmentTermsMemoryMetricKey:
//segment terms memory
termsMemoryMetric := newMetricItem(SegmentTermsMemoryMetricKey, 13, MemoryGroupKey)
termsMemoryMetric.AddAxi("Segment Terms Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_terms_memory",
Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: termsMemoryMetric,
FormatType: "bytes",
Units: "",
})
case SegmentFieldsMemoryMetricKey:
//segment fields memory
fieldsMemoryMetric := newMetricItem(SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey)
fieldsMemoryMetric.AddAxi("Segment Fields Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_fields_memory",
Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: fieldsMemoryMetric,
FormatType: "bytes",
Units: "",
})
case SegmentIndexWriterMemoryMetricKey:
// segment index writer memory
segmentIndexWriterMemoryMetric := newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric.AddAxi("segment index writer memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_index_writer_memory",
Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentIndexWriterMemoryMetric,
FormatType: "bytes",
Units: "",
})
case SegmentTermVectorsMemoryMetricKey:
// segment term vectors memory
segmentTermVectorsMemoryMetric := newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_term_vectors_memory",
Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentTermVectorsMemoryMetric,
FormatType: "bytes",
Units: "",
})
case SegmentNormsMetricKey:
segmentNormsMetric := newMetricItem(SegmentNormsMetricKey, 17, MemoryGroupKey)
segmentNormsMetric.AddAxi("Segment norms memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: SegmentNormsMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.norms_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentNormsMetric,
FormatType: "bytes",
Units: "",
})
case SegmentPointsMetricKey:
segmentPointsMetric := newMetricItem(SegmentPointsMetricKey, 18, MemoryGroupKey)
segmentPointsMetric.AddAxi("Segment points memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: SegmentPointsMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.points_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentPointsMetric,
FormatType: "bytes",
Units: "",
})
case VersionMapMetricKey:
segmentVersionMapMetric := newMetricItem(VersionMapMetricKey, 18, MemoryGroupKey)
segmentVersionMapMetric.AddAxi("Segment version map memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: VersionMapMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.version_map_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentVersionMapMetric,
FormatType: "bytes",
Units: "",
})
case FixedBitSetMetricKey:
segmentFixedBitSetMetric := newMetricItem(FixedBitSetMetricKey, 18, MemoryGroupKey)
segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: FixedBitSetMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.fixed_bit_set_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentFixedBitSetMetric,
FormatType: "bytes",
Units: "",
})
}
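Aside (not part of this commit): each latency entry above pairs a cumulative time counter (Field, in milliseconds) with a cumulative operation counter (Field2) and marks both as derivatives, so Calc ends up dividing per-bucket deltas rather than raw totals. A minimal runnable sketch of that arithmetic, with made-up sample values:

package main

import "fmt"

func main() {
    // Hypothetical cumulative counters sampled once per date_histogram bucket.
    totalTimeMs := []float64{1000, 1600, 2500} // e.g. merges.total_time_in_millis
    totalOps := []float64{50, 70, 100}         // e.g. merges.total

    calc := func(value, value2 float64) float64 { return value / value2 }

    for i := 1; i < len(totalTimeMs); i++ {
        dTime := totalTimeMs[i] - totalTimeMs[i-1] // derivative of Field
        dOps := totalOps[i] - totalOps[i-1]        // derivative of Field2
        fmt.Printf("bucket %d: %.1f ms/op\n", i, calc(dTime, dOps))
    }
}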
aggs := map[string]interface{}{}
for _, metricItem := range indexMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID+"_field2"] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field2,
},
}
}
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID+"_deriv_field2"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
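Aside: for a derivative metric with a second field, the loop above emits four sibling aggregations per metric ID, two max leaf aggs and two derivative pipeline aggs pointing back at them. A standalone sketch of the resulting shape, using plain maps instead of the project's util.MapStr (the ID is normally a UUID; field names here are placeholders):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    id := "metric-id" // stands in for util.GetUUID()
    aggs := map[string]interface{}{
        id:             map[string]interface{}{"max": map[string]string{"field": "...total_time_in_millis"}},
        id + "_field2": map[string]interface{}{"max": map[string]string{"field": "...total"}},
        id + "_deriv":  map[string]interface{}{"derivative": map[string]string{"buckets_path": id}},
        id + "_deriv_field2": map[string]interface{}{
            "derivative": map[string]string{"buckets_path": id + "_field2"},
        },
    }
    out, _ := json.MarshalIndent(aggs, "", "  ")
    fmt.Println(string(out))
}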
@ -720,8 +718,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
return nil, err
}
query["size"] = 0
query["aggs"] = util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
@ -732,11 +730,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": aggs,
},
"max_store": util.MapStr{
"max": util.MapStr{
@ -750,7 +748,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
}
func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error) {
ver := h.Client().GetVersion()
cr, _ := util.VersionCompare(ver.Number, "6.1")
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 {
@ -758,8 +756,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
}
var (
now = time.Now()
max = now.UnixNano() / 1e6
min = now.Add(-time.Duration(lastMinutes)*time.Minute).UnixNano() / 1e6
)
var must = []util.MapStr{
{
@ -791,8 +789,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
if !hasAllPrivilege {
must = append(must, util.MapStr{
"query_string": util.MapStr{
"query": strings.Join(allowedIndices, " "),
"fields": []string{"metadata.labels.index_name"},
"default_operator": "OR",
},
})
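Aside: the clause appended above is how index-level permissions are enforced here, allowed index patterns are OR-joined into a single query_string filter over metadata.labels.index_name. A standalone sketch with hypothetical patterns:

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

func main() {
    // Hypothetical patterns granted to the current user.
    allowedIndices := []string{"logs-*", "metrics-2024*"}
    clause := map[string]interface{}{
        "query_string": map[string]interface{}{
            "query":            strings.Join(allowedIndices, " "),
            "fields":           []string{"metadata.labels.index_name"},
            "default_operator": "OR",
        },
    }
    b, _ := json.MarshalIndent(clause, "", "  ")
    fmt.Println(string(b))
}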
@ -850,7 +848,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
@ -889,7 +887,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
@ -909,20 +907,20 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
},
},
}
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
if err != nil {
log.Error(err)
return nil, err
}
var maxQpsKVS = map[string]float64{}
for _, agg := range response.Aggregations {
for _, bk := range agg.Buckets {
key := bk["key"].(string)
if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok {
val := maxQps["value"].(float64)
if _, ok = maxQpsKVS[key]; ok {
maxQpsKVS[key] = maxQpsKVS[key] + val
} else {
maxQpsKVS[key] = val
}
}
@ -943,7 +941,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
length = len(qpsValues)
}
indexNames := []string{}
for i := 0; i < length; i++ {
indexNames = append(indexNames, qpsValues[i].Key)
}
return indexNames, nil
@ -954,12 +952,13 @@ type TopTerm struct {
Value float64
}
type TopTermOrder []TopTerm

func (t TopTermOrder) Len() int {
return len(t)
}
func (t TopTermOrder) Less(i, j int) bool {
return t[i].Value > t[j].Value //desc
}
func (t TopTermOrder) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}


@ -72,7 +72,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
for _, v := range results.Result {
result, ok := v.(map[string]interface{})
if ok {
if indexID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "index_id"}, result); ok {
summary := map[string]interface{}{}
if docs, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_stats", "total", "docs"}, result); ok {
if docsM, ok := docs.(map[string]interface{}); ok {
@ -97,7 +97,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
if sinfo, ok := shardInfo.([]interface{}); ok {
unassignedCount := 0
for _, item := range sinfo {
if itemMap, ok := item.(map[string]interface{}); ok {
if itemMap["state"] == "UNASSIGNED" {
unassignedCount++
}
@ -121,7 +121,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
return
}
firstClusterID, firstIndexName = parts[0], parts[1]
} else {
h.WriteError(w, fmt.Sprintf("invalid index_id: %v", indexID), http.StatusInternalServerError)
return
}
@ -137,35 +137,35 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
}
var metricLen = 15
// indexing rate
indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems := []GroupMetricItem{}
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "indexing",
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexMetric,
FormatType: "num",
Units: "Indexing/s",
})
queryMetric := newMetricItem("search", 2, OperationGroupKey)
queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "search",
Field: "payload.elasticsearch.index_stats.total.search.query_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryMetric,
FormatType: "num",
Units: "Search/s",
})
aggs := map[string]interface{}{}
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": []util.MapStr{
{
"term": util.MapStr{
"metadata.category": util.MapStr{
@ -190,7 +190,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
},
},
},
@ -198,15 +198,15 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
},
}
for _, metricItem := range nodeMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}
@ -218,8 +218,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
if err != nil {
panic(err)
}
query["size"] = 0
query["aggs"] = util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_id",
@ -227,11 +227,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": aggs,
},
},
},
@ -245,9 +245,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
@ -292,11 +291,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
indexID := ps.MustGetParameter("index") indexID := ps.MustGetParameter("index")
parts := strings.Split(indexID, ":") parts := strings.Split(indexID, ":")
if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) { if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) {
h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
return return
} }
if len(parts) < 2 { if len(parts) < 2 {
h.WriteError(w, "invalid index id: "+ indexID, http.StatusInternalServerError) h.WriteError(w, "invalid index id: "+indexID, http.StatusInternalServerError)
return return
} }
@ -320,7 +319,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
return
}
q1 := orm.Query{
Size: 1,
WildcardIndex: true,
}
q1.Conds = orm.And(
@ -340,7 +339,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
summary["aliases"] = aliases summary["aliases"] = aliases
summary["timestamp"] = hit["timestamp"] summary["timestamp"] = hit["timestamp"]
summary["index_info"] = util.MapStr{ summary["index_info"] = util.MapStr{
"health":health, "health": health,
"status": state, "status": state,
} }
} }
@ -361,11 +360,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
if tm, ok := result["timestamp"].(string); ok { if tm, ok := result["timestamp"].(string); ok {
issueTime, _ := time.Parse(time.RFC3339, tm) issueTime, _ := time.Parse(time.RFC3339, tm)
if time.Now().Sub(issueTime).Seconds() > 30 { if time.Now().Sub(issueTime).Seconds() > 30 {
health, _:= util.GetMapValueByKeys([]string{"metadata", "labels", "health_status"}, response.Hits.Hits[0].Source) health, _ := util.GetMapValueByKeys([]string{"metadata", "labels", "health_status"}, response.Hits.Hits[0].Source)
infoM["health"] = health infoM["health"] = health
} }
} }
state, _:= util.GetMapValueByKeys([]string{"metadata", "labels", "state"}, response.Hits.Hits[0].Source) state, _ := util.GetMapValueByKeys([]string{"metadata", "labels", "state"}, response.Hits.Hits[0].Source)
if state == "delete" { if state == "delete" {
infoM["status"] = "delete" infoM["status"] = "delete"
infoM["health"] = "N/A" infoM["health"] = "N/A"
@ -377,7 +376,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
if sinfo, ok := shardInfo.([]interface{}); ok {
unassignedCount := 0
for _, item := range sinfo {
if itemMap, ok := item.(map[string]interface{}); ok {
if itemMap["state"] == "UNASSIGNED" {
unassignedCount++
}
@ -398,7 +397,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
clusterID := ps.MustGetParameter("id") clusterID := ps.MustGetParameter("id")
indexName := ps.MustGetParameter("index") indexName := ps.MustGetParameter("index")
q1 := orm.Query{ q1 := orm.Query{
Size: 1, Size: 1,
WildcardIndex: true, WildcardIndex: true,
} }
q1.Conds = orm.And( q1.Conds = orm.And(
@ -411,9 +410,9 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
q1.AddSort("timestamp", orm.DESC) q1.AddSort("timestamp", orm.DESC)
err, result := orm.Search(&event.Event{}, &q1) err, result := orm.Search(&event.Event{}, &q1)
if err != nil { if err != nil {
h.WriteJSON(w,util.MapStr{ h.WriteJSON(w, util.MapStr{
"error": err.Error(), "error": err.Error(),
}, http.StatusInternalServerError ) }, http.StatusInternalServerError)
return return
} }
var shardInfo interface{} = []interface{}{} var shardInfo interface{} = []interface{}{}
@ -512,7 +511,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
log.Error(err)
}
metrics["index_health"] = healthMetric
} else {
switch metricKey {
case IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 1, OperationGroupKey)
@ -582,7 +581,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
minBucketSize, err := GetMetricMinBucketSize(clusterID, MetricTypeIndexStats)
if err != nil {
log.Error(err)
} else {
metrics[metricKey].MinBucketSize = int64(minBucketSize)
}
}
@ -591,8 +590,8 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err
@ -645,14 +644,14 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
"aggs": util.MapStr{ "aggs": util.MapStr{
"dates": util.MapStr{ "dates": util.MapStr{
"date_histogram": util.MapStr{ "date_histogram": util.MapStr{
"field": "timestamp", "field": "timestamp",
intervalField: bucketSizeStr, intervalField: bucketSizeStr,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
"group_status": util.MapStr{ "group_status": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "payload.elasticsearch.index_health.status", "field": "payload.elasticsearch.index_health.status",
"size": 5, "size": 5,
}, },
}, },
}, },
@ -666,8 +665,8 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
return nil, err
}
metricItem := newMetricItem("index_health", 1, "")
metricItem.AddLine("health", "Health", "", "group1", "payload.elasticsearch.index_health.status", "max", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false)
metricData := []interface{}{}
if response.StatusCode == 200 {
@ -683,8 +682,7 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
return metricItem, nil
}
func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string) (map[string][]interface{}, error) {
q := orm.Query{
WildcardIndex: true,
}
@ -698,53 +696,53 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[
"ranges": []util.MapStr{ "ranges": []util.MapStr{
{ {
"from": "now-13d/d", "from": "now-13d/d",
"to": "now-12d/d", "to": "now-12d/d",
}, { }, {
"from": "now-12d/d", "from": "now-12d/d",
"to": "now-11d/d", "to": "now-11d/d",
}, },
{ {
"from": "now-11d/d", "from": "now-11d/d",
"to": "now-10d/d", "to": "now-10d/d",
}, },
{ {
"from": "now-10d/d", "from": "now-10d/d",
"to": "now-9d/d", "to": "now-9d/d",
}, { }, {
"from": "now-9d/d", "from": "now-9d/d",
"to": "now-8d/d", "to": "now-8d/d",
}, },
{ {
"from": "now-8d/d", "from": "now-8d/d",
"to": "now-7d/d", "to": "now-7d/d",
}, },
{ {
"from": "now-7d/d", "from": "now-7d/d",
"to": "now-6d/d", "to": "now-6d/d",
}, },
{ {
"from": "now-6d/d", "from": "now-6d/d",
"to": "now-5d/d", "to": "now-5d/d",
}, { }, {
"from": "now-5d/d", "from": "now-5d/d",
"to": "now-4d/d", "to": "now-4d/d",
}, },
{ {
"from": "now-4d/d", "from": "now-4d/d",
"to": "now-3d/d", "to": "now-3d/d",
},{ }, {
"from": "now-3d/d", "from": "now-3d/d",
"to": "now-2d/d", "to": "now-2d/d",
}, { }, {
"from": "now-2d/d", "from": "now-2d/d",
"to": "now-1d/d", "to": "now-1d/d",
}, { }, {
"from": "now-1d/d", "from": "now-1d/d",
"to": "now/d", "to": "now/d",
}, },
{ {
"from": "now/d", "from": "now/d",
"to": "now", "to": "now",
}, },
}, },
}, },
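Aside: the hand-written list above is thirteen one-day windows plus a today-so-far bucket. An equivalent loop that generates the same date_range entries, shown here only as a sketch of the pattern, not a proposed change to the commit:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    ranges := []map[string]string{}
    for d := 13; d >= 1; d-- {
        to := fmt.Sprintf("now-%dd/d", d-1)
        if d == 1 {
            to = "now/d" // the final full day ends at the start of today
        }
        ranges = append(ranges, map[string]string{
            "from": fmt.Sprintf("now-%dd/d", d),
            "to":   to,
        })
    }
    // Today so far.
    ranges = append(ranges, map[string]string{"from": "now/d", "to": "now"})
    b, _ := json.Marshal(ranges)
    fmt.Println(string(b)) // 14 entries, matching the literal list above
}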
@ -817,16 +815,16 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[
}
healthMap := map[string]int{}
status := "unknown"
for _, hbkItem := range healthBks {
if hitem, ok := hbkItem.(map[string]interface{}); ok {
healthMap[hitem["key"].(string)] = 1
}
}
if _, ok = healthMap["red"]; ok {
status = "red"
} else if _, ok = healthMap["yellow"]; ok {
status = "yellow"
} else if _, ok = healthMap["green"]; ok {
status = "green"
}
key := fmt.Sprintf("%s:%s", clusterID, indexName)
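Aside: the if/else chain above is a worst-status-wins rule per day. A compact sketch of the same precedence, defaulting to "unknown" when a day has no health buckets:

package main

import "fmt"

// worstStatus mirrors the precedence above: red beats yellow beats green.
func worstStatus(healthMap map[string]int) string {
    switch {
    case healthMap["red"] > 0:
        return "red"
    case healthMap["yellow"] > 0:
        return "yellow"
    case healthMap["green"] > 0:
        return "green"
    }
    return "unknown"
}

func main() {
    fmt.Println(worstStatus(map[string]int{"green": 1, "yellow": 1})) // yellow
    fmt.Println(worstStatus(map[string]int{}))                        // unknown
}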
@ -838,7 +836,7 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[
}
func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("id")
indexName := ps.ByName("index")
if !h.IsIndexAllowed(req, id, indexName) {
@ -847,7 +845,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
}, http.StatusForbidden)
return
}
q := &orm.Query{Size: 1}
q.AddSort("timestamp", orm.DESC)
q.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),
@ -859,13 +857,13 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(event.Event{}, q)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
namesM := util.MapStr{}
if len(result.Result) > 0 {
if data, ok := result.Result[0].(map[string]interface{}); ok {
if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_routing_table"}, data); exists {
if table, ok := routingTable.(map[string]interface{}); ok {
if shardsM, ok := table["shards"].(map[string]interface{}); ok {
for _, rows := range shardsM {
if rowsArr, ok := rows.([]interface{}); ok {
@ -887,12 +885,12 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
}
//node uuid
nodeIds := make([]interface{}, 0, len(namesM))
for name, _ := range namesM {
nodeIds = append(nodeIds, name)
}
q1 := &orm.Query{Size: 100}
q1.AddSort("timestamp", orm.DESC)
q1.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),
@ -902,7 +900,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
err, result = orm.Search(elastic.NodeConfig{}, q1)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
nodes := []interface{}{}
for _, hit := range result.Result {
@ -922,11 +920,11 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
if v, ok := nodeId.(string); ok {
ninfo := util.MapStr{
"id": v,
"name": nodeName,
"ip": ip,
"port": port,
"status": status,
"timestamp": hitM["timestamp"],
}
nodes = append(nodes, ninfo)
@ -947,7 +945,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
}
var must = []util.MapStr{}
if !util.StringInArray(ids, "*") {
must = append(must, util.MapStr{
"terms": util.MapStr{
@ -958,9 +956,8 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
if keyword != "" { if keyword != "" {
must = append(must, util.MapStr{ must = append(must, util.MapStr{
"wildcard":util.MapStr{ "wildcard": util.MapStr{
"metadata.index_name": "metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
}, },
}) })
} }
@ -986,7 +983,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
},
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
indexName := orm.GetIndexName(elastic.IndexConfig{})
resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl))


@ -545,7 +545,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
minBucketSize, err := GetMetricMinBucketSize(id, metricType)
if err != nil {
log.Error(err)
} else {
metrics[key].MinBucketSize = int64(minBucketSize)
}
}
@ -648,7 +648,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
}
}
}
} else {
metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, key)
if err != nil {
h.WriteError(w, err, http.StatusInternalServerError)
@ -660,7 +660,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
minBucketSize, err := GetMetricMinBucketSize(id, MetricTypeNodeStats)
if err != nil {
log.Error(err)
} else {
metrics[key].MinBucketSize = int64(minBucketSize)
}
}
@ -788,19 +788,20 @@ const (
)
const (
ClusterStorageMetricKey = "cluster_storage"
ClusterDocumentsMetricKey = "cluster_documents"
ClusterIndicesMetricKey = "cluster_indices"
ClusterNodeCountMetricKey = "node_count"
ClusterHealthMetricKey = "cluster_health"
ShardCountMetricKey = "shard_count"
CircuitBreakerMetricKey = "circuit_breaker"
)

func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {
var (
clusterMetricsResult = map[string]*common.MetricItem{}
err error
)
switch metricKey {
case ClusterDocumentsMetricKey,
@ -915,12 +916,14 @@ func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, buck
}
return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}

const (
IndexThroughputMetricKey = "index_throughput"
SearchThroughputMetricKey = "search_throughput"
IndexLatencyMetricKey = "index_latency"
SearchLatencyMetricKey = "search_latency"
)

func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}


@ -113,7 +113,7 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) (map[string]*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
return nil, err
}
@ -229,11 +229,12 @@ func GetMinBucketSize() int {
const (
MetricTypeClusterHealth = "cluster_health"
MetricTypeClusterStats = "cluster_stats"
MetricTypeNodeStats = "node_stats"
MetricTypeIndexStats = "index_stats"
)

// GetMetricMinBucketSize returns twice the metrics collection interval based on the cluster ID and metric type
func GetMetricMinBucketSize(clusterID, metricType string) (int, error) {
meta := elastic.GetMetadata(clusterID)
if meta == nil {
@ -243,19 +244,19 @@ func GetMetricMinBucketSize(clusterID, metricType string) (int, error) {
switch metricType {
case MetricTypeClusterHealth:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.ClusterHealth.Interval
}
case MetricTypeClusterStats:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.ClusterStats.Interval
}
case MetricTypeNodeStats:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.NodeStats.Interval
}
case MetricTypeIndexStats:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.IndexStats.Interval
}
default:
return 0, fmt.Errorf("invalid metric name: %s", metricType)
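Aside: per its doc comment, GetMetricMinBucketSize returns twice the collection interval resolved by the switch above; the tail of the function falls outside this hunk, so the sketch below only illustrates that final step, under the assumption that a non-positive interval falls back to a default (the fallback value here is hypothetical):

package main

import "fmt"

// minBucketSize doubles the per-metric-type collection interval (seconds).
func minBucketSize(interval int) int {
    if interval <= 0 {
        interval = 10 // assumed fallback when no monitor config is set
    }
    return interval * 2
}

func main() {
    fmt.Println(minBucketSize(10)) // 20
    fmt.Println(minBucketSize(0))  // 20, via the assumed fallback
}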
@ -278,7 +279,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m
}
bucketSize := 0
bucketSizeStr := h.GetParameterOrDefault(req, "bucket_size", "") // default is 10; the time span of each bucket, in seconds
if bucketSizeStr != "" {
du, err := util.ParseDuration(bucketSizeStr)
if err != nil {
@ -293,7 +294,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m
maxStr := h.Get(req, "max", "")
var (
minBucketSize = 0
err error
)
//clusterID may be empty when querying host metrics
if clusterID != "" {
@ -301,7 +302,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to get min bucket size for cluster [%s]:%w", clusterID, err)
}
} else {
//default to 20
minBucketSize = 20
}


@ -45,40 +45,40 @@ import (
)
func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := util.MapStr{}
	reqBody := struct {
		Keyword      string                       `json:"keyword"`
		Size         int                          `json:"size"`
		From         int                          `json:"from"`
		Aggregations []elastic.SearchAggParam     `json:"aggs"`
		Highlight    elastic.SearchHighlightParam `json:"highlight"`
		Filter       elastic.SearchFilterParam    `json:"filter"`
		Sort         []string                     `json:"sort"`
		SearchField  string                       `json:"search_field"`
	}{}
	err := h.DecodeJSON(req, &reqBody)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
	aggs["term_cluster_id"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.cluster_id",
			"size":  1000,
		},
		"aggs": util.MapStr{
			"term_cluster_name": util.MapStr{
				"terms": util.MapStr{
					"field": "metadata.cluster_name",
					"size":  1,
				},
			},
		},
	}
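	// term_cluster_id buckets the results by cluster, and the single-bucket term_cluster_name sub-aggregation resolves each cluster's display name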
	var should = []util.MapStr{}
	if reqBody.SearchField != "" {
		should = []util.MapStr{
			{
				"prefix": util.MapStr{
@ -101,7 +101,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
				},
			},
		}
	} else {
		should = []util.MapStr{
			{
				"prefix": util.MapStr{
@ -143,30 +143,25 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
	}
	clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id")
	if !hasPrivilege && clusterFilter == nil {
		h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
		return
	}
	must := []interface{}{}
	if !hasPrivilege && clusterFilter != nil {
		must = append(must, clusterFilter)
	}
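	// must carries the privilege filter (when present); the should clauses hold the keyword matches, kept mandatory by minimum_should_match below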
	query := util.MapStr{
		"aggs":      aggs,
		"size":      reqBody.Size,
		"from":      reqBody.From,
		"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
		"query": util.MapStr{
			"bool": util.MapStr{
				"minimum_should_match": 1,
				"filter":               elastic.BuildSearchTermFilter(reqBody.Filter),
				"should":               should,
				"must":                 must,
			},
		},
		"sort": []util.MapStr{
@ -178,7 +173,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
		},
	}
	if len(reqBody.Sort) > 1 {
		query["sort"] = []util.MapStr{
			{
				reqBody.Sort[0]: util.MapStr{
					"order": reqBody.Sort[1],
@ -190,7 +185,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.NodeConfig{}), dsl)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	w.Write(util.MustToJSONBytes(response))
@ -293,7 +288,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
source["shard_info"] = shardInfo source["shard_info"] = shardInfo
} }
if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok { if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok {
if clusterID, ok = tempClusterID.(string); ok { if clusterID, ok = tempClusterID.(string); ok {
if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil { if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil {
source["is_master_node"] = meta.ClusterState.MasterNode == nodeID source["is_master_node"] = meta.ClusterState.MasterNode == nodeID
} }
@ -316,35 +311,35 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
		return
	}
	// indexing rate
	indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
	indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
	nodeMetricItems := []GroupMetricItem{}
	nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
		Key:          "indexing",
		Field:        "payload.elasticsearch.node_stats.indices.indexing.index_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   indexMetric,
		FormatType:   "num",
		Units:        "Indexing/s",
	})
	queryMetric := newMetricItem("search", 2, OperationGroupKey)
	queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
	nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
		Key:          "search",
		Field:        "payload.elasticsearch.node_stats.indices.search.query_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   queryMetric,
		FormatType:   "num",
		Units:        "Search/s",
	})
	aggs := map[string]interface{}{}
	query = map[string]interface{}{}
	query["query"] = util.MapStr{
		"bool": util.MapStr{
			"must": []util.MapStr{
				{
					"term": util.MapStr{
						"metadata.category": util.MapStr{
@ -378,15 +373,15 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
		},
	}
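	// one max aggregation per metric; for derivative items the pipeline agg below turns cumulative counters (index_total, query_total) into per-bucket rates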
	for _, metricItem := range nodeMetricItems {
		aggs[metricItem.ID] = util.MapStr{
			"max": util.MapStr{
				"field": metricItem.Field,
			},
		}
		if metricItem.IsDerivative {
			aggs[metricItem.ID+"_deriv"] = util.MapStr{
				"derivative": util.MapStr{
					"buckets_path": metricItem.ID,
				},
			}
@ -398,8 +393,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
	if err != nil {
		panic(err)
	}
	query["size"] = 0
	query["aggs"] = util.MapStr{
		"group_by_level": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.labels.node_id",
@ -407,11 +402,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
			},
			"aggs": util.MapStr{
				"dates": util.MapStr{
					"date_histogram": util.MapStr{
						"field":       "timestamp",
						intervalField: bucketSizeStr,
					},
					"aggs": aggs,
				},
			},
		},
@ -425,9 +420,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
	indexMetrics := map[string]util.MapStr{}
	for key, item := range metrics {
		for _, line := range item.Lines {
			if _, ok := indexMetrics[line.Metric.Label]; !ok {
				indexMetrics[line.Metric.Label] = util.MapStr{}
			}
			indexMetrics[line.Metric.Label][key] = line.Data
		}
@ -487,7 +481,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
	// return
	//}
	q1 := orm.Query{
		Size:          1,
		WildcardIndex: true,
	}
	q1.Conds = orm.And(
@ -512,7 +506,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
			tt, _ := time.Parse(time.RFC3339, ts)
			if time.Now().Sub(tt).Seconds() > 30 {
				kvs["status"] = "unavailable"
			} else {
				kvs["status"] = "available"
			}
		}
@ -530,9 +524,9 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
			jvm, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_stats", "jvm"}, vresult)
			if ok {
				if jvmVal, ok := jvm.(map[string]interface{}); ok {
					kvs["jvm"] = util.MapStr{
						"mem":    jvmVal["mem"],
						"uptime": jvmVal["uptime_in_millis"],
					}
				}
@ -553,7 +547,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
			}
		}
	}
	if len(response.Hits.Hits) > 0 {
		hit := response.Hits.Hits[0]
		innerMetaData, _ := util.GetMapValueByKeys([]string{"metadata", "labels"}, hit.Source)
		if mp, ok := innerMetaData.(map[string]interface{}); ok {
@ -583,8 +577,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
	nodeID := ps.MustGetParameter("node_id")
	var must = []util.MapStr{
		{
			"term": util.MapStr{
				"metadata.labels.cluster_uuid": util.MapStr{
					"value": clusterUUID,
				},
			},
@ -612,15 +606,15 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
		},
	}
	resBody := map[string]interface{}{}
	bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, MetricTypeNodeStats, 60)
	if err != nil {
		log.Error(err)
		resBody["error"] = err
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	query := map[string]interface{}{}
	query["query"] = util.MapStr{
		"bool": util.MapStr{
			"must":   must,
			"filter": []util.MapStr{
@ -636,67 +630,67 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
		},
	}
	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
	metricItems := []*common.MetricItem{}
	metricItem := newMetricItem("cpu", 1, SystemGroupKey)
	metricItem.AddAxi("cpu", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)
	metricItem.AddLine("Process CPU", "Process CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.process.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
	metricItem.AddLine("OS CPU", "OS CPU", "os cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.os.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
	metricItems = append(metricItems, metricItem)
	metricItem = newMetricItem("jvm", 2, SystemGroupKey)
	metricItem.AddAxi("JVM Heap", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
	metricItem.AddLine("Max Heap", "Max Heap", "JVM max Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
	metricItem.AddLine("Used Heap", "Used Heap", "JVM used Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
	metricItems = append(metricItems, metricItem)
	metricItem = newMetricItem("index_throughput", 3, OperationGroupKey)
	metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
	metricItem.AddLine("Indexing Rate", "Total Shards", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItems = append(metricItems, metricItem)
	metricItem = newMetricItem("search_throughput", 4, OperationGroupKey)
	metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
	metricItem.AddLine("Search Rate", "Total Shards",
		"Number of search requests being executed.",
		"group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItems = append(metricItems, metricItem)
	metricItem = newMetricItem("index_latency", 5, LatencyGroupKey)
	metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
	metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
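	// Calc divides the time-in-millis counter by the matching operation counter (Field2), giving average latency per operation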
	metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
		return value / value2
	}
	metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
	metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
		return value / value2
	}
	metricItems = append(metricItems, metricItem)
	metricItem = newMetricItem("search_latency", 6, LatencyGroupKey)
	metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
	metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
	metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
		return value / value2
	}
	metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
	metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
		return value / value2
	}
	metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching scroll.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
	metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
		return value / value2
	}
	metricItems = append(metricItems, metricItem)
	metricItem = newMetricItem("parent_breaker", 8, SystemGroupKey)
	metricItem.AddLine("Parent Breaker Tripped", "Parent Breaker Tripped", "Rate at which the circuit breaker has been triggered and prevented an out of memory error.", "group1", "payload.elasticsearch.node_stats.breakers.parent.tripped", "max", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
	metricItems = append(metricItems, metricItem)
	metrics, err := h.getSingleMetrics(context.Background(), metricItems, query, bucketSize)
	if err != nil {
		log.Error(err)
		h.WriteError(w, err, http.StatusInternalServerError)
@ -713,8 +707,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
	h.WriteJSON(w, resBody, http.StatusOK)
}
func getNodeHealthMetric(query util.MapStr, bucketSize int) (*common.MetricItem, error) {
	bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
	intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
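	// getDateHistogramIntervalField presumably picks the right date_histogram key ("interval" vs "fixed_interval") for the target cluster version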
	if err != nil {
		return nil, err
@ -722,7 +716,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
query["aggs"] = util.MapStr{ query["aggs"] = util.MapStr{
"dates": util.MapStr{ "dates": util.MapStr{
"date_histogram": util.MapStr{ "date_histogram": util.MapStr{
"field": "timestamp", "field": "timestamp",
intervalField: bucketSizeStr, intervalField: bucketSizeStr,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
@ -740,8 +734,8 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
		return nil, err
	}
	metricItem := newMetricItem("node_health", 0, "")
	metricItem.AddLine("Node health", "Node Health", "", "group1", "payload.elasticsearch.node_stats.jvm.uptime_in_millis", "min", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false)
	metricData := []interface{}{}
	if response.StatusCode == 200 {
@ -770,7 +764,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
	return metricItem, nil
}
func getNodeOnlineStatusOfRecentDay(nodeIDs []string) (map[string][]interface{}, error) {
	q := orm.Query{
		WildcardIndex: true,
	}
@ -779,64 +773,64 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
"group_by_node_id": util.MapStr{ "group_by_node_id": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "metadata.labels.node_id", "field": "metadata.labels.node_id",
"size": 100, "size": 100,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
"uptime_histogram": util.MapStr{ "uptime_histogram": util.MapStr{
"date_range": util.MapStr{ "date_range": util.MapStr{
"field": "timestamp", "field": "timestamp",
"format": "yyyy-MM-dd", "format": "yyyy-MM-dd",
"time_zone": "+08:00", "time_zone": "+08:00",
"ranges": []util.MapStr{ "ranges": []util.MapStr{
							{
								"from": "now-13d/d",
								"to":   "now-12d/d",
							}, {
								"from": "now-12d/d",
								"to":   "now-11d/d",
							},
							{
								"from": "now-11d/d",
								"to":   "now-10d/d",
							},
							{
								"from": "now-10d/d",
								"to":   "now-9d/d",
							}, {
								"from": "now-9d/d",
								"to":   "now-8d/d",
							},
							{
								"from": "now-8d/d",
								"to":   "now-7d/d",
							},
							{
								"from": "now-7d/d",
								"to":   "now-6d/d",
							},
							{
								"from": "now-6d/d",
								"to":   "now-5d/d",
							}, {
								"from": "now-5d/d",
								"to":   "now-4d/d",
							},
							{
								"from": "now-4d/d",
								"to":   "now-3d/d",
							}, {
								"from": "now-3d/d",
								"to":   "now-2d/d",
							}, {
								"from": "now-2d/d",
								"to":   "now-1d/d",
							}, {
								"from": "now-1d/d",
								"to":   "now/d",
							},
							{
								"from": "now/d",
								"to":   "now",
							},
						},
					},
@ -865,7 +859,7 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
				{
					"range": util.MapStr{
						"timestamp": util.MapStr{
							"gte": "now-15d",
							"lte": "now",
						},
					},
@ -903,13 +897,13 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
		recentStatus[nodeKey] = []interface{}{}
		if histogramAgg, ok := bk["uptime_histogram"].(map[string]interface{}); ok {
			if bks, ok := histogramAgg["buckets"].([]interface{}); ok {
				for _, bkItem := range bks {
					if bkVal, ok := bkItem.(map[string]interface{}); ok {
						if minUptime, ok := util.GetMapValueByKeys([]string{"min_uptime", "value"}, bkVal); ok {
							//mark node status as offline when uptime less than 10m
							if v, ok := minUptime.(float64); ok && v >= 600000 {
								recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "online"})
							} else {
								recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "offline"})
							}
						}
@ -927,10 +921,10 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
		max = h.GetParameterOrDefault(req, "max", "now")
	)
	resBody := map[string]interface{}{}
	id := ps.ByName("id")
	nodeUUID := ps.ByName("node_id")
	q := &orm.Query{Size: 1}
	q.AddSort("timestamp", orm.DESC)
	q.Conds = orm.And(
		orm.Eq("metadata.category", "elasticsearch"),
@ -942,16 +936,16 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
	err, result := orm.Search(event.Event{}, q)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
	}
	namesM := util.MapStr{}
	if len(result.Result) > 0 {
		if data, ok := result.Result[0].(map[string]interface{}); ok {
			if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_routing_table"}, data); exists {
				if rows, ok := routingTable.([]interface{}); ok {
					for _, row := range rows {
						if v, ok := row.(map[string]interface{}); ok {
							if indexName, ok := v["index"].(string); ok {
								namesM[indexName] = true
							}
						}
@ -961,12 +955,12 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
		}
	}
	indexNames := make([]interface{}, 0, len(namesM))
	for name, _ := range namesM {
		indexNames = append(indexNames, name)
	}
	q1 := &orm.Query{Size: 100}
	q1.AddSort("timestamp", orm.DESC)
	q1.Conds = orm.And(
		orm.Eq("metadata.category", "elasticsearch"),
@ -977,13 +971,13 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
	err, result = orm.Search(elastic.IndexConfig{}, q1)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
	}
	indices, err := h.getLatestIndices(req, min, max, id, &result)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
	}
	h.WriteJSON(w, indices, http.StatusOK)
@ -1069,7 +1063,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
	}
	indices := []interface{}{}
	var indexPattern *radix.Pattern
	if !hasAllPrivilege {
		indexPattern = radix.Compile(allowedIndices...)
	}
@ -1102,12 +1096,11 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
	return indices, nil
}
func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	nodeID := ps.MustGetParameter("node_id")
	q1 := orm.Query{
		Size:          1,
		WildcardIndex: true,
	}
	q1.Conds = orm.And(
@ -1119,9 +1112,9 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
	q1.AddSort("timestamp", orm.DESC)
	err, result := orm.Search(&event.Event{}, &q1)
	if err != nil {
		h.WriteJSON(w, util.MapStr{
			"error": err.Error(),
		}, http.StatusInternalServerError)
		return
	}
	var shardInfo interface{} = []interface{}{}
@ -1155,4 +1148,4 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
	}
	h.WriteJSON(w, shardInfo, http.StatusOK)
}
@ -28,15 +28,15 @@ import (
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"fmt" "fmt"
"github.com/crewjam/saml"
"net/http" "net/http"
"net/url" "net/url"
"github.com/crewjam/saml"
"github.com/crewjam/saml/samlsp" "github.com/crewjam/saml/samlsp"
) )
var metdataurl = "https://sso.infini.ltd/metadata" //Metadata of the IDP var metdataurl = "https://sso.infini.ltd/metadata" //Metadata of the IDP
var sessioncert = "./sessioncert" //Key pair used for creating a signed session var sessioncert = "./sessioncert" //Key pair used for creating a signed session
var sessionkey = "./sessionkey" var sessionkey = "./sessionkey"
var serverkey = "./serverkey" //Server TLS var serverkey = "./serverkey" //Server TLS
var servercert = "./servercert" var servercert = "./servercert"
@ -68,13 +68,13 @@ func main() {
	rootURL, err := url.Parse(serverurl)
	panicIfError(err)
	samlSP, _ := samlsp.New(samlsp.Options{
		URL:         *rootURL,
		Key:         keyPair.PrivateKey.(*rsa.PrivateKey),
		Certificate: keyPair.Leaf,
		IDPMetadata: &saml.EntityDescriptor{
			//EntityID:
		}, // you can also have Metadata XML instead of URL
		EntityID: entityId,
	})
	app := http.HandlerFunc(hello)
	http.Handle("/hello", samlSP.RequireAccount(app))
@ -49,7 +49,7 @@ func (h *AlertAPI) getAlert(w http.ResponseWriter, req *http.Request, ps httprou
	q := orm.Query{
		WildcardIndex: true,
		Size:          1,
	}
	q.Conds = orm.And(orm.Eq("id", id))
	err, result := orm.Search(obj, &q)
@ -76,16 +76,16 @@ func (h *AlertAPI) getAlert(w http.ResponseWriter, req *http.Request, ps httprou
func (h *AlertAPI) searchAlert(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	var (
		keyword     = h.GetParameterOrDefault(req, "keyword", "")
		queryDSL    = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
		strSize     = h.GetParameterOrDefault(req, "size", "20")
		strFrom     = h.GetParameterOrDefault(req, "from", "0")
		state       = h.GetParameterOrDefault(req, "state", "")
		priority    = h.GetParameterOrDefault(req, "priority", "")
		sort        = h.GetParameterOrDefault(req, "sort", "")
		ruleID      = h.GetParameterOrDefault(req, "rule_id", "")
		min         = h.GetParameterOrDefault(req, "min", "")
		max         = h.GetParameterOrDefault(req, "max", "")
		mustBuilder = &strings.Builder{}
		sortBuilder = strings.Builder{}
	)
@ -160,13 +160,13 @@ func (h *AlertAPI) getAlertStats(w http.ResponseWriter, req *http.Request, ps ht
"terms_by_state": util.MapStr{ "terms_by_state": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "priority", "field": "priority",
"size": 5, "size": 5,
}, },
}, },
}, },
} }
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl) ) searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl))
if err != nil { if err != nil {
h.WriteJSON(w, util.MapStr{ h.WriteJSON(w, util.MapStr{
"error": err.Error(), "error": err.Error(),
@ -191,4 +191,4 @@ func (h *AlertAPI) getAlertStats(w http.ResponseWriter, req *http.Request, ps ht
"current": priorityAlerts, "current": priorityAlerts,
}, },
}, http.StatusOK) }, http.StatusOK)
} }
@ -209,17 +209,17 @@ func (h *AlertAPI) deleteChannel(w http.ResponseWriter, req *http.Request, ps ht
func (h *AlertAPI) searchChannel(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	var (
		keyword = h.GetParameterOrDefault(req, "keyword", "")
		strSize = h.GetParameterOrDefault(req, "size", "20")
		strFrom = h.GetParameterOrDefault(req, "from", "0")
		subType = h.GetParameterOrDefault(req, "sub_type", "")
		typ     = h.GetParameterOrDefault(req, "type", "")
		sort    = h.GetParameterOrDefault(req, "sort", "updated:desc")
	)
	mustQ := []interface{}{}
	if keyword != "" {
		mustQ = append(mustQ, util.MapStr{
			"query_string": util.MapStr{"default_field": "*", "query": keyword},
		})
	}
	if typ != "" {
@ -249,7 +249,7 @@ func (h *AlertAPI) searchChannel(w http.ResponseWriter, req *http.Request, ps ht
		from = 0
	}
	var (
		sortField     string
		sortDirection string
	)
	sortParts := strings.Split(sort, ":")
@ -306,26 +306,26 @@ func (h *AlertAPI) testChannel(w http.ResponseWriter, req *http.Request, ps http
	}
	firstGrpValue := global.MustLookupString(elastic.GlobalSystemElasticsearchID)
	ctx := map[string]interface{}{
		"title": "INFINI platform test alert message",
		"message": "This is just a test message, do not reply!",
		"objects": []string{".infini_metrics"},
		"trigger_at": time.Now().UnixMilli(),
		"duration": "5m10s",
		"rule_id": util.GetUUID(),
		"rule_name": "test rule",
		"resource_id": util.GetUUID(),
		"resource_name": "test resource",
		"event_id": util.GetUUID(),
		"timestamp": time.Now().UnixMilli(),
		"first_group_value": firstGrpValue,
		"first_threshold": "90",
		"priority": "critical",
		"results": []util.MapStr{
			{"threshold": "90",
				"priority": "critical",
				"group_values": []string{firstGrpValue, "group_value2"},
				"issue_timestamp": time.Now().UnixMilli() - 500,
				"result_value": 90,
				"relation_values": util.MapStr{"a": 100, "b": 90},
			},
		},
@ -399,4 +399,4 @@ func setChannelEnabled(enabled bool, channelIDs []string) error {
	}
	err := orm.UpdateBy(alerting.Channel{}, util.MustToJSONBytes(q))
	return err
}
@ -47,11 +47,11 @@ import (
func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	body := struct {
		Messages      []alerting.AlertMessage `json:"messages"`
		IgnoredReason string                  `json:"ignored_reason"`
		IsReset       bool                    `json:"is_reset"`
	}{}
	err := h.DecodeJSON(req, &body)
	if err != nil {
		h.WriteError(w, err.Error(), http.StatusInternalServerError)
		return
@ -83,7 +83,7 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request,
			},
		})
		source = fmt.Sprintf("ctx._source['status'] = '%s'", alerting.MessageStateAlerting)
	} else {
		must = append(must, util.MapStr{
			"term": util.MapStr{
				"status": util.MapStr{
@ -114,9 +114,8 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request,
		_ = kv.DeleteKey(alerting2.KVLastMessageState, []byte(msg.RuleID))
	}
	h.WriteJSON(w, util.MapStr{
		"ids":    messageIDs,
		"result": "updated",
	}, 200)
}
@ -138,7 +137,7 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
		return
	}
	if !hasAllPrivilege {
		must = append(must, clusterFilter)
	}
	queryDsl := util.MapStr{
		"size": 0,
@ -151,13 +150,13 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
"terms_by_priority": util.MapStr{ "terms_by_priority": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "priority", "field": "priority",
"size": 5, "size": 5,
}, },
}, },
}, },
} }
indexName := orm.GetWildcardIndexName(alerting.AlertMessage{}) indexName := orm.GetWildcardIndexName(alerting.AlertMessage{})
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl) ) searchRes, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl))
if err != nil { if err != nil {
h.WriteJSON(w, util.MapStr{ h.WriteJSON(w, util.MapStr{
"error": err.Error(), "error": err.Error(),
@ -172,7 +171,7 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
			}
		}
	}
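	// make sure every known priority shows up in the stats, even when it has no alerts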
	for _, status := range []string{"info", "low", "medium", "high", "critical"} {
		if _, ok := statusCounts[status]; !ok {
			statusCounts[status] = 0
		}
@ -206,18 +205,18 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
"terms_by_category": util.MapStr{ "terms_by_category": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "category", "field": "category",
"size": 100, "size": 100,
}, },
}, },
"terms_by_tags": util.MapStr{ "terms_by_tags": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "tags", "field": "tags",
"size": 100, "size": 100,
}, },
}, },
}, },
} }
searchRes, err = esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl) ) searchRes, err = esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl))
if err != nil { if err != nil {
h.WriteJSON(w, util.MapStr{ h.WriteJSON(w, util.MapStr{
"error": err.Error(), "error": err.Error(),
@ -245,15 +244,14 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
"current": statusCounts, "current": statusCounts,
}, },
"categories": categories, "categories": categories,
"tags": tags, "tags": tags,
}, http.StatusOK) }, http.StatusOK)
} }
func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var ( var (
queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d,"aggs": { queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d,"aggs": {
"max_updated": { "max_updated": {
"max": { "max": {
"field": "updated" "field": "updated"
@ -267,16 +265,16 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request,
      }}`
		strSize     = h.GetParameterOrDefault(req, "size", "20")
		strFrom     = h.GetParameterOrDefault(req, "from", "0")
		status      = h.GetParameterOrDefault(req, "status", "")
		priority    = h.GetParameterOrDefault(req, "priority", "")
		sort        = h.GetParameterOrDefault(req, "sort", "")
		ruleID      = h.GetParameterOrDefault(req, "rule_id", "")
		min         = h.GetParameterOrDefault(req, "min", "")
		max         = h.GetParameterOrDefault(req, "max", "")
		mustBuilder = &strings.Builder{}
		sortBuilder = strings.Builder{}
		category    = h.GetParameterOrDefault(req, "category", "")
		tags        = h.GetParameterOrDefault(req, "tags", "")
	)
	timeRange := util.MapStr{}
	if min != "" {
@ -292,7 +290,7 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request,
			},
		}
		mustBuilder.Write(util.MustToJSONBytes(timeFilter))
	} else {
		mustBuilder.WriteString(`{"match_all":{}}`)
	}
@ -374,7 +372,7 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request,
	h.WriteJSON(w, esRes, http.StatusOK)
}
func parseTime(t interface{}, layout string) (time.Time, error) {
	switch t.(type) {
	case string:
		return time.Parse(layout, t.(string))
@ -384,7 +382,7 @@ func parseTime( t interface{}, layout string) (time.Time, error){
}
func (h *AlertAPI) getAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	message := &alerting.AlertMessage{
		ID: ps.ByName("message_id"),
	}
	exists, err := orm.Get(message)
@ -417,36 +415,36 @@ func (h *AlertAPI) getAlertMessage(w http.ResponseWriter, req *http.Request, ps
	var duration time.Duration
	if message.Status == alerting.MessageStateRecovered {
		duration = message.Updated.Sub(message.Created)
	} else {
		duration = time.Now().Sub(message.Created)
	}
	detailObj := util.MapStr{
		"message_id": message.ID,
		"rule_id": message.RuleID,
		"rule_name": rule.Name,
		"rule_enabled": rule.Enabled,
		"title": message.Title,
		"message": message.Message,
		"priority": message.Priority,
		"created": message.Created,
		"updated": message.Updated,
		"resource_name": rule.Resource.Name,
		"resource_id": rule.Resource.ID,
		"resource_objects": rule.Resource.Objects,
		"conditions": rule.Conditions,
		"duration": duration.Milliseconds(),
		"ignored_time": message.IgnoredTime,
		"ignored_reason": message.IgnoredReason,
		"ignored_user": message.IgnoredUser,
		"status": message.Status,
		"expression": rule.Metrics.Expression,
		"hit_condition": hitCondition,
	}
	h.WriteJSON(w, detailObj, http.StatusOK)
}
func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	message := &alerting.AlertMessage{
		ID: ps.ByName("message_id"),
	}
	exists, err := orm.Get(message)
@ -481,12 +479,12 @@ func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.R
	}
	if rule.NotificationConfig != nil {
		notificationInfo["alerting"] = util.MapStr{
			"accept_time_range": rule.NotificationConfig.AcceptTimeRange,
			"throttle_period": rule.NotificationConfig.ThrottlePeriod,
			"escalation_enabled": rule.NotificationConfig.EscalationEnabled,
			"escalation_throttle_period": rule.NotificationConfig.EscalationThrottlePeriod,
			"normal_stats": stats["normal"],
			"escalation_stats": stats["escalation"],
		}
	}
	if rule.RecoveryNotificationConfig != nil {
@ -497,7 +495,7 @@ func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.R
	h.WriteJSON(w, notificationInfo, http.StatusOK)
}
func getMessageNotificationStats(msg *alerting.AlertMessage) (util.MapStr, error) {
	rangeQ := util.MapStr{
		"gte": msg.Created.UnixMilli(),
	}
@ -508,7 +506,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
"grp_normal_channel": util.MapStr{ "grp_normal_channel": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "action_execution_results.channel_type", "field": "action_execution_results.channel_type",
"size": 20, "size": 20,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
"top": util.MapStr{ "top": util.MapStr{
@ -531,7 +529,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
"grp_escalation_channel": util.MapStr{ "grp_escalation_channel": util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "escalation_action_results.channel_type", "field": "escalation_action_results.channel_type",
"size": 20, "size": 20,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
"top": util.MapStr{ "top": util.MapStr{
@ -556,7 +554,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
aggs["grp_recover_channel"] = util.MapStr{ aggs["grp_recover_channel"] = util.MapStr{
"terms": util.MapStr{ "terms": util.MapStr{
"field": "recover_action_results.channel_type", "field": "recover_action_results.channel_type",
"size": 20, "size": 20,
}, },
"aggs": util.MapStr{ "aggs": util.MapStr{
"top": util.MapStr{ "top": util.MapStr{
@ -610,7 +608,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
	var normalStats = extractStatsFromRaw(result.Raw, "grp_normal_channel", "action_execution_results")
	var escalationStats = extractStatsFromRaw(result.Raw, "grp_escalation_channel", "escalation_action_results")
	stats := util.MapStr{
		"normal":     normalStats,
		"escalation": escalationStats,
	}
	if msg.Status == alerting.MessageStateRecovered {
@ -627,15 +625,15 @@ func extractStatsFromRaw(searchRawRes []byte, grpKey string, actionKey string) [
statsItem["channel_type"], _ = jsonparser.GetString(value, "key") statsItem["channel_type"], _ = jsonparser.GetString(value, "key")
statsItem["count"], _ = jsonparser.GetInt(value, "doc_count") statsItem["count"], _ = jsonparser.GetInt(value, "doc_count")
jsonparser.ArrayEach(value, func(v []byte, dataType jsonparser.ValueType, offset int, err error) { jsonparser.ArrayEach(value, func(v []byte, dataType jsonparser.ValueType, offset int, err error) {
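			// walk the bucket's top hit to recover the channel name and any recorded error for this action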
			ck, _ := jsonparser.GetString(v, "channel_type")
			cn, _ := jsonparser.GetString(v, "channel_name")
			if ck == statsItem["channel_type"] {
				statsItem["channel_name"] = cn
				statsItem["error"], _ = jsonparser.GetString(v, "error")
			}
		}, "top", "hits", "hits", "[0]", "_source", actionKey)
		statsItem["last_time"], _ = jsonparser.GetString(value, "top", "hits", "hits", "[0]", "_source", "created")
		stats = append(stats, statsItem)
	}, "aggregations", grpKey, "buckets")
	return stats
}
@ -48,9 +48,9 @@ func InitAPI() {
	api.HandleAPIMethod(api.POST, "/email/server/_test", email.RequirePermission(email.testEmailServer, enum.PermissionSmtpServerRead))
	api.HandleAPIMethod(api.GET, "/email/server/:email_server_id", email.RequirePermission(email.getEmailServer, enum.PermissionAlertRuleRead))
	api.HandleAPIMethod(api.POST, "/email/server", email.RequirePermission(email.createEmailServer, enum.PermissionSmtpServerWrite))
	api.HandleAPIMethod(api.PUT, "/email/server/:email_server_id", email.RequirePermission(email.updateEmailServer, enum.PermissionSmtpServerWrite))
	api.HandleAPIMethod(api.DELETE, "/email/server/:email_server_id", email.RequirePermission(email.deleteEmailServer, enum.PermissionSmtpServerWrite))
	api.HandleAPIMethod(api.GET, "/email/server/_search", email.RequirePermission(email.searchEmailServer, enum.PermissionSmtpServerRead))
	credential.RegisterChangeEvent(func(cred *credential.Credential) {
		query := util.MapStr{


@ -38,8 +38,7 @@ import (
)

func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	reqParams := elastic.CommonCommand{}
	err := h.DecodeJSON(req, &reqParams)
@ -54,8 +53,8 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
	reqParams.ID = util.GetUUID()
	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
	queryDSL := []byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title))
	var indexName = orm.GetIndexName(reqParams)
	searchRes, err := esClient.SearchWithRawQueryDSL(indexName, queryDSL)
	if err != nil {
		log.Error(err)
@ -63,13 +62,13 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
		h.WriteJSON(w, resBody, http.StatusOK)
		return
	}
	if len(searchRes.Hits.Hits) > 0 {
		resBody["error"] = "title already exists"
		log.Error(resBody["error"])
		h.WriteJSON(w, resBody, http.StatusOK)
		return
	}
	_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
	if err != nil {
		log.Error(err)
		resBody["error"] = err.Error()
@ -81,12 +80,11 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
	resBody["result"] = "created"
	resBody["_source"] = reqParams
	h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	reqParams := elastic.CommonCommand{}
	err := h.DecodeJSON(req, &reqParams)
@ -99,8 +97,8 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
	reqParams.ID = ps.ByName("cid")
	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
	queryDSL := []byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title))
	var indexName = orm.GetIndexName(reqParams)
	searchRes, err := esClient.SearchWithRawQueryDSL(indexName, queryDSL)
	if err != nil {
		log.Error(err)
@ -108,13 +106,13 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	if len(searchRes.Hits.Hits) > 0 && searchRes.Hits.Hits[0].ID != reqParams.ID {
		resBody["error"] = "title already exists"
		log.Error(resBody["error"])
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
	if err != nil {
		log.Error(err)
		resBody["error"] = err.Error()
@ -126,21 +124,20 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
	resBody["result"] = "updated"
	resBody["_source"] = reqParams
	h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	var (
		keyword       = h.GetParameterOrDefault(req, "keyword", "")
		queryDSL      = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
		strSize       = h.GetParameterOrDefault(req, "size", "20")
		strFrom       = h.GetParameterOrDefault(req, "from", "0")
		filterBuilder = &strings.Builder{}
	)
	if keyword != "" {
		filterBuilder.WriteString(fmt.Sprintf(`{"query_string": {
			"default_field": "*",
			"query": "%s"
@ -167,7 +164,7 @@ func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req *
		return
	}
	h.WriteJSON(w, searchRes, http.StatusOK)
}

func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -178,9 +175,9 @@ func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req
	if err != nil {
		log.Error(err)
		resBody["error"] = err.Error()
		if delRes != nil {
			h.WriteJSON(w, resBody, delRes.StatusCode)
		} else {
			h.WriteJSON(w, resBody, http.StatusInternalServerError)
		}
		return
@ -190,4 +187,4 @@ func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req
	resBody["_id"] = id
	resBody["result"] = delRes.Result
	h.WriteJSON(w, resBody, delRes.StatusCode)
}
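
Since the handlers above splice reqParams.Title into the query DSL with fmt.Sprintf, a title containing quotes or backslashes could break the generated JSON. A sketch of an escape-safe alternative using only encoding/json (the helper name is hypothetical, not part of this PR):

package main

import (
	"encoding/json"
	"fmt"
)

// buildTitleQuery produces the same title-uniqueness DSL, but lets the JSON
// encoder escape the user-supplied title.
func buildTitleQuery(title string) ([]byte, error) {
	q := map[string]interface{}{
		"size": 1,
		"query": map[string]interface{}{
			"bool": map[string]interface{}{
				"must": map[string]interface{}{
					"match": map[string]interface{}{"title.keyword": title},
				},
			},
		},
	}
	return json.Marshal(q)
}

func main() {
	dsl, _ := buildTitleQuery(`restart "all" nodes`)
	fmt.Println(string(dsl))
}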


@ -34,8 +34,8 @@ import (
)

type docReqBody struct {
	From    int    `json:"from"`
	Size    int    `json:"size"`
	Filter  string `json:"filter"`
	Cluster string `json:"cluster"`
	Keyword string `json:"keyword"`
@ -155,7 +155,7 @@ func (handler APIHandler) HandleSearchDocumentAction(w http.ResponseWriter, req
	}
	indexName := ps.ByName("index")
	var (
		sort = ""
	)
	if reqBody.From < 0 {
		reqBody.From = 0
@ -206,7 +206,7 @@ func (handler APIHandler) ValidateDocIDAction(w http.ResponseWriter, req *http.R
	var (
		index = handler.GetParameter(req, "index")
		docID = handler.GetParameter(req, "doc_id")
		typ   = handler.GetParameter(req, "type")
	)
	getRes, err := client.Get(index, typ, docID)
	if err != nil {


@ -28,13 +28,13 @@
package insight

import (
	log "github.com/cihub/seelog"
	insight2 "infini.sh/console/model/insight"
	httprouter "infini.sh/framework/core/api/router"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	"net/http"
	"strconv"
)

func (h *InsightAPI) createDashboard(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {


@ -70,8 +70,8 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
			kv := strings.Split(part, "=")
			if len(kv) == 2 {
				k := strings.TrimSpace(kv[0])
				kvs[k] = strings.TrimSpace(kv[1])
			} else {
				log.Debugf("got unexpected directory part: %s", part)
			}
		}
@ -93,7 +93,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
		}
	}
	valueField = kvs["property"]
	if indexName == "" || keyField == "" || valueField == "" {
		return kvs["default"]
	}
@ -107,7 +107,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
	}
	var (
		cacheLabels map[string]string
		ok          bool
	)
	if cacheLabels, ok = cacheLabelsMap[cacheKey]; !ok {
		var keyFieldValues []string
@ -120,7 +120,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
		cacheLabels, err = common2.GetLabelMaps(indexName, keyField, valueField, client, keyFieldValues, len(keyFieldValues))
		if err != nil {
			log.Error(err)
		} else {
			cacheLabelsMap[cacheKey] = cacheLabels
		}
	}
@ -155,10 +155,10 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
type RenderTemplateRequest struct {
	Contexts []RenderTemplateContext `json:"contexts"`
	Template string                  `json:"template"`
}

type RenderTemplateContext struct {
	Key   string                 `json:"key"`
	Value map[string]interface{} `json:"value"`
}
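
Based only on the two structs above, a request body for the render endpoint would marshal like this (the field values are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

type RenderTemplateContext struct {
	Key   string                 `json:"key"`
	Value map[string]interface{} `json:"value"`
}

type RenderTemplateRequest struct {
	Contexts []RenderTemplateContext `json:"contexts"`
	Template string                  `json:"template"`
}

func main() {
	reqBody := RenderTemplateRequest{
		Template: "{{.host.name}}",
		Contexts: []RenderTemplateContext{
			{Key: "host", Value: map[string]interface{}{"name": "node-1"}},
		},
	}
	b, _ := json.MarshalIndent(reqBody, "", "  ")
	fmt.Println(string(b))
}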


@ -280,7 +280,7 @@ func getMetricData(metric *insight.Metric) (interface{}, error) {
	params := map[string]interface{}{}
	if metric.BucketSize != "" {
		bucketSize := metric.BucketSize
		if metric.BucketSize == "auto" && interval != "" {
			bucketSize = interval
		}
		if interval != "" || bucketSize != "auto" {
@ -348,13 +348,13 @@ func getMetricData(metric *insight.Metric) (interface{}, error) {
			}
		}
		retMetricDataItem.Timestamp = timestamp
		if len(metric.Formulas) <= 1 && metric.Formula != "" {
			//support older versions by returning the result for a single formula.
			retMetricDataItem.Value = result
		} else {
			if v, ok := retMetricDataItem.Value.(map[string]interface{}); ok {
				v[formula] = result
			} else {
				retMetricDataItem.Value = map[string]interface{}{formula: result}
			}
		}
@ -375,7 +375,7 @@ func getMetricData(metric *insight.Metric) (interface{}, error) {
		}
	}
	return util.MapStr{
		"data":    result,
		"request": string(queryDSL),
	}, nil
}


@ -115,8 +115,8 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
		}
	}
	var (
		useDateHistogram     = false
		dateHistogramAgg     util.MapStr
		dateHistogramAggName string
	)
	if metric.BucketSize != "" && metric.TimeField != "" {
@ -128,10 +128,10 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
				buckets = 2
			}
			dateHistogramAgg = util.MapStr{
				"field":   metric.TimeField,
				"buckets": buckets,
			}
		} else {
			dateHistogramAggName = "date_histogram"
			verInfo := elastic.GetClient(metric.ClusterId).GetVersion()
@ -144,7 +144,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
				return nil, fmt.Errorf("get interval field error: %w", err)
			}
			dateHistogramAgg = util.MapStr{
				"field":       metric.TimeField,
				intervalField: metric.BucketSize,
			}
		}
@ -154,7 +154,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
		basicAggs = util.MapStr{
			"time_buckets": util.MapStr{
				dateHistogramAggName: dateHistogramAgg,
				"aggs":               basicAggs,
			},
		}
	}
@ -179,7 +179,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
			"field": groups[i].Field,
			"size":  limit,
		}
		if i == grpLength-1 && len(metric.Sort) > 0 {
			//use bucket sort instead of terms order when time after group
			if !timeBeforeGroup && len(metric.Sort) > 0 {
				basicAggs["sort_field"] = util.MapStr{
@ -197,7 +197,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
					},
				},
			}
		} else {
			var termsOrder []interface{}
			percentAggs := []string{"p99", "p95", "p90", "p80", "p50"}
			for _, sortItem := range metric.Sort {
@ -250,7 +250,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
		basicAggs = util.MapStr{
			"time_buckets": util.MapStr{
				dateHistogramAggName: dateHistogramAgg,
				"aggs":               basicAggs,
			},
		}
	}
@ -288,7 +288,7 @@ func CollectMetricData(agg interface{}, timeBeforeGroup bool) ([]insight.MetricD
}

// timeBeforeGroup => false
func collectMetricData(agg interface{}, groupValues []string, metricData *[]insight.MetricData) (interval string) {
	if aggM, ok := agg.(map[string]interface{}); ok {
		if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok {
			interval, _ = timeBks["interval"].(string)
@ -351,7 +351,7 @@ func collectMetricData(agg interface{}, groupValues []string, metricData *[]insi
}

// timeBeforeGroup => true
func collectMetricDataOther(agg interface{}, groupValues []string, metricData *[]insight.MetricData, timeKey interface{}) (interval string) {
	if aggM, ok := agg.(map[string]interface{}); ok {
		if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok {
			interval, _ = timeBks["interval"].(string)
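
The version-dependent intervalField above comes from the framework's GetDateHistogramIntervalField. As background, Elasticsearch 7.2 deprecated the date_histogram "interval" parameter in favour of "fixed_interval"/"calendar_interval"; a stdlib-only stand-in for that choice (illustrative, not the real implementation) might look like this:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// intervalField picks the date_histogram parameter name by ES version.
// Sketch only; the framework helper also accounts for the distribution.
func intervalField(esVersion string) string {
	parts := strings.SplitN(esVersion, ".", 3)
	major, _ := strconv.Atoi(parts[0])
	minor := 0
	if len(parts) > 1 {
		minor, _ = strconv.Atoi(parts[1])
	}
	if major > 7 || (major == 7 && minor >= 2) {
		return "fixed_interval"
	}
	return "interval"
}

func main() {
	fmt.Println(intervalField("6.8.23")) // interval
	fmt.Println(intervalField("7.10.2")) // fixed_interval
}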


@ -34,8 +34,8 @@ import (
"strings" "strings"
log "github.com/cihub/seelog" log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/console/model/insight" "infini.sh/console/model/insight"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/orm" "infini.sh/framework/core/orm"
"infini.sh/framework/core/util" "infini.sh/framework/core/util"
) )


@ -213,7 +213,7 @@ func GetCollectionMetas() map[string]CollectionMeta {
			MatchObject: &alerting.Rule{},
		},
		"metric": {
			Name:        "metric",
			MatchObject: &insight.MetricBase{},
		},
	}


@ -29,13 +29,13 @@ package server
import (
	log "github.com/cihub/seelog"
	httprouter "infini.sh/framework/core/api/router"
	config3 "infini.sh/framework/core/config"
	"infini.sh/framework/core/global"
	"infini.sh/framework/core/model"
	"infini.sh/framework/core/util"
	"infini.sh/framework/modules/configs/common"
	"infini.sh/framework/modules/configs/config"
	"net/http"
	"path"
	"sync"


@ -37,13 +37,13 @@ import (
log "github.com/cihub/seelog" log "github.com/cihub/seelog"
"infini.sh/console/core/security/enum" "infini.sh/console/core/security/enum"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/core/api" "infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router" httprouter "infini.sh/framework/core/api/router"
elastic2 "infini.sh/framework/core/elastic" elastic2 "infini.sh/framework/core/elastic"
"infini.sh/framework/core/model" "infini.sh/framework/core/model"
"infini.sh/framework/core/orm" "infini.sh/framework/core/orm"
"infini.sh/framework/core/util" "infini.sh/framework/core/util"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/modules/elastic" "infini.sh/framework/modules/elastic"
common2 "infini.sh/framework/modules/elastic/common" common2 "infini.sh/framework/modules/elastic/common"
) )


@ -32,11 +32,11 @@ import (
"fmt" "fmt"
log "github.com/cihub/seelog" log "github.com/cihub/seelog"
"infini.sh/console/core" "infini.sh/console/core"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/core/api" "infini.sh/framework/core/api"
"infini.sh/framework/core/errors" "infini.sh/framework/core/errors"
"infini.sh/framework/core/global" "infini.sh/framework/core/global"
"infini.sh/framework/core/util" "infini.sh/framework/core/util"
"infini.sh/framework/modules/configs/common"
"net" "net"
"net/http" "net/http"
"net/url" "net/url"


@ -35,30 +35,30 @@ import (
)

type EmailAction struct {
	Data    *alerting.Email
	Subject string
	Body    string
}

const EmailQueueName = "email_messages"

func (act *EmailAction) Execute() ([]byte, error) {
	queueCfg := queue.GetOrInitConfig(EmailQueueName)
	if act.Data.ServerID == "" {
		return nil, fmt.Errorf("parameter server_id must not be empty")
	}
	emailMsg := util.MapStr{
		"server_id": act.Data.ServerID,
		"email":     act.Data.Recipients.To,
		"template":  "raw",
		"variables": util.MapStr{
			"subject":      act.Subject,
			"body":         act.Body,
			"content_type": act.Data.ContentType,
			"cc":           act.Data.Recipients.CC,
		},
	}
	emailMsgBytes := util.MustToJSONBytes(emailMsg)
	err := queue.Push(queueCfg, emailMsgBytes)
	return nil, err
}
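
A self-contained approximation of the message that Execute pushes onto the "email_messages" queue, using only the standard library (the real code relies on the framework's util.MapStr and queue packages; all values below are invented):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	emailMsg := map[string]interface{}{
		"server_id": "smtp-server-1", // hypothetical ID for illustration
		"email":     []string{"ops@example.com"},
		"template":  "raw",
		"variables": map[string]interface{}{
			"subject":      "disk usage above 80%",
			"body":         "<p>node-1 is running out of space</p>",
			"content_type": "text/html",
			"cc":           []string{},
		},
	}
	payload, err := json.Marshal(emailMsg)
	if err != nil {
		panic(err)
	}
	// A real consumer would pop this payload off the queue and send the mail.
	fmt.Println(string(payload))
}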


@ -40,7 +40,7 @@ type Action interface {
}

type WebhookAction struct {
	Data    *alerting.CustomWebhook
	Message string
}
@ -50,7 +50,7 @@ var actionClient = http.Client{
	},
}

func (act *WebhookAction) Execute() ([]byte, error) {
	var reqURL = act.Data.URL
	reqBody := strings.NewReader(act.Message)
	req, err := http.NewRequest(http.MethodPost, reqURL, reqBody)
@ -67,4 +67,3 @@ func (act *WebhookAction) Execute()([]byte, error){
	defer res.Body.Close()
	return ioutil.ReadAll(res.Body)
}
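
A minimal stand-alone version of the webhook call above, assuming nothing beyond the standard library; note io.ReadAll replaces the deprecated ioutil.ReadAll on Go 1.16+:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

var actionClient = http.Client{Timeout: 10 * time.Second}

func executeWebhook(url, message string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodPost, url, strings.NewReader(message))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	res, err := actionClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	return io.ReadAll(res.Body)
}

func main() {
	// Hypothetical endpoint for illustration only.
	body, err := executeWebhook("https://example.com/hooks/alerts", `{"text":"disk usage alert"}`)
	fmt.Println(string(body), err)
}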


@ -42,9 +42,9 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by
return nil, fmt.Errorf("empty channel"), nil return nil, fmt.Errorf("empty channel"), nil
} }
var ( var (
act action.Action act action.Action
message []byte message []byte
err error err error
) )
switch channel.Type { switch channel.Type {
@ -75,7 +75,7 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by
act = &action.EmailAction{ act = &action.EmailAction{
Data: channel.Email, Data: channel.Email,
Subject: string(subjectBytes), Subject: string(subjectBytes),
Body: string(message), Body: string(message),
} }
default: default:
return nil, fmt.Errorf("unsupported action type: %s", channel.Type), message return nil, fmt.Errorf("unsupported action type: %s", channel.Type), message
@ -84,10 +84,10 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by
return executeResult, err, message return executeResult, err, message
} }
func ResolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error){ func ResolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error) {
msg := messageTemplate msg := messageTemplate
tmpl, err := template.New("alert-message").Funcs(funcs.GenericFuncMap()).Parse(msg) tmpl, err := template.New("alert-message").Funcs(funcs.GenericFuncMap()).Parse(msg)
if err !=nil { if err != nil {
return nil, fmt.Errorf("parse message temlate error: %w", err) return nil, fmt.Errorf("parse message temlate error: %w", err)
} }
msgBuffer := &bytes.Buffer{} msgBuffer := &bytes.Buffer{}
@ -120,17 +120,17 @@ func RetrieveChannel(ch *alerting.Channel, raiseChannelEnabledErr bool) (*alerti
case alerting.ChannelEmail: case alerting.ChannelEmail:
if ch.Email == nil { if ch.Email == nil {
ch.Email = refCh.Email ch.Email = refCh.Email
}else{ } else {
ch.Email.ServerID = refCh.Email.ServerID ch.Email.ServerID = refCh.Email.ServerID
ch.Email.Recipients = refCh.Email.Recipients ch.Email.Recipients = refCh.Email.Recipients
} }
case alerting.ChannelWebhook: case alerting.ChannelWebhook:
if ch.Webhook == nil { if ch.Webhook == nil {
ch.Webhook = refCh.Webhook ch.Webhook = refCh.Webhook
}else { } else {
ch.Webhook.URL = refCh.Webhook.URL ch.Webhook.URL = refCh.Webhook.URL
} }
} }
} }
return ch, nil return ch, nil
} }
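
A reduced version of ResolveMessage using only text/template from the standard library (the real code also wires a generic template FuncMap into the template; the sample context keys mirror the alerting parameters used elsewhere in this commit):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func resolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error) {
	tmpl, err := template.New("alert-message").Parse(messageTemplate)
	if err != nil {
		return nil, fmt.Errorf("parse message template error: %w", err)
	}
	msgBuffer := &bytes.Buffer{}
	if err := tmpl.Execute(msgBuffer, ctx); err != nil {
		return nil, err
	}
	return msgBuffer.Bytes(), nil
}

func main() {
	out, err := resolveMessage(
		"[{{.rule_name}}] {{.first_group_value}} exceeded {{.first_threshold}}",
		map[string]interface{}{
			"rule_name":         "disk usage",
			"first_group_value": "node-1",
			"first_threshold":   "80",
		},
	)
	fmt.Println(string(out), err)
}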


@ -29,12 +29,11 @@ package alerting
const (
	KVLastNotificationTime = "alert_last_notification_time"
	KVLastTermStartTime    = "alert_last_term_start_time"
	KVLastEscalationTime   = "alert_last_escalation_time"
	KVLastMessageState     = "alert_last_message_state"
)

const (
	ParamRuleID     = "rule_id"     // rule UUID
	ParamResourceID = "resource_id" // resource UUID
@ -50,6 +49,7 @@ const (
	ParamGroupValues    = "group_values"
	ParamIssueTimestamp = "issue_timestamp"
	ParamRelationValues = "relation_values"
	// rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
	// check_status ,timestamp,
)


@ -34,10 +34,10 @@ import (
log "github.com/cihub/seelog" log "github.com/cihub/seelog"
"infini.sh/console/model" "infini.sh/console/model"
"infini.sh/console/model/alerting" "infini.sh/console/model/alerting"
"infini.sh/console/model/insight"
alerting2 "infini.sh/console/service/alerting" alerting2 "infini.sh/console/service/alerting"
"infini.sh/console/service/alerting/common" "infini.sh/console/service/alerting/common"
"infini.sh/framework/core/elastic" "infini.sh/framework/core/elastic"
"infini.sh/console/model/insight"
"infini.sh/framework/core/kv" "infini.sh/framework/core/kv"
"infini.sh/framework/core/orm" "infini.sh/framework/core/orm"
"infini.sh/framework/core/util" "infini.sh/framework/core/util"
@ -50,14 +50,14 @@ import (
) )
type Engine struct { type Engine struct {
} }
//GenerateQuery generate a final elasticsearch query dsl object
//when RawFilter of rule is not empty, priority use it, otherwise to covert from Filter of rule (todo) // GenerateQuery generate a final elasticsearch query dsl object
//auto generate time filter query and then attach to final query // when RawFilter of rule is not empty, priority use it, otherwise to covert from Filter of rule (todo)
//auto generate elasticsearch aggregations by metrics of rule // auto generate time filter query and then attach to final query
//group of metric item converted to terms aggregation and TimeField of rule converted to date_histogram aggregation // auto generate elasticsearch aggregations by metrics of rule
//convert statistic of metric item to elasticsearch aggregation // group of metric item converted to terms aggregation and TimeField of rule converted to date_histogram aggregation
// convert statistic of metric item to elasticsearch aggregation
func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error) { func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error) {
filter, err := engine.GenerateRawFilter(rule, filterParam) filter, err := engine.GenerateRawFilter(rule, filterParam)
if err != nil { if err != nil {
@ -72,7 +72,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
	}
	basicAggs := util.MapStr{}
	//todo bucket sort (es 6.1) bucket script (es 2.0)
	for _, metricItem := range rule.Metrics.Items {
		metricAggs := engine.generateAgg(&metricItem)
		if err = util.MergeFields(basicAggs, metricAggs, true); err != nil {
			return nil, err
@ -81,21 +81,21 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
	verInfo := elastic.GetClient(rule.Resource.ID).GetVersion()
	var periodInterval = rule.Metrics.BucketSize
	if filterParam != nil && filterParam.BucketSize != "" {
		periodInterval = filterParam.BucketSize
	}
	if verInfo.Number == "" {
		panic("invalid version")
	}
	intervalField, err := elastic.GetDateHistogramIntervalField(verInfo.Distribution, verInfo.Number, periodInterval)
	if err != nil {
		return nil, fmt.Errorf("get interval field error: %w", err)
	}
	timeAggs := util.MapStr{
		"time_buckets": util.MapStr{
			"date_histogram": util.MapStr{
				"field":       rule.Resource.TimeField,
				intervalField: periodInterval,
			},
			"aggs": basicAggs,
@ -107,7 +107,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
	if grpLength := len(groups); grpLength > 0 {
		var lastGroupAgg util.MapStr
		for i := grpLength - 1; i >= 0; i-- {
			limit := groups[i].Limit
			//top group 10
			if limit <= 0 {
@ -116,7 +116,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
			groupAgg := util.MapStr{
				"terms": util.MapStr{
					"field": groups[i].Field,
					"size":  limit,
				},
			}
			groupID := util.GetUUID()
@ -124,7 +124,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
				groupAgg["aggs"] = util.MapStr{
					groupID: lastGroupAgg,
				}
			} else {
				groupAgg["aggs"] = timeAggs
			}
			lastGroupAgg = groupAgg
@ -132,29 +132,30 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
		rootAggs = util.MapStr{
			util.GetUUID(): lastGroupAgg,
		}
	} else {
		rootAggs = timeAggs
	}
	if len(filter) > 0 {
		rootAggs = util.MapStr{
			"filter_agg": util.MapStr{
				"filter": filter,
				"aggs":   rootAggs,
			},
		}
	}
	return util.MapStr{
		"size":  0,
		"query": timeFilter,
		"aggs":  rootAggs,
	}, nil
}

// generateAgg convert statistic of metric item to elasticsearch aggregation
func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]interface{} {
	var (
		aggType = "value_count"
		field   = metricItem.Field
	)
	if field == "" || field == "*" {
		field = "_id"
@ -171,7 +172,7 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
		isPipeline = true
	case "medium": // from es version 6.6
		aggType = "median_absolute_deviation"
	case "p99", "p95", "p90", "p80", "p50":
		aggType = "percentiles"
		percentStr := strings.TrimPrefix(metricItem.Statistic, "p")
		percent, _ = strconv.ParseFloat(percentStr, 32)
@ -187,7 +188,7 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
			aggType: aggValue,
		},
	}
	if !isPipeline {
		return aggs
	}
	pipelineAggID := util.GetUUID()
@ -200,8 +201,8 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
	return aggs
}

func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[string]interface{}, error) {
	if !fq.IsComplex() {
		q := map[string]interface{}{}
		if len(fq.Values) == 0 {
			return nil, fmt.Errorf("values should not be empty")
@ -267,14 +268,14 @@ func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[str
		filterQueries []alerting.FilterQuery
	)
	if len(fq.Not) > 0 {
		boolOperator = "must_not"
		filterQueries = fq.Not
	} else if len(fq.Or) > 0 {
		boolOperator = "should"
		filterQueries = fq.Or
	} else {
		boolOperator = "must"
		filterQueries = fq.And
	}
@ -299,15 +300,15 @@ func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[str
	return resultQuery, nil
}

func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}) {
	var (
		timeStart interface{}
		timeEnd   interface{}
	)
	if filterParam != nil {
		timeStart = filterParam.Start
		timeEnd = filterParam.End
	} else {
		var (
			units string
			value int
@ -316,23 +317,23 @@ func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (
		if err != nil {
			return nil, fmt.Errorf("parse bucket size of rule [%s] error: %v", rule.Name, err)
		}
		if intervalDuration/time.Hour >= 1 {
			units = "h"
			value = int(intervalDuration / time.Hour)
		} else if intervalDuration/time.Minute >= 1 {
			units = "m"
			value = int(intervalDuration / time.Minute)
		} else if intervalDuration/time.Second >= 1 {
			units = "s"
			value = int(intervalDuration / time.Second)
		} else {
			return nil, fmt.Errorf("period interval: %s is too small", rule.Metrics.BucketSize)
		}
		bucketCount := rule.Conditions.GetMinimumPeriodMatch() + 1
		if bucketCount <= 0 {
			bucketCount = 1
		}
		duration, err := time.ParseDuration(fmt.Sprintf("%d%s", value*bucketCount, units))
		if err != nil {
			return nil, err
		}
@ -342,7 +343,7 @@ func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (
	return timeStart, timeEnd
}
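
A stand-alone sketch (stdlib only, names invented) of the window arithmetic above: a bucket size and a minimum-period-match count become a "now - N*bucket" start time, mirroring the hour/minute/second cascade:

package main

import (
	"fmt"
	"time"
)

func lookbackWindow(bucketSize string, minimumPeriodMatch int) (time.Time, time.Time, error) {
	interval, err := time.ParseDuration(bucketSize)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	bucketCount := minimumPeriodMatch + 1
	if bucketCount <= 0 {
		bucketCount = 1
	}
	end := time.Now()
	start := end.Add(-time.Duration(bucketCount) * interval)
	return start, end, nil
}

func main() {
	// Three 10-minute buckets -> a 30-minute lookback window.
	start, end, _ := lookbackWindow("10m", 2)
	fmt.Println(start.Format(time.RFC3339), "->", end.Format(time.RFC3339))
}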
func (engine *Engine) generateTimeFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error) {
	timeStart, timeEnd := getQueryTimeRange(rule, filterParam)
	timeQuery := util.MapStr{
		"range": util.MapStr{
@ -360,8 +361,8 @@ func (engine *Engine) GenerateRawFilter(rule *alerting.Rule, filterParam *alerti
	var err error
	if rule.Resource.RawFilter != nil {
		query = util.DeepCopy(rule.Resource.RawFilter).(map[string]interface{})
	} else {
		if !rule.Resource.Filter.IsEmpty() {
			query, err = engine.ConvertFilterQueryToDsl(&rule.Resource.Filter)
			if err != nil {
				return nil, err
@ -405,7 +406,7 @@ func (engine *Engine) GenerateRawFilter(rule *alerting.Rule, filterParam *alerti
	return query, nil
}

func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.QueryResult, error) {
	esClient := elastic.GetClient(rule.Resource.ID)
	queryResult := &alerting.QueryResult{}
	indexName := strings.Join(rule.Resource.Objects, ",")
@ -470,7 +471,7 @@ func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.Fi
	queryResult.MetricData = metricData
	return queryResult, nil
}

func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam) ([]alerting.MetricData, *alerting.QueryResult, error) {
	queryResult, err := engine.ExecuteQuery(rule, filterParam)
	if err != nil {
		return nil, queryResult, err
@ -525,7 +526,7 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool,
			return nil, queryResult, err
		}
		if r, ok := result.(float64); ok {
			if math.IsNaN(r) || math.IsInf(r, 0) {
				if !isFilterNaN {
					targetData.Data["result"] = append(targetData.Data["result"], []interface{}{timestamp, math.NaN()})
				}
@ -540,10 +541,11 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool,
	}
	return targetMetricData, queryResult, nil
}

// CheckCondition check whether rule conditions triggered or not
// if triggered returns an ConditionResult
// sort conditions by priority desc before check , and then if condition is true, then continue check another group
func (engine *Engine) CheckCondition(rule *alerting.Rule) (*alerting.ConditionResult, error) {
	var resultItems []alerting.ConditionResultItem
	targetMetricData, queryResult, err := engine.GetTargetMetricData(rule, true, nil)
	conditionResult := &alerting.ConditionResult{
@ -558,7 +560,7 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionRes
			return alerting.PriorityWeights[rule.Conditions.Items[i].Priority] > alerting.PriorityWeights[rule.Conditions.Items[j].Priority]
		})
	}
LoopCondition:
	for _, cond := range rule.Conditions.Items {
		conditionExpression, err := cond.GenerateConditionExpression()
		if err != nil {
@ -580,8 +582,8 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionRes
				if targetData.Data[dataKey][i][1] == nil {
					continue
				}
				if r, ok := targetData.Data[dataKey][i][1].(float64); ok {
					if math.IsNaN(r) {
						continue
					}
				}
@ -593,19 +595,19 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionRes
				}
				if evaluateResult == true {
					triggerCount += 1
				} else {
					triggerCount = 0
				}
				if triggerCount >= cond.MinimumPeriodMatch {
					log.Debugf("triggered condition %v, groups: %v\n", cond, targetData.GroupValues)
					resultItem := alerting.ConditionResultItem{
						GroupValues:    targetData.GroupValues,
						ConditionItem:  &cond,
						ResultValue:    targetData.Data[dataKey][i][1],
						IssueTimestamp: targetData.Data[dataKey][i][0],
						RelationValues: map[string]interface{}{},
					}
					for _, metric := range rule.Metrics.Items {
						resultItem.RelationValues[metric.Name] = queryResult.MetricData[idx].Data[metric.Name][i][1]
					}
					resultItems = append(resultItems, resultItem)
@ -623,30 +625,30 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
	var (
		alertItem *alerting.Alert
		err       error
	)
	defer func() {
		if err != nil && alertItem == nil {
			alertItem = &alerting.Alert{
				ID:           util.GetUUID(),
				Created:      time.Now(),
				Updated:      time.Now(),
				RuleID:       rule.ID,
				RuleName:     rule.Name,
				ResourceID:   rule.Resource.ID,
				ResourceName: rule.Resource.Name,
				Expression:   rule.Metrics.Expression,
				Objects:      rule.Resource.Objects,
				State:        alerting.AlertStateError,
				//Priority: "undefine",
				Error: err.Error(),
			}
		}
		if alertItem != nil {
			if err != nil {
				alertItem.State = alerting.AlertStateError
				alertItem.Error = err.Error()
			} else {
				for _, actionResult := range alertItem.ActionExecutionResults {
					if actionResult.Error != "" {
						alertItem.Error = actionResult.Error
@ -670,24 +672,24 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		rule.Conditions.Items[i].Expression = strings.ReplaceAll(expression, "result", metricExpression)
	}
	alertItem = &alerting.Alert{
		ID:           util.GetUUID(),
		Created:      time.Now(),
		Updated:      time.Now(),
		RuleID:       rule.ID,
		RuleName:     rule.Name,
		ResourceID:   rule.Resource.ID,
		ResourceName: rule.Resource.Name,
		Expression:   rule.Metrics.Expression,
		Objects:      rule.Resource.Objects,
		Conditions:   rule.Conditions,
		State:        alerting.AlertStateOK,
	}
	checkResults, err := engine.CheckCondition(rule)
	alertItem.ConditionResult = checkResults
	if err != nil {
		return err
	}
	alertMessage, err := getLastAlertMessage(rule.ID, 2*time.Minute)
	if err != nil {
		return fmt.Errorf("get alert message error: %w", err)
	}
@ -696,12 +698,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
	if len(conditionResults) == 0 {
		alertItem.Priority = ""
		if checkResults.QueryResult.Nodata {
			alertItem.State = alerting.AlertStateNodata
		}
		if alertMessage != nil && alertMessage.Status != alerting.MessageStateRecovered && !checkResults.QueryResult.Nodata {
			alertMessage.Status = alerting.MessageStateRecovered
			alertMessage.ResourceID = rule.Resource.ID
			alertMessage.ResourceName = rule.Resource.Name
			err = saveAlertMessage(alertMessage)
			if err != nil {
@ -710,12 +712,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
			// todo add recover notification to inner system message
			// send recover message to channel
			recoverCfg := rule.RecoveryNotificationConfig
			if recoverCfg != nil && recoverCfg.EventEnabled && recoverCfg.Enabled {
				paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
					alerting2.ParamEventID:   alertMessage.ID,
					alerting2.ParamTimestamp: alertItem.Created.Unix(),
					"duration":               alertItem.Created.Sub(alertMessage.Created).String(),
					"trigger_at":             alertMessage.Created.Unix(),
				})
				err = attachTitleMessageToCtx(recoverCfg.Title, recoverCfg.Message, paramsCtx)
				if err != nil {
@ -747,9 +749,9 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		triggerAt = alertMessage.Created
	}
	paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
		alerting2.ParamTimestamp: alertItem.Created.Unix(),
		"duration":               alertItem.Created.Sub(triggerAt).String(),
		"trigger_at":             triggerAt.Unix(),
	})
	alertItem.Priority = priority
@ -772,8 +774,8 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
			Priority: priority,
			Title:    alertItem.Title,
			Message:  alertItem.Message,
			Tags:     rule.Tags,
			Category: rule.Category,
		}
		alertMessage = msg
		err = saveAlertMessage(msg)
@ -797,18 +799,18 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		if err != nil {
			return fmt.Errorf("failed to create notification, err: %w", err)
		}
	} else {
		alertMessage.Title = alertItem.Title
		alertMessage.Message = alertItem.Message
		alertMessage.ResourceID = rule.Resource.ID
		alertMessage.ResourceName = rule.Resource.Name
		alertMessage.Priority = priority
		err = saveAlertMessage(alertMessage)
		if err != nil {
			return fmt.Errorf("save alert message error: %w", err)
		}
	}
	log.Debugf("check condition result of rule %s is %v", conditionResults, rule.ID)
	// if alert message status equals ignored , then skip sending message to channel
	if alertMessage != nil && alertMessage.Status == alerting.MessageStateIgnored {
@ -834,7 +836,7 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		if err != nil {
			return fmt.Errorf("get last notification time from kv error: %w", err)
		}
		if !tm.IsZero() {
			rule.LastNotificationTime = tm
		}
	}
@ -845,8 +847,8 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
	paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
		alerting2.ParamTimestamp: alertItem.Created.Unix(),
		"priority":               priority,
		"duration":               alertItem.Created.Sub(alertMessage.Created).String(),
		"trigger_at":             alertMessage.Created.Unix(),
	})
	if alertMessage != nil {
		paramsCtx[alerting2.ParamEventID] = alertMessage.ID
@ -874,12 +876,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		rule.LastTermStartTime = alertMessage.Created
	}
	if time.Now().Sub(rule.LastTermStartTime.Local()) > throttlePeriod {
		if rule.LastEscalationTime.IsZero() {
			tm, err := readTimeFromKV(alerting2.KVLastEscalationTime, []byte(rule.ID))
			if err != nil {
				return fmt.Errorf("get last escalation time from kv error: %w", err)
			}
			if !tm.IsZero() {
				rule.LastEscalationTime = tm
			}
		}
@ -899,10 +901,10 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
	return nil
}
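
Illustrative-only sketch of the throttling idea visible in Do: a rule stays silenced until its throttle period has elapsed since the last notification (the helper name and shape are invented; the real code also reads the last-notification time from the KV store):

package main

import (
	"fmt"
	"time"
)

func shouldNotify(lastNotification time.Time, throttlePeriod time.Duration) bool {
	return lastNotification.IsZero() || time.Since(lastNotification) > throttlePeriod
}

func main() {
	last := time.Now().Add(-90 * time.Second)
	fmt.Println(shouldNotify(last, time.Minute))   // true: 90s > 1m
	fmt.Println(shouldNotify(last, 5*time.Minute)) // false: still throttled
}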
func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interface{}) error {
	var (
		tplBytes []byte
		err      error
	)
	tplBytes, err = common.ResolveMessage(message, paramsCtx)
	if err != nil {
@ -917,23 +919,23 @@ func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interfa
	return nil
}

func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{}) map[string]interface{} {
	var (
		conditionParams []util.MapStr
		firstGroupValue string
		firstThreshold  string
		priority        string
	)
	if len(checkResults.ResultItems) > 0 {
		priority = checkResults.ResultItems[0].ConditionItem.Priority
		sort.Slice(checkResults.ResultItems, func(i, j int) bool {
			if alerting.PriorityWeights[checkResults.ResultItems[i].ConditionItem.Priority] > alerting.PriorityWeights[checkResults.ResultItems[j].ConditionItem.Priority] {
				return true
			}
			return false
		})
		sort.Slice(checkResults.ResultItems, func(i, j int) bool {
			if vi, ok := checkResults.ResultItems[i].ResultValue.(float64); ok {
				if vj, ok := checkResults.ResultItems[j].ResultValue.(float64); ok {
					return vi > vj
				}
@ -972,10 +974,10 @@ func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult
		max = checkResults.QueryResult.Max
		if v, ok := min.(int64); ok {
			//expand 60s
			min = time.UnixMilli(v).Add(-time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z")
		}
		if v, ok := max.(int64); ok {
			max = time.UnixMilli(v).Add(time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z")
		}
	}
	paramsCtx := util.MapStr{
@ -983,14 +985,14 @@ func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult
		alerting2.ParamResourceID:   rule.Resource.ID,
		alerting2.ParamResourceName: rule.Resource.Name,
		alerting2.ParamResults:      conditionParams,
		"objects":                   rule.Resource.Objects,
		"first_group_value":         firstGroupValue,
		"first_threshold":           firstThreshold,
		"rule_name":                 rule.Name,
		"priority":                  priority,
		"min":                       min,
		"max":                       max,
		"env":                       envVariables,
	}
	err = util.MergeFields(paramsCtx, extraParams, true)
	if err != nil {
@ -1004,30 +1006,30 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.Acti
	if err != nil {
		return nil, fmt.Errorf("check condition error:%w", err)
	}
	alertMessage, err := getLastAlertMessage(rule.ID, 2*time.Minute)
	if err != nil {
		return nil, fmt.Errorf("get alert message error: %w", err)
	}
	var actionResults []alerting.ActionExecutionResult
	now := time.Now()
	triggerAt := now
	if alertMessage != nil {
		triggerAt = alertMessage.Created
	}
	paramsCtx := newParameterCtx(rule, checkResults, util.MapStr{
		alerting2.ParamEventID:   util.GetUUID(),
		alerting2.ParamTimestamp: now.Unix(),
		"duration":               now.Sub(triggerAt).String(),
		"trigger_at":             triggerAt.Unix(),
	})
	if msgType == "escalation" || msgType == "notification" {
		title, message := rule.GetNotificationTitleAndMessage()
		err = attachTitleMessageToCtx(title, message, paramsCtx)
		if err != nil {
			return nil, err
		}
	} else if msgType == "recover_notification" {
		if rule.RecoveryNotificationConfig == nil {
			return nil, fmt.Errorf("recovery notification must not be empty")
		}
@ -1035,7 +1037,7 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.Acti
		if err != nil {
			return nil, err
		}
	} else {
		return nil, fmt.Errorf("unkonwn parameter msg type")
	}
@ -1060,7 +1062,7 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.Acti
	}
	if len(channels) > 0 {
actionResults, _ = performChannels(channels, paramsCtx, true) actionResults, _ = performChannels(channels, paramsCtx, true)
}else{ } else {
return nil, fmt.Errorf("no useable channel") return nil, fmt.Errorf("no useable channel")
} }
return actionResults, nil return actionResults, nil
@ -1071,8 +1073,8 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra
var actionResults []alerting.ActionExecutionResult var actionResults []alerting.ActionExecutionResult
for _, channel := range channels { for _, channel := range channels {
var ( var (
errStr string errStr string
resBytes []byte resBytes []byte
messageBytes []byte messageBytes []byte
) )
_, err := common.RetrieveChannel(&channel, raiseChannelEnabledErr) _, err := common.RetrieveChannel(&channel, raiseChannelEnabledErr)
@ -1080,7 +1082,7 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra
log.Error(err) log.Error(err)
errCount++ errCount++
errStr = err.Error() errStr = err.Error()
}else{ } else {
if !channel.Enabled { if !channel.Enabled {
continue continue
} }
@ -1094,17 +1096,15 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra
Result: string(resBytes), Result: string(resBytes),
Error: errStr, Error: errStr,
Message: string(messageBytes), Message: string(messageBytes),
ExecutionTime: int(time.Now().UnixNano()/1e6), ExecutionTime: int(time.Now().UnixNano() / 1e6),
ChannelType: channel.SubType, ChannelType: channel.SubType,
ChannelName: channel.Name, ChannelName: channel.Name,
ChannelID: channel.ID, ChannelID: channel.ID,
}) })
} }
return actionResults, errCount return actionResults, errCount
} }
func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context) { func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context) {
return func(ctx context.Context) { return func(ctx context.Context) {
defer func() { defer func() {
@ -1120,29 +1120,29 @@ func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context)
} }
} }
func CollectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){ func CollectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData) {
if aggM, ok := agg.(map[string]interface{}); ok { if aggM, ok := agg.(map[string]interface{}); ok {
if targetAgg, ok := aggM["filter_agg"]; ok { if targetAgg, ok := aggM["filter_agg"]; ok {
collectMetricData(targetAgg, groupValues, metricData) collectMetricData(targetAgg, groupValues, metricData)
}else{ } else {
collectMetricData(aggM, groupValues, metricData) collectMetricData(aggM, groupValues, metricData)
} }
} }
} }
func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData) {
if aggM, ok := agg.(map[string]interface{}); ok { if aggM, ok := agg.(map[string]interface{}); ok {
if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok { if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok {
if bks, ok := timeBks["buckets"].([]interface{}); ok { if bks, ok := timeBks["buckets"].([]interface{}); ok {
md := alerting.MetricData{ md := alerting.MetricData{
Data: map[string][]alerting.TimeMetricData{}, Data: map[string][]alerting.TimeMetricData{},
GroupValues: strings.Split(groupValues, "*"), GroupValues: strings.Split(groupValues, "*"),
} }
for _, bk := range bks { for _, bk := range bks {
if bkM, ok := bk.(map[string]interface{}); ok{ if bkM, ok := bk.(map[string]interface{}); ok {
for k, v := range bkM { for k, v := range bkM {
if k == "key" || k == "key_as_string" || k== "doc_count"{ if k == "key" || k == "key_as_string" || k == "doc_count" {
continue continue
} }
if len(k) > 5 { //just store a,b,c if len(k) > 5 { //just store a,b,c
@ -1151,9 +1151,9 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerti
if vm, ok := v.(map[string]interface{}); ok { if vm, ok := v.(map[string]interface{}); ok {
if metricVal, ok := vm["value"]; ok { if metricVal, ok := vm["value"]; ok {
md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], metricVal}) md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], metricVal})
}else{ } else {
//percentiles agg type //percentiles agg type
switch vm["values"].(type) { switch vm["values"].(type) {
case []interface{}: case []interface{}:
for _, val := range vm["values"].([]interface{}) { for _, val := range vm["values"].([]interface{}) {
if valM, ok := val.(map[string]interface{}); ok { if valM, ok := val.(map[string]interface{}); ok {
@ -1163,7 +1163,7 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerti
} }
case map[string]interface{}: case map[string]interface{}:
for _, val := range vm["values"].(map[string]interface{}) { for _, val := range vm["values"].(map[string]interface{}) {
md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], val}) md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], val})
break break
} }
} }
@ -1176,18 +1176,18 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerti
} }
} }
*metricData = append(*metricData,md) *metricData = append(*metricData, md)
} }
}else{ } else {
for k, v := range aggM { for k, v := range aggM {
if k == "key" || k== "doc_count"{ if k == "key" || k == "doc_count" {
continue continue
} }
if vm, ok := v.(map[string]interface{}); ok { if vm, ok := v.(map[string]interface{}); ok {
if bks, ok := vm["buckets"].([]interface{}); ok { if bks, ok := vm["buckets"].([]interface{}); ok {
for _, bk := range bks { for _, bk := range bks {
if bkVal, ok := bk.(map[string]interface{}); ok { if bkVal, ok := bk.(map[string]interface{}); ok {
currentGroup := bkVal["key"].(string) currentGroup := bkVal["key"].(string)
newGroupValues := currentGroup newGroupValues := currentGroup
if groupValues != "" { if groupValues != "" {
@ -1227,20 +1227,20 @@ func getLastAlertMessageFromES(ruleID string) (*alerting.AlertMessage, error) {
q := orm.Query{ q := orm.Query{
RawQuery: util.MustToJSONBytes(queryDsl), RawQuery: util.MustToJSONBytes(queryDsl),
} }
err, searchResult := orm.Search(alerting.AlertMessage{}, &q ) err, searchResult := orm.Search(alerting.AlertMessage{}, &q)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(searchResult.Result) == 0 { if len(searchResult.Result) == 0 {
return nil, nil return nil, nil
} }
messageBytes := util.MustToJSONBytes(searchResult.Result[0]) messageBytes := util.MustToJSONBytes(searchResult.Result[0])
message := &alerting.AlertMessage{} message := &alerting.AlertMessage{}
err = util.FromJSONBytes(messageBytes, message) err = util.FromJSONBytes(messageBytes, message)
return message, err return message, err
} }
func getLastAlertMessage(ruleID string, duration time.Duration) (*alerting.AlertMessage, error ){ func getLastAlertMessage(ruleID string, duration time.Duration) (*alerting.AlertMessage, error) {
messageBytes, err := kv.GetValue(alerting2.KVLastMessageState, []byte(ruleID)) messageBytes, err := kv.GetValue(alerting2.KVLastMessageState, []byte(ruleID))
if err != nil { if err != nil {
return nil, err return nil, err
@ -1280,15 +1280,14 @@ func saveAlertMessage(message *alerting.AlertMessage) error {
return err return err
} }
func readTimeFromKV(bucketKey string, key []byte) (time.Time, error) {
func readTimeFromKV(bucketKey string, key []byte)(time.Time, error){
timeBytes, err := kv.GetValue(bucketKey, key) timeBytes, err := kv.GetValue(bucketKey, key)
zeroTime := time.Time{} zeroTime := time.Time{}
if err != nil { if err != nil {
return zeroTime, err return zeroTime, err
} }
timeStr := string(timeBytes) timeStr := string(timeBytes)
if timeStr != ""{ if timeStr != "" {
return time.ParseInLocation(time.RFC3339, string(timeBytes), time.UTC) return time.ParseInLocation(time.RFC3339, string(timeBytes), time.UTC)
} }
return zeroTime, nil return zeroTime, nil

View File

@@ -40,16 +40,16 @@ import (
 	"time"
 )
 
-func TestEngine( t *testing.T) {
+func TestEngine(t *testing.T) {
 	rule := alerting.Rule{
 		ID:      util.GetUUID(),
 		Created: time.Now(),
 		Updated: time.Now(),
 		Enabled: true,
 		Resource: alerting.Resource{
 			ID:        "c8i18llath2blrusdjng",
 			Type:      "elasticsearch",
 			Objects:   []string{".infini_metrics*"},
 			TimeField: "timestamp",
 			Filter: alerting.FilterQuery{
 				And: []alerting.FilterQuery{
@@ -123,9 +123,9 @@ func TestEngine( t *testing.T) {
 			ThrottlePeriod: "1h",
 			AcceptTimeRange: alerting.TimeRange{
 				Start: "8:00",
 				End:   "21:00",
 			},
 			EscalationEnabled:        true,
 			EscalationThrottlePeriod: "30m",
 		},
 	}
@@ -143,11 +143,11 @@ func TestEngine( t *testing.T) {
 	//fmt.Println(util.MustToJSON(filter))
 }
 
 func TestGenerateAgg(t *testing.T) {
 	eng := &Engine{}
 	agg := eng.generateAgg(&insight.MetricItem{
 		Name:      "a",
 		Field:     "cpu.percent",
 		Statistic: "p99",
 	})
 	fmt.Println(util.MustToJSON(agg))
@@ -210,20 +210,20 @@ func TestGeneratePercentilesAggQuery(t *testing.T) {
 	esClient := elasticsearch.ESAPIV7{}
 	esClient.Elasticsearch = cfg.ID
 	esClient.Version = elastic.Version{
 		Number:       "7.10.2",
 		Major:        7,
 		Distribution: elastic.Elasticsearch,
 	}
 	elastic.UpdateClient(cfg, &esClient)
 	rule := alerting.Rule{
 		ID:      util.GetUUID(),
 		Created: time.Now(),
 		Updated: time.Now(),
 		Enabled: true,
 		Resource: alerting.Resource{
 			ID:        cfg.ID,
 			Type:      "elasticsearch",
 			Objects:   []string{".infini_metrics*"},
 			TimeField: "timestamp",
 			RawFilter: map[string]interface{}{
 				"bool": map[string]interface{}{
@@ -271,9 +271,9 @@ func TestGeneratePercentilesAggQuery(t *testing.T) {
 			ThrottlePeriod: "1h",
 			AcceptTimeRange: alerting.TimeRange{
 				Start: "08:00",
 				End:   "21:00",
 			},
 			EscalationEnabled:        true,
 			EscalationThrottlePeriod: "30m",
 		},
 	}
@@ -289,21 +289,21 @@ func TestConvertFilterQuery(t *testing.T) {
 	fq := alerting.FilterQuery{
 		And: []alerting.FilterQuery{
 			{
 				Field:    "metadata.category",
 				Values:   []string{"elasticsearch"},
 				Operator: "equals",
 			},
 			{
 				Field:    "metadata.name",
 				Values:   []string{"index_stats", "node_stats"},
 				Operator: "in",
 			},
 			{
 				Not: []alerting.FilterQuery{
 					{
 						Field:    "timestamp",
 						Operator: "gt",
 						Values:   []string{"2022-04-16T16:16:39.168605+08:00"},
 					},
 				},
 			},
@@ -318,4 +318,4 @@ func TestConvertFilterQuery(t *testing.T) {
 	if dsl := util.MustToJSON(q); dsl != targetDsl {
 		t.Errorf("expect dsl %s but got %s", targetDsl, dsl)
 	}
-}
\ No newline at end of file
+}

View File

@@ -29,7 +29,7 @@ package elasticsearch
 
 import "infini.sh/console/service/alerting"
 
-func init(){
+func init() {
 	eng := Engine{}
 	alerting.RegistEngine("elasticsearch", &eng)
 }

View File

@@ -36,17 +36,18 @@ import (
 type Engine interface {
 	GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error)
-	ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam)(*alerting.QueryResult, error)
-	CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error)
+	ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.QueryResult, error)
+	CheckCondition(rule *alerting.Rule) (*alerting.ConditionResult, error)
 	GenerateTask(rule alerting.Rule) func(ctx context.Context)
 	Test(rule *alerting.Rule, msgType string) ([]alerting.ActionExecutionResult, error)
-	GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error)
+	GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam) ([]alerting.MetricData, *alerting.QueryResult, error)
 }
 
 var (
-	alertEngines = map[string] Engine{}
+	alertEngines      = map[string]Engine{}
 	alertEnginesMutex = sync.RWMutex{}
 )
+
 func RegistEngine(typ string, engine Engine) {
 	alertEnginesMutex.Lock()
 	defer alertEnginesMutex.Unlock()
@@ -61,4 +62,4 @@ func GetEngine(typ string) Engine {
 		panic(fmt.Sprintf("alert engine of type: %s not found", typ))
 	}
 	return eng
-}
\ No newline at end of file
+}

View File

@@ -37,7 +37,7 @@ import (
 	log "src/github.com/cihub/seelog"
 )
 
-func GetEnvVariables() (map[string]interface{}, error){
+func GetEnvVariables() (map[string]interface{}, error) {
 	configFile := global.Env().GetConfigFile()
 	envVariables, err := config.LoadEnvVariables(configFile)
 	if err != nil {
@@ -64,7 +64,7 @@ func GetEnvVariables() (map[string]interface{}, error){
 	return envVariables, nil
 }
 
-func GetInnerConsoleEndpoint() (string, error){
+func GetInnerConsoleEndpoint() (string, error) {
 	appConfig := &config2.AppConfig{
 		UI: config2.UIConfig{},
 	}

View File

@ -31,4 +31,4 @@ import "infini.sh/framework/core/util"
func formatBytes(precision int, bytes float64) string { func formatBytes(precision int, bytes float64) string {
return util.FormatBytes(bytes, precision) return util.FormatBytes(bytes, precision)
} }

View File

@@ -32,10 +32,10 @@ import (
 	"time"
 )
 
-func datetimeInZone(zone string, date interface{}) string{
+func datetimeInZone(zone string, date interface{}) string {
 	return _dateInZone("2006-01-02 15:04:05", date, zone)
 }
 
-func datetime(date interface{}) string{
+func datetime(date interface{}) string {
 	return _dateInZone("2006-01-02 15:04:05", date, "Local")
 }
@@ -58,7 +58,7 @@ func _dateInZone(fmt string, date interface{}, zone string) string {
 		t = *date
 	case int64:
 		if date > 1e12 {
-			date = date/1000
+			date = date / 1000
 		}
 		t = time.Unix(date, 0)
 	case int:

View File

@@ -35,7 +35,7 @@ import (
 	"strings"
 )
 
-func lookup(directory string, id string) interface{}{
+func lookup(directory string, id string) interface{} {
 	directory = strings.TrimSpace(directory)
 	if directory == "" {
 		return "empty_directory"
@@ -46,8 +46,8 @@ func lookup(directory string, id string) interface{}{
 		kv := strings.Split(part, "=")
 		if len(kv) == 2 {
 			k := strings.TrimSpace(kv[0])
-			kvs[k]= strings.TrimSpace(kv[1])
-		}else{
+			kvs[k] = strings.TrimSpace(kv[1])
+		} else {
 			log.Debugf("got unexpected directory part: %s", part)
 		}
 	}
@@ -59,10 +59,10 @@ func lookup(directory string, id string) interface{}{
 	return kvs["default"]
 }
 
-func lookupMetadata(object string, property string, defaultValue string, id string) interface{}{
+func lookupMetadata(object string, property string, defaultValue string, id string) interface{} {
 	var (
 		cfgM = util.MapStr{}
 		buf  []byte
 	)
 	switch object {
 	case "cluster":
@@ -127,4 +127,4 @@ func lookupMetadata(object string, property string, defaultValue string, id stri
 		return v
 	}
 	return defaultValue
-}
\ No newline at end of file
+}

View File

@@ -41,21 +41,21 @@ func GenericFuncMap() template.FuncMap {
 }
 
 var genericMap = map[string]interface{}{
 	"hello":            func() string { return "Hello!" },
 	"format_bytes":     formatBytes,
 	"to_fixed":         toFixed,
 	"date":             date,
 	"date_in_zone":     dateInZone,
 	"datetime":         datetime,
 	"datetime_in_zone": datetimeInZone,
 	"to_upper":         strings.ToUpper,
 	"to_lower":         strings.ToLower,
 	"add":              add,
 	"sub":              sub,
 	"div":              div,
 	"mul":              mul,
 	"lookup":           lookup,
 	"str_replace":      replace,
 	"md_to_html":       mdToHTML,
 	//"get_keystore_secret": getKeystoreSecret,
 }

View File

@@ -35,10 +35,10 @@ import (
 	"strconv"
 )
-func toFixed(precision int, num float64) float64{
+func toFixed(precision int, num float64) float64 {
 	return util.ToFixed(num, precision)
 }
 
-func add(a, b interface{}) float64{
+func add(a, b interface{}) float64 {
 	av := ToFloat64(a)
 	bv := ToFloat64(b)
 	return av + bv
@@ -125,4 +125,4 @@ func ToFloat64E(i interface{}) (float64, error) {
 	default:
 		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
 	}
-}
\ No newline at end of file
+}

View File

@@ -37,7 +37,7 @@ import (
 func substring(start, end int, s string) string {
 	runes := []rune(s)
 	length := len(runes)
-	if start < 0 || start > length || end < 0 || end > length{
+	if start < 0 || start > length || end < 0 || end > length {
 		return s
 	}
 	return string(runes[start:end])
@@ -59,4 +59,4 @@ func mdToHTML(mdText string) string {
 	buf := markdown.Render(doc, renderer)
 	return string(buf)
-}
\ No newline at end of file
+}

View File

@@ -28,11 +28,11 @@
 package alerting
 
 type ParameterMeta struct {
 	Name        string          `json:"name"`
 	Type        string          `json:"type"` //int, float, string, date, array, object
 	Description string          `json:"description"`
 	Eg          string          `json:"eg,omitempty"`
 	Properties  []ParameterMeta `json:"properties,omitempty"`
 }
 
 func GetTemplateParameters() []ParameterMeta {