chore: code format (#72)

* chore: code format
* chore: remove fetch-depth
* chore: add format and lint
* chore: add pr_check
* fix: lint with config
* chore: this pr only unit test
* fix: code format error
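
To reproduce this gate locally before pushing (a minimal sketch, assuming the repository's Makefile exposes the same "format" and "test" targets the workflow invokes):

    # mirror the CI format check: reformat, then fail if any .go file was rewritten
    make format
    if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then
      echo "go format detected formatting changes"
      exit 1
    fi
    # run the unit tests with the same build tags CI uses
    GOFLAGS=-tags=ci make test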
parent fb4dafecb3
commit 8da176bea8

@@ -0,0 +1,307 @@
name: Unit Test

on:
  pull_request:
    branches: [ "main" ]

defaults:
  run:
    shell: bash

env:
  GO_VERSION: 1.23.4
  NODEJS_VERSION: 16.20.2
  PNAME: console

jobs:
  format_check:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout current repository
        uses: actions/checkout@v4
        with:
          path: ${{ env.PNAME }}

      - name: Checkout framework repository
        uses: actions/checkout@v4
        with:
          repository: infinilabs/framework
          path: framework

      - name: Checkout framework-vendor
        uses: actions/checkout@v4
        with:
          ref: main
          repository: infinilabs/framework-vendor
          path: vendor

      - name: Set up nodejs toolchain
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODEJS_VERSION }}

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            node_modules
          key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
          restore-keys: |
            ${{ runner.os }}-cnpm-

      - name: Check nodejs toolchain
        run: |
          if ! command -v cnpm >/dev/null 2>&1; then
            npm install -g rimraf --quiet --no-progress
            npm install -g cnpm@9.2.0 --quiet --no-progress
          fi
          node -v && npm -v && cnpm -v

      - name: Set up go toolchain
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: false
          cache: true

      - name: Check go toolchain
        run: go version

      - name: Cache Build Output
        uses: actions/cache@v4
        with:
          path: |
            .public
          key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
            ${{ runner.os }}-build-

      - name: Code format
        env:
          GOFLAGS: -tags=ci
        run: |
          echo Home path is $HOME
          export WORKBASE=$HOME/go/src/infini.sh
          export WORK=$WORKBASE/console

          # for test workspace
          mkdir -p $HOME/go/src/
          ln -s $GITHUB_WORKSPACE $WORKBASE

          # for web build
          cd $WORK/web
          cnpm install --quiet --no-progress
          cnpm run build --quiet

          # check work folder
          ls -lrt $WORKBASE/
          ls -alrt $WORK

          # for code format
          cd $WORK
          echo Formatting code at $PWD ...
          make format
          if [ $? -ne 0 ]; then
            echo "make format failed, please check make output"
            exit 1
          fi

      - name: Check for changes after format
        id: check-changes
        shell: bash
        run: |
          export WORKBASE=$HOME/go/src/infini.sh
          export WORK=$WORKBASE/$PNAME

          # for format check
          cd $WORK
          if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then
            echo "go format detected formatting changes"
            echo "changes=true" >> $GITHUB_OUTPUT
          else
            echo "go format no changes found"
            echo "changes=false" >> $GITHUB_OUTPUT
          fi

      - name: Fail workflow if changes after format
        if: steps.check-changes.outputs.changes == 'true'
        run: exit 1

  unit_test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout current repository
        uses: actions/checkout@v4
        with:
          path: ${{ env.PNAME }}

      - name: Checkout framework repository
        uses: actions/checkout@v4
        with:
          repository: infinilabs/framework
          path: framework

      - name: Checkout framework-vendor
        uses: actions/checkout@v4
        with:
          ref: main
          repository: infinilabs/framework-vendor
          path: vendor

      - name: Set up nodejs toolchain
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODEJS_VERSION }}

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            node_modules
          key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
          restore-keys: |
            ${{ runner.os }}-cnpm-

      - name: Check nodejs toolchain
        run: |
          if ! command -v cnpm >/dev/null 2>&1; then
            npm install -g rimraf --quiet --no-progress
            npm install -g cnpm@9.2.0 --quiet --no-progress
          fi
          node -v && npm -v && cnpm -v

      - name: Set up go toolchain
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: false
          cache: true

      - name: Check go toolchain
        run: go version

      - name: Cache Build Output
        uses: actions/cache@v4
        with:
          path: |
            .public
          key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
            ${{ runner.os }}-build-

      - name: Unit test
        env:
          GOFLAGS: -tags=ci
        run: |
          echo Home path is $HOME
          export WORKBASE=$HOME/go/src/infini.sh
          export WORK=$WORKBASE/$PNAME

          # for test workspace
          mkdir -p $HOME/go/src/
          ln -s $GITHUB_WORKSPACE $WORKBASE

          # for web build
          cd $WORK/web
          cnpm install --quiet --no-progress
          cnpm run build --quiet

          # check work folder
          ls -lrt $WORKBASE/
          ls -alrt $WORK

          # for unit test
          cd $WORK
          echo Testing code at $PWD ...
          make test

  code_lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout current repository
        uses: actions/checkout@v4
        with:
          path: ${{ env.PNAME }}

      - name: Checkout framework repository
        uses: actions/checkout@v4
        with:
          repository: infinilabs/framework
          path: framework

      - name: Checkout framework-vendor
        uses: actions/checkout@v4
        with:
          ref: main
          repository: infinilabs/framework-vendor
          path: vendor

      - name: Set up nodejs toolchain
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODEJS_VERSION }}

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            node_modules
          key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
          restore-keys: |
            ${{ runner.os }}-cnpm-

      - name: Check nodejs toolchain
        run: |
          if ! command -v cnpm >/dev/null 2>&1; then
            npm install -g rimraf --quiet --no-progress
            npm install -g cnpm@9.2.0 --quiet --no-progress
          fi
          node -v && npm -v && cnpm -v

      - name: Set up go toolchain
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: false
          cache: true

      - name: Check go toolchain
        run: go version

      - name: Cache Build Output
        uses: actions/cache@v4
        with:
          path: |
            .public
          key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
            ${{ runner.os }}-build-

      - name: Code lint
        env:
          GOFLAGS: -tags=ci
        run: |
          echo Home path is $HOME
          export WORKBASE=$HOME/go/src/infini.sh
          export WORK=$WORKBASE/$PNAME

          # for test workspace
          mkdir -p $HOME/go/src/
          ln -s $GITHUB_WORKSPACE $WORKBASE

          # for web build
          cd $WORK/web
          cnpm install --quiet --no-progress
          cnpm run build --quiet

          # check work folder
          ls -lrt $WORKBASE/
          ls -alrt $WORK

          # for code lint
          cd $WORK
          echo Linting code at $PWD ...
          # make lint

@@ -1,105 +0,0 @@
name: Unit Test

on:
  pull_request:
    branches: [ "main" ]

defaults:
  run:
    shell: bash

jobs:
  build:
    runs-on: ubuntu-latest
    env:
      GO_VERSION: 1.23.4
      NODEJS_VERSION: 16.20.2
    steps:
      - name: Checkout current repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          path: console

      - name: Checkout framework repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          repository: infinilabs/framework
          path: framework

      - name: Checkout framework-vendor
        uses: actions/checkout@v4
        with:
          ref: main
          fetch-depth: 0
          repository: infinilabs/framework-vendor
          path: vendor

      - name: Set up nodejs toolchain
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODEJS_VERSION }}

      - name: Cache dependencies
        uses: actions/cache@v4
        with:
          path: |
            node_modules
          key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
          restore-keys: |
            ${{ runner.os }}-cnpm-

      - name: Check nodejs toolchain
        run: |
          if ! command -v cnpm >/dev/null 2>&1; then
            npm install -g rimraf --quiet --no-progress
            npm install -g cnpm@9.2.0 --quiet --no-progress
          fi
          node -v && npm -v && cnpm -v

      - name: Set up go toolchain
        uses: actions/setup-go@v5
        with:
          go-version: ${{ env.GO_VERSION }}
          check-latest: false
          cache: true

      - name: Check go toolchain
        run: go version

      - name: Cache Build Output
        uses: actions/cache@v4
        with:
          path: |
            .public
          key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
            ${{ runner.os }}-build-

      - name: Unit test
        env:
          GOFLAGS: -tags=ci
        run: |
          echo Home path is $HOME
          export WORKBASE=$HOME/go/src/infini.sh
          export WORK=$WORKBASE/console

          # for test workspace
          mkdir -p $HOME/go/src/
          ln -s $GITHUB_WORKSPACE $WORKBASE

          # for web build
          cd $WORK/web
          cnpm install --quiet --no-progress
          cnpm run build --quiet

          # check work folder
          ls -lrt $WORKBASE/
          ls -alrt $WORK

          # for unit test
          cd $WORK
          echo Testing code at $PWD ...
          make test

@@ -32,5 +32,7 @@ appveyor.yml
 log/
 .env
 generated_*.go
-config/generated.go
+config/generat*.go
+config/initialization.dsl
+config/system_config.yml

@@ -46,7 +46,7 @@ func GetMapStringValue(m util.MapStr, key string) string {
 
 func MapLabel(labelName, indexName, keyField, valueField string, client elastic.API, cacheLabels map[string]string) string {
 	if len(cacheLabels) > 0 {
-		if v, ok := cacheLabels[labelName]; ok{
+		if v, ok := cacheLabels[labelName]; ok {
 			return v
 		}
 	}

@@ -58,7 +58,7 @@ func MapLabel(labelName, indexName, keyField, valueField string, client elastic.
 	return labelMaps[labelName]
 }
 
-func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, keyFieldValues []string, cacheSize int) (map[string]string, error){
+func GetLabelMaps(indexName, keyField, valueField string, client elastic.API, keyFieldValues []string, cacheSize int) (map[string]string, error) {
 	if client == nil {
 		return nil, fmt.Errorf("cluster client must not be empty")
 	}

@@ -89,7 +89,7 @@ func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, k
 		var key string
 		if keyField == "_id" {
 			key = hit.ID
-		}else{
+		} else {
 			key = GetMapStringValue(sourceM, keyField)
 		}
 		if key != "" {

@@ -99,7 +99,7 @@ func GetLabelMaps( indexName, keyField, valueField string, client elastic.API, k
 	return labelMaps, nil
 }
 
-func ExecuteTemplate( tpl *template.Template, ctx map[string]interface{}) ([]byte, error){
+func ExecuteTemplate(tpl *template.Template, ctx map[string]interface{}) ([]byte, error) {
 	msgBuffer := &bytes.Buffer{}
 	err := tpl.Execute(msgBuffer, ctx)
 	return msgBuffer.Bytes(), err

@@ -1,10 +0,0 @@
package config

const LastCommitLog = "N/A"
const BuildDate = "N/A"

const EOLDate = "N/A"

const Version = "0.0.1-SNAPSHOT"

const BuildNumber = "001"

@@ -29,24 +29,24 @@ import (
)

const (
	ErrTypeRequestParams = "request_params_error"
	ErrTypeApplication = "application_error"
	ErrTypeAlreadyExists = "already_exists_error"
	ErrTypeNotExists = "not_exists_error"
	ErrTypeIncorrectPassword = "incorrect_password_error"
	ErrTypeDomainPrefixMismatch = "domain_prefix_mismatch_error"
	ErrTypeDisabled = "disabled_error"
	ErrTypeRequestTimeout = "request_timeout_error"
)

var (
	ErrPasswordIncorrect = errors.New("incorrect password")
	ErrNotExistsErr = errors.New("not exists")
)

type Error struct {
	typ string
	msg interface{}
	field string
}

@@ -54,22 +54,22 @@ func (err Error) Error() string {
 	return fmt.Sprintf("%s:%v: %v", err.typ, err.field, err.msg)
 }
 
-//NewAppError returns an application error
+// NewAppError returns an application error
 func NewAppError(msg any) *Error {
 	return New(ErrTypeApplication, "", msg)
 }
 
-//NewParamsError returns a request params error
+// NewParamsError returns a request params error
 func NewParamsError(field string, msg any) *Error {
 	return New(ErrTypeRequestParams, field, msg)
 }
 
-//NewAlreadyExistsError returns an already exists error
+// NewAlreadyExistsError returns an already exists error
 func NewAlreadyExistsError(field string, msg any) *Error {
 	return New(ErrTypeAlreadyExists, field, msg)
 }
 
-//NewNotExistsError returns a not exists error
+// NewNotExistsError returns a not exists error
 func NewNotExistsError(field string, msg any) *Error {
 	return New(ErrTypeNotExists, field, msg)
 }

@@ -144,8 +144,8 @@ const (
	PermissionMigrationTaskWrite = "task:write"
	PermissionComparisonTaskRead = "comparison_task:read"
	PermissionComparisonTaskWrite = "comparison_task:write"
	PermissionSmtpServerRead = "smtp_server:read"
	PermissionSmtpServerWrite = "smtp_server:write"
)

var (

@@ -221,8 +221,8 @@ var (
	DashboardAllPermission = []string{PermissionLayoutRead, PermissionLayoutWrite}
	WorkbenchReadPermission = []string{PermissionElasticsearchClusterRead, PermissionActivityRead, PermissionAlertMessageRead, PermissionElasticsearchMetricRead}
	WorkbenchAllPermission = WorkbenchReadPermission
	SmtpServerReadPermission = []string{PermissionSmtpServerRead}
	SmtpServerAllPermission = []string{PermissionSmtpServerRead, PermissionSmtpServerWrite}
)

var AdminPrivilege = []string{

@@ -304,8 +304,8 @@ func init() {

		SubscriptionRead: SubscriptionReadPermission,
		SubscriptionAll: SubscriptionAllPermission,
		SmtpServerRead: SmtpServerReadPermission,
		SmtpServerAll: SmtpServerAllPermission,
	}

}

@@ -32,72 +32,72 @@ import (
)

type Alert struct {
	ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
	Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
	Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
	RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword }"`
	RuleName string `json:"rule_name" elastic_mapping:"rule_name: { type: keyword }"`
	ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword }"`
	ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword }"`
	Expression string `json:"expression" elastic_mapping:"expression: { type: keyword, copy_to:search_text }"`
	Objects []string `json:"objects" elastic_mapping:"objects: { type:keyword,copy_to:search_text }"`
	Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"`
	Title string `json:"title" elastic_mapping:"title: { type: keyword }"`
	Message string `json:"message" elastic_mapping:"context: { type: keyword, copy_to:search_text }"`
	AcknowledgedTime interface{} `json:"acknowledged_time,omitempty"`
	ActionExecutionResults []ActionExecutionResult `json:"action_execution_results,omitempty"`
	RecoverActionResults []ActionExecutionResult `json:"recover_action_results,omitempty"`
	EscalationActionResults []ActionExecutionResult `json:"escalation_action_results,omitempty"`
	Users []string `json:"users,omitempty"`
	State string `json:"state"`
	Error string `json:"error,omitempty"`
	IsNotified bool `json:"is_notified" elastic_mapping:"is_notified: { type: boolean }"` // whether an alert notification was sent for this check
	IsEscalated bool `json:"is_escalated" elastic_mapping:"is_escalated: { type: boolean }"` // whether an escalated alert notification was sent for this check
	Conditions Condition `json:"condition"`
	ConditionResult *ConditionResult `json:"condition_result,omitempty" elastic_mapping:"condition_result: { type: object,enabled:false }"`
	SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
}

type ActionExecutionResult struct {
	ExecutionTime int `json:"execution_time"`
	Error string `json:"error"`
	Result string `json:"result"`
	Message string `json:"message"`
	ChannelName string `json:"channel_name"`
	ChannelType string `json:"channel_type"`
	ChannelID string `json:"channel_id"`
}

const (
	AlertStateAlerting string = "alerting"
	AlertStateOK = "ok"
	AlertStateError = "error"
	AlertStateNodata = "nodata"
)

const (
	MessageStateAlerting = "alerting"
	MessageStateIgnored = "ignored"
	MessageStateRecovered = "recovered"
)

type AlertMessage struct {
	ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
	Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
	Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
	RuleID string `json:"rule_id" elastic_mapping:"rule_id: { type: keyword,copy_to:search_text }"`
	ResourceID string `json:"resource_id" elastic_mapping:"resource_id: { type: keyword,copy_to:search_text }"`
	ResourceName string `json:"resource_name" elastic_mapping:"resource_name: { type: keyword,copy_to:search_text }"`
	Title string `json:"title" elastic_mapping:"title: { type: keyword,copy_to:search_text }"`
	Message string `json:"message" elastic_mapping:"content: { type: keyword,copy_to:search_text }"`
	Status string `json:"status" elastic_mapping:"status: { type: keyword,copy_to:search_text }"`
	IgnoredTime time.Time `json:"ignored_time,omitempty" elastic_mapping:"ignored_time: { type: date }"`
	IgnoredReason string `json:"ignored_reason,omitempty" elastic_mapping:"ignored_reason: { type: keyword,copy_to:search_text }"`
	IgnoredUser string `json:"ignored_user,omitempty" elastic_mapping:"ignored_user: { type: keyword,copy_to:search_text }"`
	Priority string `json:"priority" elastic_mapping:"priority: { type: keyword }"`
	SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
	Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"`
	Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"`
}

/*

@@ -30,10 +30,11 @@ package alerting
 import "fmt"
 
 type Condition struct {
 	Operator string `json:"operator"`
 	Items []ConditionItem `json:"items"`
 }
-func (cond *Condition) GetMinimumPeriodMatch() int{
+
+func (cond *Condition) GetMinimumPeriodMatch() int {
 	var minPeriodMatch = 0
 	for _, citem := range cond.Items {
 		if citem.MinimumPeriodMatch > minPeriodMatch {

@@ -45,14 +46,14 @@ func (cond *Condition) GetMinimumPeriodMatch() int{
 
 type ConditionItem struct {
 	//MetricName string `json:"metric"`
 	MinimumPeriodMatch int `json:"minimum_period_match"`
 	Operator string `json:"operator"`
 	Values []string `json:"values"`
 	Priority string `json:"priority"`
 	Expression string `json:"expression,omitempty"`
 }
 
-func (cond *ConditionItem) GenerateConditionExpression()(conditionExpression string, err error){
+func (cond *ConditionItem) GenerateConditionExpression() (conditionExpression string, err error) {
 	valueLength := len(cond.Values)
 	if valueLength == 0 {
 		return conditionExpression, fmt.Errorf("condition values: %v should not be empty", cond.Values)

@@ -81,20 +82,20 @@ func (cond *ConditionItem) GenerateConditionExpression()(conditionExpression str

type ConditionResult struct {
	ResultItems []ConditionResultItem `json:"result_items"`
	QueryResult *QueryResult `json:"query_result"`
}
type ConditionResultItem struct {
	GroupValues []string `json:"group_values"`
	ConditionItem *ConditionItem `json:"condition_item"`
	IssueTimestamp interface{} `json:"issue_timestamp"`
	ResultValue interface{} `json:"result_value"` // the last value that met the condition
	RelationValues map[string]interface{} `json:"relation_values"`
}

var PriorityWeights = map[string]int{
	"info": 1,
	"low": 2,
	"medium": 3,
	"high": 4,
	"critical": 5,
}

@@ -33,17 +33,16 @@ import (

type Channel struct {
	orm.ORMObjectBase
	Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"`
	Type string `json:"type" elastic_mapping:"type:{type:keyword,copy_to:search_text}"` // email or webhook
	Priority int `json:"priority,omitempty"`
	Webhook *CustomWebhook `json:"webhook,omitempty" elastic_mapping:"webhook:{type:object}"`
	SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
	SubType string `json:"sub_type" elastic_mapping:"sub_type:{type:keyword,copy_to:search_text}"`
	Email *Email `json:"email,omitempty" elastic_mapping:"email:{type:object}"`
	Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"`
}

const (
	ChannelEmail = "email"
	ChannelWebhook = "webhook"

@@ -29,7 +29,7 @@ package alerting

type Filter struct {
	And []FilterQuery `json:"and,omitempty"`
	Or []FilterQuery `json:"or,omitempty"`
	Not []FilterQuery `json:"not,omitempty"`
	//MinimumShouldMatch int `json:"minimum_should_match"`
}

@@ -28,12 +28,12 @@
package alerting

type FilterQuery struct {
	Field string `json:"field,omitempty"`
	Operator string `json:"operator,omitempty"`
	Values []string `json:"values,omitempty"`
	And []FilterQuery `json:"and,omitempty"`
	Or []FilterQuery `json:"or,omitempty"`
	Not []FilterQuery `json:"not,omitempty"`
}

func (fq FilterQuery) IsComplex() bool {

@@ -36,13 +36,12 @@ import (
 
 type Metric struct {
 	insight.Metric
 	Title string `json:"title,omitempty"` //text template
 	Message string `json:"message,omitempty"` // text template
 	Expression string `json:"expression,omitempty" elastic_mapping:"expression:{type:keyword,copy_to:search_text}"` // alert expression, auto-generated, e.g. avg(cpu) > 80
 }
 
-func (m *Metric) GenerateExpression() (string, error){
+func (m *Metric) GenerateExpression() (string, error) {
 	if len(m.Items) == 1 {
 		return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil
 	}

@@ -50,12 +49,12 @@ func (m *Metric) GenerateExpression() (string, error){
 		return "", fmt.Errorf("formula should not be empty since there are %d metrics", len(m.Items))
 	}
 	var (
 		expressionBytes = []byte(m.Formula)
 		metricExpression string
 	)
 	for _, item := range m.Items {
 		metricExpression = fmt.Sprintf("%s(%s)", item.Statistic, item.Field)
-		reg, err := regexp.Compile(item.Name+`([^\w]|$)`)
+		reg, err := regexp.Compile(item.Name + `([^\w]|$)`)
 		if err != nil {
 			return "", err
 		}

@@ -66,23 +65,23 @@ func (m *Metric) GenerateExpression() (string, error){
}

type MetricItem struct {
	Name string `json:"name"`
	Field string `json:"field"`
	Statistic string `json:"statistic"`
}

type QueryResult struct {
	Query string `json:"query"`
	Raw string `json:"raw"`
	MetricData []MetricData `json:"metric_data"`
	Nodata bool `json:"nodata"`
	Min interface{} `json:"-"`
	Max interface{} `json:"-"`
}

type MetricData struct {
	GroupValues []string `json:"group_values"`
	Data map[string][]TimeMetricData `json:"data"`
}

type TimeMetricData []interface{}

@@ -32,20 +32,19 @@ import (
 )
 
 type Resource struct {
 	ID string `json:"resource_id" elastic_mapping:"resource_id:{type:keyword}"`
 	Name string `json:"resource_name" elastic_mapping:"resource_name:{type:keyword}"`
 	Type string `json:"type" elastic_mapping:"type:{type:keyword}"`
 	Objects []string `json:"objects" elastic_mapping:"objects:{type:keyword,copy_to:search_text}"`
 	Filter FilterQuery `json:"filter,omitempty" elastic_mapping:"-"`
 	RawFilter map[string]interface{} `json:"raw_filter,omitempty"`
 	TimeField string `json:"time_field,omitempty" elastic_mapping:"id:{type:keyword}"`
 	Context Context `json:"context"`
 }
 
-func (r Resource) Validate() error{
+func (r Resource) Validate() error {
 	if r.TimeField == "" {
 		return fmt.Errorf("TimeField can not be empty")
 	}
 	return nil
 }

@@ -33,33 +33,33 @@ import (
 )
 
 type Rule struct {
 	ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
 	Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
 	Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
 	Name string `json:"name" elastic_mapping:"name:{type:keyword,copy_to:search_text}"`
 	Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:keyword}"`
 	Resource Resource `json:"resource" elastic_mapping:"resource:{type:object}"`
 	Metrics Metric `json:"metrics" elastic_mapping:"metrics:{type:object}"`
 	Conditions Condition `json:"conditions" elastic_mapping:"conditions:{type:object}"`
 	Channels *NotificationConfig `json:"channels,omitempty" elastic_mapping:"channels:{type:object}"`
 	NotificationConfig *NotificationConfig `json:"notification_config,omitempty" elastic_mapping:"notification_config:{type:object}"`
 	RecoveryNotificationConfig *RecoveryNotificationConfig `json:"recovery_notification_config,omitempty" elastic_mapping:"recovery_notification_config:{type:object}"`
 	Schedule Schedule `json:"schedule" elastic_mapping:"schedule:{type:object}"`
 	LastNotificationTime time.Time `json:"-" elastic_mapping:"last_notification_time:{type:date}"`
 	LastTermStartTime time.Time `json:"-"` // start time of the most recent alerting round
 	LastEscalationTime time.Time `json:"-"` // time the last escalation notification was sent
 	SearchText string `json:"-" elastic_mapping:"search_text:{type:text,index_prefixes:{},index_phrases:true, analyzer:suggest_text_search }"`
 	Expression string `json:"-"`
 	Creator struct {
 		Name string `json:"name" elastic_mapping:"name: { type: keyword }"`
 		Id string `json:"id" elastic_mapping:"id: { type: keyword }"`
 	} `json:"creator" elastic_mapping:"creator:{type:object}"`
 	Category string `json:"category,omitempty" elastic_mapping:"category: { type: keyword,copy_to:search_text }"`
 	Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword,copy_to:search_text }"`
 }
 
-func (rule *Rule) GetOrInitExpression() (string, error){
-	if rule.Expression != ""{
+func (rule *Rule) GetOrInitExpression() (string, error) {
+	if rule.Expression != "" {
 		return rule.Expression, nil
 	}
 	sb := strings.Builder{}

@@ -81,7 +81,8 @@ func (rule *Rule) GetOrInitExpression() (string, error){
 	rule.Expression = strings.ReplaceAll(sb.String(), "result", metricExp)
 	return rule.Expression, nil
 }
-//GetNotificationConfig for adapter old version config
+
+// GetNotificationConfig for adapting old version config
 func (rule *Rule) GetNotificationConfig() *NotificationConfig {
 	if rule.NotificationConfig != nil {
 		return rule.NotificationConfig

@@ -96,37 +97,37 @@ func (rule *Rule) GetNotificationTitleAndMessage() (string, string) {
 }
 
 type NotificationConfig struct {
 	Enabled bool `json:"enabled"`
 	Title string `json:"title,omitempty"` //text template
 	Message string `json:"message,omitempty"` // text template
 	Normal []Channel `json:"normal,omitempty"`
 	Escalation []Channel `json:"escalation,omitempty"`
 	ThrottlePeriod string `json:"throttle_period,omitempty"` // silence period
 	AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"`
 	EscalationThrottlePeriod string `json:"escalation_throttle_period,omitempty"`
 	EscalationEnabled bool `json:"escalation_enabled,omitempty"`
 }
 
 type RecoveryNotificationConfig struct {
 	Enabled bool `json:"enabled"` // channel enabled
 	Title string `json:"title"` //text template
 	Message string `json:"message"` // text template
 	AcceptTimeRange TimeRange `json:"accept_time_range,omitempty"`
 	Normal []Channel `json:"normal,omitempty"`
 	EventEnabled bool `json:"event_enabled"`
 }
 
-type MessageTemplate struct{
+type MessageTemplate struct {
 	Type string `json:"type"`
 	Source string `json:"source"`
 }
 
 type TimeRange struct {
 	Start string `json:"start"`
 	End string `json:"end"`
 }
 
-func (tr *TimeRange) Include( t time.Time) bool {
+func (tr *TimeRange) Include(t time.Time) bool {
 	if tr.Start == "" || tr.End == "" {
 		return true
 	}

@@ -135,10 +136,11 @@ func (tr *TimeRange) Include( t time.Time) bool {
}

type FilterParam struct {
	Start interface{} `json:"start"`
	End interface{} `json:"end"`
	BucketSize string `json:"bucket_size"`
}

//ctx
//rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
//check_status ,timestamp,

@@ -36,7 +36,7 @@ import (
 	"time"
 )
 
-func TestCreateRule( t *testing.T) {
+func TestCreateRule(t *testing.T) {
 	rule := Rule{
 		//ORMObjectBase: orm.ORMObjectBase{
 		//	ID: util.GetUUID(),

@@ -45,9 +45,9 @@ func TestCreateRule( t *testing.T) {
		//},
		Enabled: true,
		Resource: Resource{
			ID: "c8i18llath2blrusdjng",
			Type: "elasticsearch",
			Objects: []string{".infini_metrics*"},
			TimeField: "timestamp",
			Filter: FilterQuery{
				And: []FilterQuery{

@@ -69,68 +69,68 @@ func TestCreateRule( t *testing.T) {
 				},
 			},
 		},
 		//Metrics: Metric{
 		//	PeriodInterval: "1m",
 		//	MaxPeriods: 15,
 		//	Items: []MetricItem{
 		//		{Name: "red_health", Field: "*", Statistic: "count", Group: []string{"metadata.labels.cluster_id"}},
 		//	},
 		//},
 		//Conditions: Condition{
 		//	Operator: "any",
 		//	Items: []ConditionItem{
 		//		{ MinimumPeriodMatch: 1, Operator: "gte", Values: []string{"1"}, Priority: "error", AlertMessage: "集群健康状态为 Red"},
 		//	},
 		//},
 
 		Metrics: Metric{
 			Metric: insight.Metric{
 				Groups: []insight.MetricGroupItem{{"metadata.labels.cluster_id", 10}, {"metadata.labels.node_id", 10}},
 				Items: []insight.MetricItem{
-					{Name: "a", Field: "payload.elasticsearch.node_stats.fs.total.free_in_bytes", Statistic: "min" },
+					{Name: "a", Field: "payload.elasticsearch.node_stats.fs.total.free_in_bytes", Statistic: "min"},
 					{Name: "b", Field: "payload.elasticsearch.node_stats.fs.total.total_in_bytes", Statistic: "max"},
 				},
 				BucketSize: "1m",
 				Formula: "a/b*100",
 			},
 			//Expression: "min(fs.free_in_bytes)/max(fs.total_in_bytes)*100",
 		},
 		Conditions: Condition{
 			Operator: "any",
 			Items: []ConditionItem{
 				{MinimumPeriodMatch: 1, Operator: "lte", Values: []string{"76"}, Priority: "error"},
 			},
 		},
 
 		Channels: &NotificationConfig{
 			Normal: []Channel{
 				{Name: "钉钉", Type: ChannelWebhook, Webhook: &CustomWebhook{
 					HeaderParams: map[string]string{
 						"Message-Type": "application/json",
 					},
 					Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`,
 					Method: http.MethodPost,
 					URL: "https://oapi.dingtalk.com/robot/send?access_token=XXXXXX",
 				}},
 			},
 			Escalation: []Channel{
 				{Type: ChannelWebhook, Name: "微信", Webhook: &CustomWebhook{
 					HeaderParams: map[string]string{
 						"Message-Type": "application/json",
 					},
 					Body: `{"msgtype": "text","text": {"content":"告警通知: {{ctx.message}}"}}`,
 					Method: http.MethodPost,
 					URL: "https://oapi.weixin.com/robot/send?access_token=6a5c7c9454ff74537a6de493153b1da68860942d4b0aeb33797cb68b5111b077",
 				}},
 			},
 			ThrottlePeriod: "1h",
 			AcceptTimeRange: TimeRange{
 				Start: "8:00",
 				End: "21:00",
 			},
 			EscalationEnabled: false,
 			EscalationThrottlePeriod: "30m",
 		},
 	}
 	//err := rule.Metrics.GenerateExpression()
 	//if err != nil {

@@ -145,15 +145,12 @@ func TestCreateRule( t *testing.T) {
 	fmt.Println(exp)
 }
 
-
-
-
-func TestTimeRange_Include( t *testing.T) {
+func TestTimeRange_Include(t *testing.T) {
 	tr := TimeRange{
 		Start: "08:00",
 		End: "18:31",
 	}
 	fmt.Println(tr.Include(time.Now()))
-	ti,_ := time.Parse(time.RFC3339, "2022-04-11T10:31:38.911000504Z")
+	ti, _ := time.Parse(time.RFC3339, "2022-04-11T10:31:38.911000504Z")
 	fmt.Println(time.Now().Sub(ti))
 }

@@ -28,14 +28,11 @@
package alerting

type Schedule struct {
	Cron *Cron `json:"cron,omitempty" elastic_mapping:"cron:{type:object}"`
	Interval string `json:"interval,omitempty" elastic_mapping:"interval:{type:keyword}"`
}

type Cron struct {
	Expression string `json:"expression" elastic_mapping:"expression:{type:text}"`
	Timezone string `json:"timezone" elastic_mapping:"timezone:{type:keyword}"`
}

@@ -29,19 +29,19 @@ package alerting

type CustomWebhook struct {
	HeaderParams map[string]string `json:"header_params,omitempty" elastic_mapping:"header_params:{type:object,enabled:false}"`
	Method string `json:"method" elastic_mapping:"method:{type:keyword}"`
	URL string `json:"url,omitempty"`
	Body string `json:"body" elastic_mapping:"body:{type:text}"`
}

type Email struct {
	ServerID string `json:"server_id" elastic_mapping:"server_id:{type:keyword}"`
	Recipients struct {
		To []string `json:"to,omitempty" elastic_mapping:"to:{type:keyword}"`
		CC []string `json:"cc,omitempty" elastic_mapping:"cc:{type:keyword}"`
		BCC []string `json:"bcc,omitempty" elastic_mapping:"bcc:{type:keyword}"`
	} `json:"recipients" elastic_mapping:"recipients:{type:object}"`
	Subject string `json:"subject" elastic_mapping:"subject:{type:text}"`
	Body string `json:"body" elastic_mapping:"body:{type:text}"`
	ContentType string `json:"content_type" elastic_mapping:"content_type:{type:keyword}"`
}

@@ -35,13 +35,13 @@ import (

type EmailServer struct {
	orm.ORMObjectBase
	Name string `json:"name" elastic_mapping:"name:{type:text}"`
	Host string `json:"host" elastic_mapping:"host:{type:keyword}"`
	Port int `json:"port" elastic_mapping:"port:{type:keyword}"`
	TLS bool `json:"tls" elastic_mapping:"tls:{type:keyword}"`
	Auth *model.BasicAuth `json:"auth" elastic_mapping:"auth:{type:object}"`
	Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"`
	CredentialID string `json:"credential_id" elastic_mapping:"credential_id:{type:keyword}"`
}

func (serv *EmailServer) Validate(requireName bool) error {

@@ -30,19 +30,19 @@ package insight
import "time"

type Dashboard struct {
	ID string `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
	Created time.Time `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
	Updated time.Time `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
	ClusterId string `json:"cluster_id" elastic_mapping:"cluster_id: { type: keyword }"`
	IndexPattern string `json:"index_pattern" elastic_mapping:"index_pattern: { type: keyword }"`
	TimeField string `json:"time_field,omitempty" elastic_mapping:"time_field: { type: keyword }"`
	Filter interface{} `json:"filter,omitempty" elastic_mapping:"filter: { type: object, enabled:false }"`
	BucketSize string `json:"bucket_size" elastic_mapping:"bucket_size: { type: keyword }"`
	Title string `json:"title" elastic_mapping:"title: { type: keyword }"`
	Description string `json:"description" elastic_mapping:"description: { type: keyword }"`
	Visualizations interface{} `json:"visualizations" elastic_mapping:"visualizations: { type: object, enabled:false }"`
	Tags []string `json:"tags,omitempty" elastic_mapping:"tags: { type: keyword }"`
	User string `json:"user" elastic_mapping:"user: { type: keyword }"`
	Query interface{} `json:"query,omitempty" elastic_mapping:"query: { type: object, enabled:false }"`
	TimeFilter interface{} `json:"time_filter,omitempty" elastic_mapping:"time_filter: { type: object, enabled:false }"`
}

@@ -27,9 +27,8 @@

package insight

type SeriesItem struct {
	Type string `json:"type"`
	Options map[string]interface{} `json:"options"`
	Metric Metric `json:"metric"`
}

@@ -29,39 +29,40 @@ package insight
 
 import (
 	"fmt"
+	"regexp"
+
 	"infini.sh/framework/core/orm"
 	"infini.sh/framework/core/util"
-	"regexp"
 )
 
 type Metric struct {
 	AggTypes []string `json:"agg_types,omitempty"`
 	IndexPattern string `json:"index_pattern,omitempty"`
 	TimeField string `json:"time_field,omitempty"`
 	BucketSize string `json:"bucket_size,omitempty"`
 	Filter interface{} `json:"filter,omitempty"`
 	Groups []MetricGroupItem `json:"groups,omitempty"` //bucket group
 	Sort []GroupSort `json:"sort,omitempty"`
 	ClusterId string `json:"cluster_id,omitempty"`
 	Formula string `json:"formula,omitempty"`
 	//array of formula for new version
 	Formulas []string `json:"formulas,omitempty"`
 	Items []MetricItem `json:"items"`
 	FormatType string `json:"format,omitempty"`
 	TimeFilter interface{} `json:"time_filter,omitempty"`
 	TimeBeforeGroup bool `json:"time_before_group,omitempty"`
 	BucketLabel *BucketLabel `json:"bucket_label,omitempty"`
 	// number of buckets to return, used for aggregation auto_date_histogram when bucket size equals 'auto'
 	Buckets uint `json:"buckets,omitempty"`
 	Unit string `json:"unit,omitempty"`
 }
 
 type MetricBase struct {
 	orm.ORMObjectBase
 	//display name of the metric
 	Name string `json:"name"`
 	//metric identifier
 	Key string `json:"key"`
 	//optional values : "node", "indices", "shard"
 	Level string `json:"level"`
 	//metric calculation formula
@@ -76,16 +77,16 @@ type MetricBase struct {
}

type GroupSort struct {
	Key       string `json:"key"`
	Direction string `json:"direction"`
}

type MetricGroupItem struct {
	Field string `json:"field"`
	Limit int    `json:"limit"`
}

func (m *Metric) GenerateExpression() (string, error) {
	if len(m.Items) == 1 {
		return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil
	}
@@ -93,12 +94,12 @@ func (m *Metric) GenerateExpression() (string, error){
		return "", fmt.Errorf("formula should not be empty since there are %d metrics", len(m.Items))
	}
	var (
		expressionBytes  = []byte(m.Formula)
		metricExpression string
	)
	for _, item := range m.Items {
		metricExpression = fmt.Sprintf("%s(%s)", item.Statistic, item.Field)
		reg, err := regexp.Compile(item.Name + `([^\w]|$)`)
		if err != nil {
			return "", err
		}
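A note on the function reformatted here: GenerateExpression expands each named metric item into a statistic(field) call inside the formula string, compiling a regex that matches the item name only when it is followed by a non-word character or the end of the string. A minimal standalone sketch of that substitution idea; the formula and item names below are hypothetical, not taken from the console codebase:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	formula := "a/b*100"
	// hypothetical metric items: name -> {statistic, field}
	items := map[string][2]string{
		"a": {"sum", "bytes_in"},
		"b": {"sum", "bytes_out"},
	}
	expr := []byte(formula)
	for name, it := range items {
		// match the name only before a non-word character or end of string,
		// mirroring the item.Name + `([^\w]|$)` pattern in the hunk above
		reg, err := regexp.Compile(name + `([^\w]|$)`)
		if err != nil {
			panic(err)
		}
		metricExpr := fmt.Sprintf("%s(%s)", it[0], it[1])
		// $1 restores the delimiter character consumed by the match
		expr = reg.ReplaceAll(expr, []byte(metricExpr+"$1"))
	}
	fmt.Println(string(expr)) // sum(bytes_in)/sum(bytes_out)*100
}

The ([^\w]|$) tail keeps a short name like "a" from matching inside a longer identifier, which is why the pattern is anchored to what follows the name rather than to the name alone.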
@@ -127,10 +128,10 @@ func (m *Metric) ValidateSortKey() error {
		mm[item.Name] = &item
	}
	for _, sortItem := range m.Sort {
		if !util.StringInArray([]string{"desc", "asc"}, sortItem.Direction) {
			return fmt.Errorf("unknown sort direction [%s]", sortItem.Direction)
		}
		if _, ok := mm[sortItem.Key]; !ok && !util.StringInArray([]string{"_key", "_count"}, sortItem.Key) {
			return fmt.Errorf("unknown sort key [%s]", sortItem.Key)
		}
	}
@@ -138,26 +139,26 @@ func (m *Metric) ValidateSortKey() error {
}

type MetricItem struct {
	Name      string `json:"name,omitempty"`
	Field     string `json:"field"`
	FieldType string `json:"field_type,omitempty"`
	Statistic string `json:"statistic,omitempty"`
}

type MetricDataItem struct {
	Timestamp  interface{} `json:"timestamp,omitempty"`
	Value      interface{} `json:"value"`
	Groups     []string    `json:"groups,omitempty"`
	GroupLabel string      `json:"group_label,omitempty"`
}

type MetricData struct {
	Groups     []string `json:"groups,omitempty"`
	Data       map[string][]MetricDataItem
	GroupLabel string `json:"group_label,omitempty"`
}

type BucketLabel struct {
	Enabled  bool   `json:"enabled"`
	Template string `json:"template,omitempty"`
}
@@ -30,15 +30,15 @@ package insight
import "time"

type Visualization struct {
	ID           string       `json:"id,omitempty" elastic_meta:"_id" elastic_mapping:"id: { type: keyword }"`
	Created      *time.Time   `json:"created,omitempty" elastic_mapping:"created: { type: date }"`
	Updated      *time.Time   `json:"updated,omitempty" elastic_mapping:"updated: { type: date }"`
	Title        string       `json:"title,omitempty" elastic_mapping:"title: { type: keyword }"`
	IndexPattern string       `json:"index_pattern,omitempty" elastic_mapping:"index_pattern: { type: keyword }"`
	ClusterId    string       `json:"cluster_id,omitempty" elastic_mapping:"cluster_id: { type: keyword }"`
	Series       []SeriesItem `json:"series" elastic_mapping:"series: { type: object,enabled:false }"`
	Position     *Position    `json:"position,omitempty" elastic_mapping:"position: { type: object,enabled:false }"`
	Description  string       `json:"description,omitempty" elastic_mapping:"description: { type: keyword }"`
}

type Position struct {
@@ -31,6 +31,6 @@ import "infini.sh/framework/core/orm"

type Widget struct {
	orm.ORMObjectBase
	Title  string      `json:"title" elastic_mapping:"title: { type: text }"`
	Config interface{} `json:"config" elastic_mapping:"config: { type: object,enabled:false }"`
}
@@ -31,20 +31,21 @@ import "infini.sh/framework/core/orm"

type Layout struct {
	orm.ORMObjectBase
	Name        string `json:"name" elastic_mapping:"name: { type: text }"`
	Description string `json:"description" elastic_mapping:"description: { type: text }"`
	Creator     struct {
		Name string `json:"name"`
		Id   string `json:"id"`
	} `json:"creator"`
	ViewID   string      `json:"view_id" elastic_mapping:"view_id: { type: keyword }"`
	Config   interface{} `json:"config" elastic_mapping:"config: { type: object, enabled:false }"`
	Reserved bool        `json:"reserved,omitempty" elastic_mapping:"reserved:{type:boolean}"`
	Type     LayoutType  `json:"type" elastic_mapping:"type: { type: keyword }"`
	IsFixed  bool        `json:"is_fixed" elastic_mapping:"is_fixed: { type: boolean }"`
}

type LayoutType string

const (
	LayoutTypeWorkspace LayoutType = "workspace"
)
@@ -46,9 +46,9 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
		HostName string `json:"host_name"`
		IP       string `json:"ip"`
		Source   string `json:"source"`
		OSName   string `json:"os_name"`
		OSArch   string `json:"os_arch"`
		NodeID   string `json:"node_uuid"`
	}
	err := h.DecodeJSON(req, &reqBody)
	if err != nil {
@@ -84,7 +84,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
			hostInfo = &host.HostInfo{
				IP: hi.IP,
				OSInfo: host.OS{
					Platform:   hi.OSName,
					KernelArch: hi.OSArch,
				},
				NodeID: hi.NodeID,
@@ -97,7 +97,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
		}
		hostInfo.Timestamp = time.Now()
		var ctx *orm.Context
		if i == len(reqBody)-1 {
			ctx = &orm.Context{
				Refresh: "wait_for",
			}
@@ -112,7 +112,7 @@ func (h *APIHandler) enrollHost(w http.ResponseWriter, req *http.Request, ps htt
			continue
		}
	}
	resBody := util.MapStr{
		"success": true,
	}
	if len(errors) > 0 {
@@ -168,15 +168,15 @@ func (h *APIHandler) GetHostAgentInfo(w http.ResponseWriter, req *http.Request,
	}

	h.WriteJSON(w, util.MapStr{
		"host_id":  hostID,
		"agent_id": obj.ID,
		"version":  obj.Application.Version,
		"status":   hostInfo.AgentStatus,
		"endpoint": obj.GetEndpoint(),
	}, http.StatusOK)
}

func getHost(hostID string) (*host.HostInfo, error) {
	hostInfo := &host.HostInfo{}
	hostInfo.ID = hostID
	exists, err := orm.Get(hostInfo)
@@ -31,13 +31,13 @@ import (
	"bytes"
	"fmt"
	log "github.com/cihub/seelog"
	"infini.sh/framework/core/elastic"
	"infini.sh/framework/core/global"
	"infini.sh/framework/core/kv"
	"infini.sh/framework/core/model"
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	"infini.sh/framework/modules/configs/common"
	common2 "infini.sh/framework/modules/elastic/common"
	metadata2 "infini.sh/framework/modules/elastic/metadata"
	"time"
@@ -37,15 +37,15 @@ import (
	"path"
)

func GenerateClientCert(caFile, caKey string) (caCert, clientCertPEM, clientKeyPEM []byte, err error) {
	return generateCert(caFile, caKey, false)
}

func GenerateServerCert(caFile, caKey string) (caCert, serverCertPEM, serverKeyPEM []byte, err error) {
	return generateCert(caFile, caKey, true)
}

func generateCert(caFile, caKey string, isServer bool) (caCert, instanceCertPEM, instanceKeyPEM []byte, err error) {
	pool := x509.NewCertPool()
	caCert, err = os.ReadFile(caFile)
	if err != nil {
@@ -69,11 +69,11 @@ func generateCert(caFile, caKey string, isServer bool)(caCert, instanceCertPEM,
	if err != nil {
		return
	}
	if isServer {
		b = &pem.Block{Type: "CERTIFICATE", Bytes: caCertBytes}
		certPEM := pem.EncodeToMemory(b)
		instanceCertPEM, instanceKeyPEM, err = util.GenerateServerCert(rootCert, certKey.(*rsa.PrivateKey), certPEM, nil)
	} else {
		_, instanceCertPEM, instanceKeyPEM = util.GetClientCert(rootCert, certKey)
	}
	return caCert, instanceCertPEM, instanceKeyPEM, nil
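The isServer branch above wraps the parsed CA certificate's DER bytes in a PEM block before handing it to the server-cert helper. A self-contained sketch of that DER-to-PEM step using only the standard library; the throwaway self-signed certificate below stands in for the CA that generateCert reads from disk:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// a throwaway self-signed certificate, standing in for the real CA
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	tpl := x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "demo-ca"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, &tpl, &tpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	// same shape as the isServer branch above: wrap DER bytes in a PEM block
	b := &pem.Block{Type: "CERTIFICATE", Bytes: der}
	certPEM := pem.EncodeToMemory(b)
	fmt.Printf("%s\n", certPEM[:27]) // -----BEGIN CERTIFICATE-----
}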
@@ -84,9 +84,9 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) {
	instanceCrt := path.Join(dataDir, "certs/agent/instance.crt")
	instanceKey := path.Join(dataDir, "certs/agent/instance.key")
	var (
		err           error
		clientCertPEM []byte
		clientKeyPEM  []byte
	)
	if util.FileExists(instanceCrt) && util.FileExists(instanceKey) {
		return instanceCrt, instanceKey, nil
@@ -96,7 +96,7 @@ func GetAgentInstanceCerts(caFile, caKey string) (string, string, error) {
		return "", "", err
	}
	baseDir := path.Join(dataDir, "certs/agent")
	if !util.IsExist(baseDir) {
		err = os.MkdirAll(baseDir, 0775)
		if err != nil {
			return "", "", err
@@ -30,8 +30,8 @@ package common
import (
	log "github.com/cihub/seelog"
	"infini.sh/console/modules/agent/model"
	"infini.sh/framework/core/env"
	"infini.sh/framework/modules/configs/common"
)

func GetAgentConfig() *model.AgentConfig {
@@ -28,14 +28,14 @@
package model

type AgentConfig struct {
	Enabled bool         `config:"enabled"`
	Setup   *SetupConfig `config:"setup"`
}

type SetupConfig struct {
	DownloadURL     string `config:"download_url"`
	CACertFile      string `config:"ca_cert"`
	CAKeyFile       string `config:"ca_key"`
	ConsoleEndpoint string `config:"console_endpoint"`
	Port            string `config:"port"`
}
@@ -38,36 +38,36 @@ import (
	"strings"
)

func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := util.MapStr{}
	reqBody := struct {
		Keyword      string                       `json:"keyword"`
		Size         int                          `json:"size"`
		From         int                          `json:"from"`
		Aggregations []elastic.SearchAggParam     `json:"aggs"`
		Highlight    elastic.SearchHighlightParam `json:"highlight"`
		Filter       elastic.SearchFilterParam    `json:"filter"`
		Sort         []string                     `json:"sort"`
		StartTime    interface{}                  `json:"start_time"`
		EndTime      interface{}                  `json:"end_time"`
	}{}
	err := h.DecodeJSON(req, &reqBody)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
	aggs["term_cluster_id"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.labels.cluster_id",
			"size":  1000,
		},
		"aggs": util.MapStr{
			"term_cluster_name": util.MapStr{
				"terms": util.MapStr{
					"field": "metadata.labels.cluster_name",
					"size":  1,
				},
			},
		},
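The nested util.MapStr literals above describe a two-level Elasticsearch terms aggregation: one bucket per cluster_id, each carrying a size-1 sub-aggregation that resolves the cluster name. A standalone sketch of the same shape with a plain map alias, to show the JSON the handler ends up sending; MapStr here is a local stand-in for the framework's util.MapStr, not the real type:

package main

import (
	"encoding/json"
	"fmt"
)

// MapStr mirrors the role util.MapStr plays in the framework: a JSON-friendly map alias.
type MapStr = map[string]interface{}

func main() {
	agg := MapStr{
		"terms": MapStr{
			"field": "metadata.labels.cluster_id",
			"size":  1000,
		},
		"aggs": MapStr{
			"term_cluster_name": MapStr{
				"terms": MapStr{
					"field": "metadata.labels.cluster_name",
					"size":  1,
				},
			},
		},
	}
	out, _ := json.MarshalIndent(agg, "", "  ")
	fmt.Println(string(out)) // the terms aggregation body sent to Elasticsearch
}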
@@ -86,9 +86,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http

	clusterFilter, hasAllPrivilege := h.GetClusterFilter(req, "metadata.labels.cluster_id")
	if !hasAllPrivilege && clusterFilter == nil {
		h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
		return
	}
	if !hasAllPrivilege && clusterFilter != nil {
@@ -97,9 +95,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http

	hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req)
	if !hasAllPrivilege && len(indexPrivilege) == 0 {
		h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
		return
	}
	if !hasAllPrivilege {
@@ -107,10 +103,10 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		for clusterID, indices := range indexPrivilege {
			var (
				wildcardIndices []string
				normalIndices   []string
			)
			for _, index := range indices {
				if strings.Contains(index, "*") {
					wildcardIndices = append(wildcardIndices, index)
					continue
				}
@@ -120,8 +116,8 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
			if len(wildcardIndices) > 0 {
				subShould = append(subShould, util.MapStr{
					"query_string": util.MapStr{
						"query":            strings.Join(wildcardIndices, " "),
						"fields":           []string{"metadata.labels.index_name"},
						"default_operator": "OR",
					},
				})
@@ -146,7 +142,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
					{
						"bool": util.MapStr{
							"minimum_should_match": 1,
							"should":               subShould,
						},
					},
				},
@@ -156,7 +152,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		indexFilter := util.MapStr{
			"bool": util.MapStr{
				"minimum_should_match": 1,
				"should":               indexShould,
			},
		}
		filter = append(filter, indexFilter)
@@ -168,7 +164,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		{
			"query_string": util.MapStr{
				"default_field": "*",
				"query":         reqBody.Keyword,
			},
		},
	}
@@ -176,15 +172,15 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
	var boolQuery = util.MapStr{
		"filter": filter,
	}
	if len(should) > 0 {
		boolQuery["should"] = should
		boolQuery["minimum_should_match"] = 1
	}
	query := util.MapStr{
		"aggs":      aggs,
		"size":      reqBody.Size,
		"from":      reqBody.From,
		"_source":   []string{"changelog", "id", "metadata", "timestamp"},
		"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
		"query": util.MapStr{
			"bool": boolQuery,
@@ -194,7 +190,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
		reqBody.Sort = []string{"timestamp", "desc"}
	}

	query["sort"] = []util.MapStr{
		{
			reqBody.Sort[0]: util.MapStr{
				"order": reqBody.Sort[1],
@@ -206,7 +202,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetWildcardIndexName(event.Activity{}), dsl)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	w.Write(response.RawResult.Body)
@@ -33,9 +33,9 @@ import (
	"net/http"
)

func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)

	if err != nil {
		log.Error(err)
@@ -43,8 +43,8 @@ func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request,
		return
	}

	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		log.Error(errStr)
		h.WriteError(w, errStr, http.StatusInternalServerError)
		return
@@ -118,7 +118,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,

	var (
		// cluster_id => cluster_uuid
		clustersM    = map[string]string{}
		clusterUUIDs []string
	)
	for _, cid := range clusterIDs {
@@ -145,28 +145,27 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	indexMetricItems := []GroupMetricItem{}
	metricItem := newMetricItem("cluster_indexing", 2, "cluster")
	metricItem.OnlyPrimary = true
	indexMetricItems = append(indexMetricItems, GroupMetricItem{
		Key:          "cluster_indexing",
		Field:        "payload.elasticsearch.node_stats.indices.indexing.index_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   metricItem,
		FormatType:   "num",
		Units:        "doc/s",
	})

	metricItem = newMetricItem("cluster_search", 2, "cluster")
	indexMetricItems = append(indexMetricItems, GroupMetricItem{
		Key:          "cluster_search",
		Field:        "payload.elasticsearch.node_stats.indices.search.query_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   metricItem,
		FormatType:   "num",
		Units:        "query/s",
	})

	clusterID := global.MustLookupString(elastic.GlobalSystemElasticsearchID)
	intervalField, err := getDateHistogramIntervalField(clusterID, bucketSizeStr)
	if err != nil {
@@ -200,23 +199,23 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
			{
				"range": util.MapStr{
					"timestamp": util.MapStr{
						"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
					},
				},
			},
		},
		},
	}
	aggs := map[string]interface{}{}
	sumAggs := util.MapStr{}

	for _, metricItem := range indexMetricItems {
		leafAgg := util.MapStr{
			"max": util.MapStr{
				"field": metricItem.Field,
			},
		}
		var sumBucketPath = "term_node>" + metricItem.ID
		aggs[metricItem.ID] = leafAgg

		sumAggs[metricItem.ID] = util.MapStr{
@@ -224,22 +223,22 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
				"buckets_path": sumBucketPath,
			},
		}
		if metricItem.IsDerivative {
			sumAggs[metricItem.ID+"_deriv"] = util.MapStr{
				"derivative": util.MapStr{
					"buckets_path": metricItem.ID,
				},
			}
		}
	}
	sumAggs["term_node"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.labels.node_id",
			"size":  1000,
		},
		"aggs": aggs,
	}
	query["aggs"] = util.MapStr{
		"group_by_level": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.labels.cluster_uuid",
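The derivative pipeline aggregation registered above converts cumulative counters such as index_total and query_total into per-bucket rates. The same arithmetic done by hand, on a hypothetical series sampled into 10-second buckets:

package main

import "fmt"

func main() {
	// cumulative index_total samples, one per 10-second bucket (made-up values)
	totals := []float64{1000, 1600, 2500, 2500, 4000}
	const bucketSeconds = 10.0
	for i := 1; i < len(totals); i++ {
		// a derivative bucket is the delta to the previous bucket;
		// dividing by the bucket width turns it into doc/s
		rate := (totals[i] - totals[i-1]) / bucketSeconds
		fmt.Printf("bucket %d: %.1f doc/s\n", i, rate)
	}
}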
@@ -247,11 +246,11 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
			},
			"aggs": util.MapStr{
				"dates": util.MapStr{
					"date_histogram": util.MapStr{
						"field":       "timestamp",
						intervalField: bucketSizeStr,
					},
					"aggs": sumAggs,
				},
			},
		},
@@ -279,12 +278,12 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	for _, line := range indexMetrics["cluster_indexing"].Lines {
		data := line.Data
		if v, ok := data.([][]interface{}); ok && len(v) > 0 {
			// remove first metric dot
			temp := v[1:]
			// // remove first last dot
			if len(temp) > 0 {
				temp = temp[0 : len(temp)-1]
			}
			data = temp
		}
@@ -293,12 +292,12 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
	searchMetricData := util.MapStr{}
	for _, line := range indexMetrics["cluster_search"].Lines {
		data := line.Data
		if v, ok := data.([][]interface{}); ok && len(v) > 0 {
			// remove first metric dot
			temp := v[1:]
			// // remove first last dot
			if len(temp) > 0 {
				temp = temp[0 : len(temp)-1]
			}
			data = temp
		}
@@ -633,7 +632,6 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p
		}
	}

	if v, ok := nodeID.(string); ok {
		nodeInfos[v] = util.MapStr{
			"timestamp": hitM["timestamp"],
@@ -642,7 +640,7 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p
			"load_1m":      load,
			"heap.percent": heapUsage,
			"disk.avail":   availDisk,
			"disk.used":    usedDisk,
			"uptime":       uptime,
		}
@@ -865,14 +863,14 @@ type RealtimeNodeInfo struct {
	IndexQPS      interface{} `json:"index_qps"`
	QueryQPS      interface{} `json:"query_qps"`
	IndexBytesQPS interface{} `json:"index_bytes_qps"`
	Timestamp     uint64      `json:"timestamp"`
	CatNodeResponse
}

func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -891,18 +889,18 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map
		"aggs": util.MapStr{
			"date": util.MapStr{
				"date_histogram": util.MapStr{
					"field":       "timestamp",
					intervalField: bucketSizeStr,
				},
				"aggs": util.MapStr{
					"term_shard": util.MapStr{
						"terms": util.MapStr{
							"field": "metadata.labels.shard_id",
							"size":  1000,
						},
						"aggs": util.MapStr{
							"filter_pri": util.MapStr{
								"filter": util.MapStr{"term": util.MapStr{"payload.elasticsearch.shard_stats.routing.primary": true}},
								"aggs": util.MapStr{
									"index_total": util.MapStr{
										"max": util.MapStr{
@@ -994,8 +992,8 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map

func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -1048,7 +1046,7 @@ func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName st
		"aggs": util.MapStr{
			"date": util.MapStr{
				"date_histogram": util.MapStr{
					"field":       "timestamp",
					intervalField: bucketSizeStr,
				},
				"aggs": util.MapStr{
@@ -1108,8 +1106,8 @@ func (h *APIHandler) getShardQPS(clusterID string, nodeUUID string, indexName st

func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
	ver := h.Client().GetVersion()
	bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
	intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
	if err != nil {
		return nil, err
	}
@@ -1128,7 +1126,7 @@ func (h *APIHandler) getNodeQPS(clusterID string, bucketSizeInSeconds int) (map[
		"aggs": util.MapStr{
			"date": util.MapStr{
				"date_histogram": util.MapStr{
					"field":       "timestamp",
					intervalField: bucketSizeStr,
				},
				"aggs": util.MapStr{
@@ -1238,11 +1236,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
			{
				"match": util.MapStr{
					reqBody.SearchField: util.MapStr{
						"query":          reqBody.Keyword,
						"fuzziness":      "AUTO",
						"max_expansions": 10,
						"prefix_length":  2,
						"boost":          2,
					},
				},
			},
@@ -1284,11 +1282,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
			{
				"match": util.MapStr{
					"search_text": util.MapStr{
						"query":          reqBody.Keyword,
						"fuzziness":      "AUTO",
						"max_expansions": 10,
						"prefix_length":  2,
						"boost":          2,
					},
				},
			},
@@ -1357,7 +1355,7 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
	id := ps.ByName("id")
	collectionMode := GetMonitorState(id)
	ret := util.MapStr{
		"cluster_id":             id,
		"metric_collection_mode": collectionMode,
	}
	queryDSL := util.MapStr{
@@ -1382,7 +1380,7 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
		"grp_name": util.MapStr{
			"terms": util.MapStr{
				"field": "metadata.name",
				"size":  10,
			},
			"aggs": util.MapStr{
				"max_timestamp": util.MapStr{
@@ -1405,11 +1403,11 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
		key := bk["key"].(string)
		if tv, ok := bk["max_timestamp"].(map[string]interface{}); ok {
			if collectionMode == elastic.ModeAgentless {
				if util.StringInArray([]string{"index_stats", "cluster_health", "cluster_stats", "node_stats"}, key) {
					ret[key] = getCollectionStats(tv["value"])
				}
			} else {
				if util.StringInArray([]string{"shard_stats", "cluster_health", "cluster_stats", "node_stats"}, key) {
					ret[key] = getCollectionStats(tv["value"])
				}
			}
@@ -1422,13 +1420,13 @@ func (h *APIHandler) getClusterMonitorState(w http.ResponseWriter, req *http.Req
func getCollectionStats(lastActiveAt interface{}) util.MapStr {
	stats := util.MapStr{
		"last_active_at": lastActiveAt,
		"status":         "active",
	}
	if timestamp, ok := lastActiveAt.(float64); ok {
		t := time.Unix(int64(timestamp/1000), 0)
		if time.Now().Sub(t) > 5*time.Minute {
			stats["status"] = "warning"
		} else {
			stats["status"] = "ok"
		}
	}
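getCollectionStats treats last_active_at as a millisecond epoch value and downgrades the status when the sample is older than five minutes. The same check in a runnable sketch; the helper name and sample inputs are illustrative only:

package main

import (
	"fmt"
	"time"
)

// status mirrors the check above: a millisecond timestamp older than
// five minutes yields "warning", a fresh one yields "ok".
func status(lastActiveAt interface{}) string {
	if ts, ok := lastActiveAt.(float64); ok {
		t := time.Unix(int64(ts/1000), 0)
		if time.Since(t) > 5*time.Minute {
			return "warning"
		}
		return "ok"
	}
	return "active" // non-numeric input keeps the default, as in the diff
}

func main() {
	now := float64(time.Now().UnixMilli())
	fmt.Println(status(now))              // ok
	fmt.Println(status(now - 10*60*1000)) // warning
	fmt.Println(status("n/a"))            // active
}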
@@ -39,7 +39,7 @@ import (

func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)

	if err != nil {
		log.Error(err)
@@ -47,16 +47,16 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
		return
	}

	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		log.Error(errStr)
		h.WriteError(w, errStr, http.StatusNotFound)
		return
	}

	var reqParams = struct {
		Index           string                 `json:"index"`
		Body            map[string]interface{} `json:"body"`
		DistinctByField map[string]interface{} `json:"distinct_by_field"`
	}{}

@@ -101,12 +101,12 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
	if qm, ok := query.(map[string]interface{}); ok {

		filter, _ := util.MapStr(qm).GetValue("bool.filter")
		if fv, ok := filter.([]interface{}); ok {
			fv = append(fv, util.MapStr{
				"script": util.MapStr{
					"script": util.MapStr{
						"source": "distinct_by_field",
						"lang":   "infini",
						"params": reqParams.DistinctByField,
					},
				},
@@ -173,7 +173,7 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
	if timeout != "" {
		queryArgs = &[]util.KV{
			{
				Key:   "timeout",
				Value: timeout,
			},
		}
@@ -184,7 +184,7 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
		}
		var cancel context.CancelFunc
		// here add one second for network delay
		ctx, cancel = context.WithTimeout(context.Background(), du+time.Second)
		defer cancel()
	}

@@ -207,12 +207,10 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
	h.Write(w, searchRes.RawResult.Body)
}

func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := map[string]interface{}{}
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)

	if err != nil {
		log.Error(err)
@@ -221,16 +219,16 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
		return
	}

	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		h.WriteError(w, errStr, http.StatusNotFound)
		return
	}

	var reqParams = struct {
		BoolFilter interface{} `json:"boolFilter"`
		FieldName  string      `json:"field"`
		Query      string      `json:"query"`
	}{}
	err = h.DecodeJSON(req, &reqParams)
	if err != nil {
@@ -246,7 +244,7 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
	indices, hasAll := h.GetAllowedIndices(req, targetClusterID)
	if !hasAll {
		if len(indices) == 0 {
			h.WriteJSON(w, values, http.StatusOK)
			return
		}
		boolQ["must"] = []util.MapStr{
@@ -265,15 +263,15 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
		"aggs": util.MapStr{
			"suggestions": util.MapStr{
				"terms": util.MapStr{
					"field":          reqParams.FieldName,
					"include":        reqParams.Query + ".*",
					"execution_hint": "map",
					"shard_size":     10,
				},
			},
		},
	}
	var queryBodyBytes = util.MustToJSONBytes(queryBody)

	searchRes, err := client.SearchWithRawQueryDSL(indexName, queryBodyBytes)
	if err != nil {
@@ -285,7 +283,7 @@ func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *htt
	for _, bucket := range searchRes.Aggregations["suggestions"].Buckets {
		values = append(values, bucket["key"])
	}
	h.WriteJSON(w, values, http.StatusOK)
}

func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -293,7 +291,7 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
	traceIndex := h.GetParameterOrDefault(req, "traceIndex", orm.GetIndexName(elastic.TraceMeta{}))
	traceField := h.GetParameterOrDefault(req, "traceField", "trace_id")
	targetClusterID := ps.ByName("id")
	exists, client, err := h.GetClusterClient(targetClusterID)

	if err != nil {
		log.Error(err)
@@ -301,8 +299,8 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
		return
	}

	if !exists {
		errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
		h.WriteError(w, errStr, http.StatusNotFound)
		return
	}
@@ -340,4 +338,3 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
	}
	h.WriteJSON(w, indexNames, http.StatusOK)
}
@@ -211,8 +211,7 @@ func (h *APIHandler) getDiscoverHosts(w http.ResponseWriter, req *http.Request,

func getHostSummary(agentIDs []string, metricName string, summary map[string]util.MapStr) error {
	if summary == nil {
		summary = map[string]util.MapStr{}
	}

	if len(agentIDs) == 0 {
@@ -506,8 +505,7 @@ func (h *APIHandler) FetchHostInfo(w http.ResponseWriter, req *http.Request, ps
	for key, item := range hostMetrics {
		for _, line := range item.Lines {
			if _, ok := networkMetrics[line.Metric.Label]; !ok {
				networkMetrics[line.Metric.Label] = util.MapStr{}
			}
			networkMetrics[line.Metric.Label][key] = line.Data
		}
@@ -682,20 +680,20 @@ func (h *APIHandler) getSingleHostMetricFromNode(ctx context.Context, nodeID str
}

const (
	OSCPUUsedPercentMetricKey           = "cpu_used_percent"
	MemoryUsedPercentMetricKey          = "memory_used_percent"
	DiskUsedPercentMetricKey            = "disk_used_percent"
	SystemLoadMetricKey                 = "system_load"
	CPUIowaitMetricKey                  = "cpu_iowait"
	SwapMemoryUsedPercentMetricKey      = "swap_memory_used_percent"
	NetworkSummaryMetricKey             = "network_summary"
	NetworkPacketsSummaryMetricKey      = "network_packets_summary"
	DiskReadRateMetricKey               = "disk_read_rate"
	DiskWriteRateMetricKey              = "disk_write_rate"
	DiskPartitionUsageMetricKey         = "disk_partition_usage"
	NetworkInterfaceOutputRateMetricKey = "network_interface_output_rate"

)

func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	hostID := ps.MustGetParameter("host_id")
	hostInfo := &host.HostInfo{}
@@ -798,7 +796,7 @@ func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Reque
		metricItem.AddLine("Disk Write Rate", "Disk Write Rate", "network write rate of host.", "group1", "payload.host.diskio_summary.write.bytes", "max", bucketSizeStr, "%", "bytes", "0,0.[00]", "0,0.[00]", false, true)
		metricItems = append(metricItems, metricItem)
	case DiskPartitionUsageMetricKey, NetworkInterfaceOutputRateMetricKey:
		resBody["metrics"], err = h.getGroupHostMetrics(ctx, hostInfo.AgentID, min, max, bucketSize, key)
		if err != nil {
			log.Error(err)
			h.WriteError(w, err, http.StatusInternalServerError)
@@ -35,7 +35,7 @@ import (
	"net/http"
)

func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	esClient := elastic.GetClient(clusterID)
	policies, err := esClient.GetILMPolicy("")
@@ -47,7 +47,7 @@ func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.R
	h.WriteJSON(w, policies, http.StatusOK)
}

func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	policy := ps.MustGetParameter("policy")
	esClient := elastic.GetClient(clusterID)
@@ -66,7 +66,7 @@ func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.
	h.WriteAckOKJSON(w)
}

func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	clusterID := ps.MustGetParameter("id")
	policy := ps.MustGetParameter("policy")
	esClient := elastic.GetClient(clusterID)
File diff suppressed because it is too large
@@ -46,41 +46,41 @@ import (
)

func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	resBody := util.MapStr{}
	reqBody := struct {
		Keyword      string                       `json:"keyword"`
		Size         int                          `json:"size"`
		From         int                          `json:"from"`
		Aggregations []elastic.SearchAggParam     `json:"aggs"`
		Highlight    elastic.SearchHighlightParam `json:"highlight"`
		Filter       elastic.SearchFilterParam    `json:"filter"`
		Sort         []string                     `json:"sort"`
		SearchField  string                       `json:"search_field"`
	}{}
	err := h.DecodeJSON(req, &reqBody)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
	aggs["term_cluster_id"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.cluster_id",
			"size":  1000,
		},
		"aggs": util.MapStr{
			"term_cluster_name": util.MapStr{
				"terms": util.MapStr{
					"field": "metadata.cluster_name",
					"size":  1,
				},
			},
		},
	}
	filter := elastic.BuildSearchTermFilter(reqBody.Filter)
	var should []util.MapStr
	if reqBody.SearchField != "" {
		should = []util.MapStr{
			{
				"prefix": util.MapStr{
@@ -103,8 +103,8 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
				},
			},
		}
	} else {
		if reqBody.Keyword != "" {
			should = []util.MapStr{
				{
					"prefix": util.MapStr{
@@ -149,15 +149,13 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
		}
	}

	must := []interface{}{}
	if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri {
		if indexFilter != nil {
			must = append(must, indexFilter)
		}
	} else {
		h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
		return
	}
	boolQuery := util.MapStr{
@@ -169,7 +167,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
			},
		},
		"filter": filter,
		"must":   must,
	}
	if len(should) > 0 {
		boolQuery["should"] = should
@@ -178,7 +176,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
	query := util.MapStr{
		"aggs":      aggs,
		"size":      reqBody.Size,
		"from":      reqBody.From,
		"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
		"query": util.MapStr{
			"bool": boolQuery,
@@ -192,7 +190,7 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
		},
	}
	if len(reqBody.Sort) > 1 {
		query["sort"] = []util.MapStr{
			{
				reqBody.Sort[0]: util.MapStr{
					"order": reqBody.Sort[1],
@@ -204,14 +202,14 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
	response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.IndexConfig{}), dsl)
	if err != nil {
		resBody["error"] = err.Error()
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
		return
	}
	w.Write(util.MustToJSONBytes(response))

}

func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool) {
	hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req)
	if !hasAllPrivilege && len(indexPrivilege) == 0 {
		return nil, false
@@ -221,10 +219,10 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
	for clusterID, indices := range indexPrivilege {
		var (
			wildcardIndices []string
			normalIndices   []string
		)
		for _, index := range indices {
			if strings.Contains(index, "*") {
				wildcardIndices = append(wildcardIndices, index)
				continue
			}
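The loop above partitions a user's allowed index patterns into wildcard patterns (matched later via a query_string clause) and exact names (matched via a terms clause). A standalone sketch of the split, with made-up patterns:

package main

import (
	"fmt"
	"strings"
)

func main() {
	indices := []string{"logs-*", "orders", "metrics-*", "users"}
	var wildcardIndices, normalIndices []string
	for _, index := range indices {
		// anything containing a "*" goes down the query_string path
		if strings.Contains(index, "*") {
			wildcardIndices = append(wildcardIndices, index)
			continue
		}
		normalIndices = append(normalIndices, index)
	}
	fmt.Println(strings.Join(wildcardIndices, " ")) // logs-* metrics-*
	fmt.Println(normalIndices)                      // [orders users]
}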
@@ -234,8 +232,8 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
		if len(wildcardIndices) > 0 {
			subShould = append(subShould, util.MapStr{
				"query_string": util.MapStr{
					"query":            strings.Join(wildcardIndices, " "),
					"fields":           []string{"metadata.index_name"},
					"default_operator": "OR",
				},
			})
@@ -260,7 +258,7 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
				{
					"bool": util.MapStr{
						"minimum_should_match": 1,
						"should":               subShould,
					},
				},
			},
@@ -270,14 +268,14 @@ func (h *APIHandler) getAllowedIndexFilter(req *http.Request) (util.MapStr, bool
		indexFilter := util.MapStr{
			"bool": util.MapStr{
				"minimum_should_match": 1,
				"should":               indexShould,
			},
		}
		return indexFilter, true
	}
	return nil, true
}
func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	var indexIDs []interface{}
	h.DecodeJSON(req, &indexIDs)

@@ -288,8 +286,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
	indexIDs = indexIDs[0:1]
	// map indexIDs(cluster_id:index_name => cluster_uuid:indexName)
	var (
		indexIDM          = map[string]string{}
		newIndexIDs       []interface{}
		clusterIndexNames = map[string][]string{}
	)
	indexID := indexIDs[0]
@@ -318,12 +316,12 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
			return
		}
		clusterIndexNames[firstClusterID] = append(clusterIndexNames[firstClusterID], firstIndexName)
	} else {
		h.WriteError(w, fmt.Sprintf("invalid index_id: %v", indexID), http.StatusInternalServerError)
		return
	}
	for clusterID, indexNames := range clusterIndexNames {
		clusterUUID, err := adapter.GetClusterUUID(clusterID)
		if err != nil {
			log.Warnf("get cluster uuid error: %v", err)
			continue
@@ -382,7 +380,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
		}
		if primary == true {
			indexInfo.Shards++
		} else {
			indexInfo.Replicas++
		}
		indexInfo.Timestamp = hitM["timestamp"]
@@ -403,36 +401,36 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
	}
	var metricLen = 15
	// indexing rate
	indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
	indexMetric.OnlyPrimary = true
	indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
	nodeMetricItems := []GroupMetricItem{}
	nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
		Key:          "indexing",
		Field:        "payload.elasticsearch.shard_stats.indexing.index_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   indexMetric,
		FormatType:   "num",
		Units:        "Indexing/s",
	})
	queryMetric := newMetricItem("search", 2, OperationGroupKey)
	queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
	nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
		Key:          "search",
		Field:        "payload.elasticsearch.shard_stats.search.query_total",
		ID:           util.GetUUID(),
		IsDerivative: true,
		MetricItem:   queryMetric,
		FormatType:   "num",
		Units:        "Search/s",
	})

	aggs := map[string]interface{}{}
	query := map[string]interface{}{}
	query["query"] = util.MapStr{
		"bool": util.MapStr{
			"must": []util.MapStr{
				{
					"term": util.MapStr{
						"metadata.category": util.MapStr{
@@ -462,7 +460,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
				{
					"range": util.MapStr{
						"timestamp": util.MapStr{
-							"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
+							"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
						},
					},
				},
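Note: the change above is cosmetic (gofmt tightens metricLen * bucketSize to metricLen*bucketSize); the semantics are unchanged, and the range filter still looks back exactly metricLen buckets. A minimal sketch of the window arithmetic, assuming the 15-bucket default above and an illustrative 60-second bucket:

package main

import "fmt"

func main() {
	metricLen, bucketSize := 15, 60 // 15 buckets of an assumed 60s each
	fmt.Printf("gte: now-%ds\n", metricLen*bucketSize) // gte: now-900s
}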
@@ -471,18 +469,18 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
	}

	sumAggs := util.MapStr{}
-	for _,metricItem:=range nodeMetricItems{
+	for _, metricItem := range nodeMetricItems {
		leafAgg := util.MapStr{
-			"max":util.MapStr{
+			"max": util.MapStr{
				"field": metricItem.Field,
			},
		}
-		var sumBucketPath = "term_shard>"+ metricItem.ID
+		var sumBucketPath = "term_shard>" + metricItem.ID
		if metricItem.MetricItem.OnlyPrimary {
			filterSubAggs := util.MapStr{
				metricItem.ID: leafAgg,
			}
-			aggs["filter_pri"]=util.MapStr{
+			aggs["filter_pri"] = util.MapStr{
				"filter": util.MapStr{
					"term": util.MapStr{
						"payload.elasticsearch.shard_stats.routing.primary": util.MapStr{

@@ -492,8 +490,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
				},
				"aggs": filterSubAggs,
			}
-			sumBucketPath = "term_shard>filter_pri>"+ metricItem.ID
-		}else{
+			sumBucketPath = "term_shard>filter_pri>" + metricItem.ID
+		} else {
			aggs[metricItem.ID] = leafAgg
		}

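Note: the subtle part of this hunk is not the formatting but the bucket path: when a metric is primary-only, the leaf max aggregation is nested one level deeper under filter_pri, so the later sum_bucket pipeline must address it through that extra level. A self-contained sketch of the same shape; the metric ID and field names here are illustrative, not the console's real ones:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	id := "metric-1" // illustrative metric ID
	leafAgg := map[string]interface{}{
		"max": map[string]interface{}{"field": "some.numeric.field"},
	}

	aggs := map[string]interface{}{}
	sumBucketPath := "term_shard>" + id
	onlyPrimary := true
	if onlyPrimary {
		// primary-only metrics nest the leaf agg under a filter bucket,
		// so the pipeline path gains one extra level
		aggs["filter_pri"] = map[string]interface{}{
			"filter": map[string]interface{}{
				"term": map[string]interface{}{"routing.primary": true},
			},
			"aggs": map[string]interface{}{id: leafAgg},
		}
		sumBucketPath = "term_shard>filter_pri>" + id
	} else {
		aggs[id] = leafAgg
	}

	out, _ := json.MarshalIndent(map[string]interface{}{
		"aggs":         aggs,
		"buckets_path": sumBucketPath,
	}, "", "  ")
	fmt.Println(string(out))
}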
@@ -502,18 +500,18 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
				"buckets_path": sumBucketPath,
			},
		}
-		if metricItem.IsDerivative{
-			sumAggs[metricItem.ID+"_deriv"]=util.MapStr{
-				"derivative":util.MapStr{
+		if metricItem.IsDerivative {
+			sumAggs[metricItem.ID+"_deriv"] = util.MapStr{
+				"derivative": util.MapStr{
					"buckets_path": metricItem.ID,
				},
			}
		}
	}
-	sumAggs["term_shard"]= util.MapStr{
+	sumAggs["term_shard"] = util.MapStr{
		"terms": util.MapStr{
			"field": "metadata.labels.shard_id",
			"size":  10000,
		},
		"aggs": aggs,
	}
@ -523,8 +521,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
query["size"]=0
|
||||
query["aggs"]= util.MapStr{
|
||||
query["size"] = 0
|
||||
query["aggs"] = util.MapStr{
|
||||
"group_by_level": util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "metadata.labels.index_id",
|
||||
|
@ -532,11 +530,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
|
|||
},
|
||||
"aggs": util.MapStr{
|
||||
"dates": util.MapStr{
|
||||
"date_histogram":util.MapStr{
|
||||
"field": "timestamp",
|
||||
"date_histogram": util.MapStr{
|
||||
"field": "timestamp",
|
||||
intervalField: bucketSizeStr,
|
||||
},
|
||||
"aggs":sumAggs,
|
||||
"aggs": sumAggs,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@@ -549,9 +547,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
	indexMetrics := map[string]util.MapStr{}
	for key, item := range metrics {
		for _, line := range item.Lines {
-			if _, ok := indexMetrics[line.Metric.Label]; !ok{
-				indexMetrics[line.Metric.Label] = util.MapStr{
-				}
+			if _, ok := indexMetrics[line.Metric.Label]; !ok {
+				indexMetrics[line.Metric.Label] = util.MapStr{}
			}
			indexMetrics[line.Metric.Label][key] = line.Data
		}
@@ -601,11 +598,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
	indexID := ps.MustGetParameter("index")
	parts := strings.Split(indexID, ":")
	if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) {
		h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
		return
	}
	if len(parts) < 2 {
-		h.WriteError(w, "invalid index id: "+ indexID, http.StatusInternalServerError)
+		h.WriteError(w, "invalid index id: "+indexID, http.StatusInternalServerError)
		return
	}

@ -635,7 +632,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
|
|||
return
|
||||
}
|
||||
q1 := orm.Query{
|
||||
Size: 1000,
|
||||
Size: 1000,
|
||||
WildcardIndex: true,
|
||||
}
|
||||
q1.Conds = orm.And(
|
||||
|
@ -651,9 +648,9 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
|
|||
summary := util.MapStr{}
|
||||
hit := response.Hits.Hits[0].Source
|
||||
var (
|
||||
shardsNum int
|
||||
shardsNum int
|
||||
replicasNum int
|
||||
indexInfo = util.MapStr{
|
||||
indexInfo = util.MapStr{
|
||||
"index": parts[1],
|
||||
}
|
||||
)
|
||||
|
@ -683,7 +680,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
|
|||
storeInBytes, _ := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "shard_stats", "store", "size_in_bytes"}, resultM)
|
||||
if docs, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "shard_stats", "docs", "count"}, resultM); ok {
|
||||
//summary["docs"] = docs
|
||||
if v, ok := docs.(float64); ok && primary == true{
|
||||
if v, ok := docs.(float64); ok && primary == true {
|
||||
shardSum.DocsCount += int64(v)
|
||||
}
|
||||
}
|
||||
|
@ -695,7 +692,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
|
|||
}
|
||||
if primary == true {
|
||||
shardSum.Shards++
|
||||
}else{
|
||||
} else {
|
||||
shardSum.Replicas++
|
||||
}
|
||||
}
|
||||
|
@@ -706,7 +703,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
		indexInfo["store_size"] = util.FormatBytes(float64(shardSum.StoreInBytes), 1)
		indexInfo["shards"] = shardSum.Shards + shardSum.Replicas

-		summary["unassigned_shards"] = (replicasNum + 1) * shardsNum - shardSum.Shards - shardSum.Replicas
+		summary["unassigned_shards"] = (replicasNum+1)*shardsNum - shardSum.Shards - shardSum.Replicas
	}
	summary["index_info"] = indexInfo
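Note: the reformatted formula reads as "expected shard copies minus observed": an index with shardsNum primaries and replicasNum replicas per primary should have (replicasNum+1)*shardsNum shard copies in total. For example, with shardsNum = 5 and replicasNum = 1 the cluster expects 10 copies; if 5 primaries and 3 replicas reported stats, unassigned_shards = (1+1)*5 - 5 - 3 = 2.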
@ -721,7 +718,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
|
|||
}
|
||||
indexName := ps.MustGetParameter("index")
|
||||
q1 := orm.Query{
|
||||
Size: 1000,
|
||||
Size: 1000,
|
||||
WildcardIndex: true,
|
||||
}
|
||||
clusterUUID, err := adapter.GetClusterUUID(clusterID)
|
||||
|
@ -742,7 +739,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
|
|||
err, result := orm.Search(&event.Event{}, &q1)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
h.WriteError(w,err.Error(), http.StatusInternalServerError )
|
||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var shards = []interface{}{}
|
||||
|
@ -756,7 +753,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
|
|||
err, nodesResult := orm.Search(elastic.NodeConfig{}, q)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
h.WriteError(w,err.Error(), http.StatusInternalServerError )
|
||||
h.WriteError(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
nodeIDToName := util.MapStr{}
|
||||
|
@ -803,7 +800,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
|
|||
primary, _ := shardM.GetValue("routing.primary")
|
||||
if primary == true {
|
||||
shardInfo["prirep"] = "p"
|
||||
}else{
|
||||
} else {
|
||||
shardInfo["prirep"] = "r"
|
||||
}
|
||||
shardInfo["state"], _ = shardM.GetValue("routing.state")
|
||||
|
@ -880,11 +877,11 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
|
|||
var metricType string
|
||||
if metricKey == v1.IndexHealthMetricKey {
|
||||
metricType = v1.MetricTypeClusterHealth
|
||||
}else{
|
||||
} else {
|
||||
//for agent mode
|
||||
metricType = v1.MetricTypeNodeStats
|
||||
}
|
||||
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, metricType,60)
|
||||
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, metricType, 60)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
resBody["error"] = err
|
||||
|
@ -892,7 +889,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
|
|||
return
|
||||
}
|
||||
if bucketSize <= 60 {
|
||||
min = min - int64(2 * bucketSize * 1000)
|
||||
min = min - int64(2*bucketSize*1000)
|
||||
}
|
||||
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
|
||||
du, err := time.ParseDuration(timeout)
|
||||
|
@ -947,14 +944,14 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
|
|||
return
|
||||
}
|
||||
metrics["shard_state"] = shardStateMetric
|
||||
}else if metricKey == v1.IndexHealthMetricKey {
|
||||
healthMetric, err := h.GetIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
h.WriteError(w, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
metrics["index_health"] = healthMetric
|
||||
} else if metricKey == v1.IndexHealthMetricKey {
|
||||
healthMetric, err := h.GetIndexHealthMetric(ctx, clusterID, indexName, min, max, bucketSize)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
h.WriteError(w, err, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
metrics["index_health"] = healthMetric
|
||||
} else {
|
||||
switch metricKey {
|
||||
case v1.IndexThroughputMetricKey:
|
||||
|
@ -1037,7 +1034,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
|
|||
minBucketSize, err := v1.GetMetricMinBucketSize(clusterID, metricType)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}else{
|
||||
} else {
|
||||
metrics[metricKey].MinBucketSize = int64(minBucketSize)
|
||||
}
|
||||
}
|
||||
|
@ -1047,8 +1044,8 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
|
|||
h.WriteJSON(w, resBody, http.StatusOK)
|
||||
}
|
||||
|
||||
func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
|
||||
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
|
||||
func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int) (*common.MetricItem, error) {
|
||||
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
|
||||
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -1101,14 +1098,14 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str
|
|||
"aggs": util.MapStr{
|
||||
"dates": util.MapStr{
|
||||
"date_histogram": util.MapStr{
|
||||
"field": "timestamp",
|
||||
"field": "timestamp",
|
||||
intervalField: bucketSizeStr,
|
||||
},
|
||||
"aggs": util.MapStr{
|
||||
"groups": util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "payload.elasticsearch.shard_stats.routing.state",
|
||||
"size": 10,
|
||||
"size": 10,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -1122,8 +1119,8 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str
|
|||
return nil, err
|
||||
}
|
||||
|
||||
metricItem:=newMetricItem("shard_state", 0, "")
|
||||
metricItem.AddLine("Shard State","Shard State","","group1","payload.elasticsearch.shard_stats.routing.state","max",bucketSizeStr,"","ratio","0.[00]","0.[00]",false,false)
|
||||
metricItem := newMetricItem("shard_state", 0, "")
|
||||
metricItem.AddLine("Shard State", "Shard State", "", "group1", "payload.elasticsearch.shard_stats.routing.state", "max", bucketSizeStr, "", "ratio", "0.[00]", "0.[00]", false, false)
|
||||
|
||||
metricData := []interface{}{}
|
||||
if response.StatusCode == 200 {
|
||||
|
@ -1140,7 +1137,7 @@ func (h *APIHandler) getIndexShardsMetric(ctx context.Context, id, indexName str
|
|||
}
|
||||
|
||||
func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
|
||||
resBody := map[string] interface{}{}
|
||||
resBody := map[string]interface{}{}
|
||||
id := ps.ByName("id")
|
||||
indexName := ps.ByName("index")
|
||||
if !h.IsIndexAllowed(req, id, indexName) {
|
||||
|
@ -1149,7 +1146,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
|
|||
}, http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
q := &orm.Query{ Size: 1}
|
||||
q := &orm.Query{Size: 1}
|
||||
q.AddSort("timestamp", orm.DESC)
|
||||
q.Conds = orm.And(
|
||||
orm.Eq("metadata.category", "elasticsearch"),
|
||||
|
@ -1161,13 +1158,13 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
|
|||
err, result := orm.Search(event.Event{}, q)
|
||||
if err != nil {
|
||||
resBody["error"] = err.Error()
|
||||
h.WriteJSON(w,resBody, http.StatusInternalServerError )
|
||||
h.WriteJSON(w, resBody, http.StatusInternalServerError)
|
||||
}
|
||||
namesM := util.MapStr{}
|
||||
if len(result.Result) > 0 {
|
||||
if data, ok := result.Result[0].(map[string]interface{}); ok {
|
||||
if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_routing_table"}, data); exists {
|
||||
if table, ok := routingTable.(map[string]interface{}); ok{
|
||||
if table, ok := routingTable.(map[string]interface{}); ok {
|
||||
if shardsM, ok := table["shards"].(map[string]interface{}); ok {
|
||||
for _, rows := range shardsM {
|
||||
if rowsArr, ok := rows.([]interface{}); ok {
|
||||
|
@ -1189,12 +1186,12 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
|
|||
}
|
||||
|
||||
//node uuid
|
||||
nodeIds := make([]interface{}, 0, len(namesM) )
|
||||
nodeIds := make([]interface{}, 0, len(namesM))
|
||||
for name, _ := range namesM {
|
||||
nodeIds = append(nodeIds, name)
|
||||
}
|
||||
|
||||
q1 := &orm.Query{ Size: 100}
|
||||
q1 := &orm.Query{Size: 100}
|
||||
q1.AddSort("timestamp", orm.DESC)
|
||||
q1.Conds = orm.And(
|
||||
orm.Eq("metadata.category", "elasticsearch"),
|
||||
|
@ -1204,7 +1201,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
|
|||
err, result = orm.Search(elastic.NodeConfig{}, q1)
|
||||
if err != nil {
|
||||
resBody["error"] = err.Error()
|
||||
h.WriteJSON(w,resBody, http.StatusInternalServerError )
|
||||
h.WriteJSON(w, resBody, http.StatusInternalServerError)
|
||||
}
|
||||
nodes := []interface{}{}
|
||||
for _, hit := range result.Result {
|
||||
|
@ -1224,11 +1221,11 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
|
|||
|
||||
if v, ok := nodeId.(string); ok {
|
||||
ninfo := util.MapStr{
|
||||
"id": v,
|
||||
"name": nodeName,
|
||||
"ip": ip,
|
||||
"port": port,
|
||||
"status": status,
|
||||
"id": v,
|
||||
"name": nodeName,
|
||||
"ip": ip,
|
||||
"port": port,
|
||||
"status": status,
|
||||
"timestamp": hitM["timestamp"],
|
||||
}
|
||||
nodes = append(nodes, ninfo)
|
||||
|
@ -1249,7 +1246,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
|
|||
}
|
||||
var must = []util.MapStr{}
|
||||
|
||||
if !util.StringInArray(ids, "*"){
|
||||
if !util.StringInArray(ids, "*") {
|
||||
|
||||
must = append(must, util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
|
@ -1260,9 +1257,8 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
|
|||
|
||||
if keyword != "" {
|
||||
must = append(must, util.MapStr{
|
||||
"wildcard":util.MapStr{
|
||||
"metadata.index_name":
|
||||
util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
|
||||
"wildcard": util.MapStr{
|
||||
"metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
@ -1288,7 +1284,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
|
|||
},
|
||||
}
|
||||
|
||||
|
||||
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
|
||||
indexName := orm.GetIndexName(elastic.IndexConfig{})
|
||||
resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl))
|
||||
|
@@ -1310,22 +1305,22 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
		return
	}

-//deleteIndexMetadata used to delete index metadata after index is deleted from cluster
+// deleteIndexMetadata used to delete index metadata after index is deleted from cluster
func (h APIHandler) deleteIndexMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
	esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
	indexName := orm.GetIndexName(elastic.IndexConfig{})
	must := []util.MapStr{
		{
			"term": util.MapStr{
				"metadata.labels.state": "delete",
			},
		},
	}
	if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri {
		if indexFilter != nil {
			must = append(must, indexFilter)
		}
-	}else{
+	} else {
		// has no index permission at all, just return
		h.WriteAckOKJSON(w)
		return
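Note: the pattern in this hunk, starting from a base state:delete term filter, tightening it with the caller's index-privilege filter, and bailing out early when the caller has no index privileges at all, is easy to get wrong. A reduced sketch of just the decision flow; allowedIndexFilter is a hypothetical stand-in for h.getAllowedIndexFilter(req):

package main

import "fmt"

type MapStr = map[string]interface{}

// allowedIndexFilter is a hypothetical stand-in for h.getAllowedIndexFilter(req):
// it returns an optional extra filter and whether the caller holds any index privilege.
func allowedIndexFilter() (MapStr, bool) {
	return MapStr{"terms": MapStr{"metadata.index_name": []string{"logs-*"}}}, true
}

func main() {
	must := []MapStr{
		{"term": MapStr{"metadata.labels.state": "delete"}},
	}
	if filter, hasPrivilege := allowedIndexFilter(); hasPrivilege {
		if filter != nil {
			must = append(must, filter)
		}
	} else {
		// no index permission at all: acknowledge and return without deleting
		fmt.Println("ack")
		return
	}
	fmt.Printf("delete-by-query with %d must clauses\n", len(must))
}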
@@ -27,6 +27,13 @@ import (
	"context"
	"encoding/json"
	"fmt"
+	"math"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+	"time"

	log "github.com/cihub/seelog"
	"infini.sh/console/core"
	v1 "infini.sh/console/modules/elastic/api/v1"

@@ -39,12 +46,6 @@ import (
	"infini.sh/framework/core/orm"
	"infini.sh/framework/core/util"
	"infini.sh/framework/modules/elastic/common"
-	"math"
-	"net/http"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
)

type APIHandler struct {
@ -534,13 +535,13 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
|
|||
key := h.GetParameter(req, "key")
|
||||
var metricType string
|
||||
switch key {
|
||||
case v1.IndexThroughputMetricKey, v1.SearchThroughputMetricKey, v1.IndexLatencyMetricKey, v1.SearchLatencyMetricKey, CircuitBreakerMetricKey,ShardStateMetricKey:
|
||||
case v1.IndexThroughputMetricKey, v1.SearchThroughputMetricKey, v1.IndexLatencyMetricKey, v1.SearchLatencyMetricKey, CircuitBreakerMetricKey, ShardStateMetricKey:
|
||||
metricType = v1.MetricTypeNodeStats
|
||||
case ClusterDocumentsMetricKey,
|
||||
ClusterStorageMetricKey,
|
||||
ClusterIndicesMetricKey,
|
||||
ClusterNodeCountMetricKey:
|
||||
metricType = v1.MetricTypeClusterStats
|
||||
metricType = v1.MetricTypeClusterStats
|
||||
case ClusterHealthMetricKey:
|
||||
metricType = v1.MetricTypeClusterStats
|
||||
case ShardCountMetricKey:
|
||||
|
@ -649,7 +650,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
|
|||
minBucketSize, err := v1.GetMetricMinBucketSize(id, metricType)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}else{
|
||||
} else {
|
||||
metrics[key].MinBucketSize = int64(minBucketSize)
|
||||
}
|
||||
}
|
||||
|
@ -700,7 +701,7 @@ func (h *APIHandler) HandleNodeMetricsAction(w http.ResponseWriter, req *http.Re
|
|||
minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}else{
|
||||
} else {
|
||||
metrics[key].MinBucketSize = int64(minBucketSize)
|
||||
}
|
||||
}
|
||||
|
@ -817,7 +818,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
|
|||
}
|
||||
|
||||
}
|
||||
}else{
|
||||
} else {
|
||||
metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, shardID, key)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
|
@ -830,7 +831,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
|
|||
minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}else{
|
||||
} else {
|
||||
metrics[key].MinBucketSize = int64(minBucketSize)
|
||||
}
|
||||
}
|
||||
|
@ -888,7 +889,7 @@ func (h *APIHandler) HandleQueueMetricsAction(w http.ResponseWriter, req *http.R
|
|||
minBucketSize, err := v1.GetMetricMinBucketSize(id, v1.MetricTypeNodeStats)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}else{
|
||||
} else {
|
||||
metrics[key].MinBucketSize = int64(minBucketSize)
|
||||
}
|
||||
}
|
||||
|
@@ -1015,20 +1016,20 @@ const (
)

const (
-	ClusterStorageMetricKey = "cluster_storage"
+	ClusterStorageMetricKey   = "cluster_storage"
	ClusterDocumentsMetricKey = "cluster_documents"
-	ClusterIndicesMetricKey = "cluster_indices"
+	ClusterIndicesMetricKey   = "cluster_indices"
	ClusterNodeCountMetricKey = "node_count"
-	ClusterHealthMetricKey = "cluster_health"
-	ShardCountMetricKey = "shard_count"
-	CircuitBreakerMetricKey = "circuit_breaker"
+	ClusterHealthMetricKey    = "cluster_health"
+	ShardCountMetricKey       = "shard_count"
+	CircuitBreakerMetricKey   = "circuit_breaker"
)

func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {

	var (
-		clusterMetricsResult = map[string]*common.MetricItem {}
-		err error
+		clusterMetricsResult = map[string]*common.MetricItem{}
+		err                  error
	)
	switch metricKey {
	case ClusterDocumentsMetricKey,
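Note: the constants above act as a routing table for GetClusterMetrics, which fans out on the metric key; grouped case labels share one query path. A reduced sketch of that dispatch pattern, with placeholder actions instead of the real query builders:

package main

import "fmt"

const (
	ClusterStorageMetricKey   = "cluster_storage"
	ClusterDocumentsMetricKey = "cluster_documents"
	ClusterHealthMetricKey    = "cluster_health"
)

func main() {
	metricKey := ClusterHealthMetricKey
	switch metricKey {
	case ClusterDocumentsMetricKey, ClusterStorageMetricKey:
		fmt.Println("fetch from cluster_stats payloads") // grouped keys share one path
	case ClusterHealthMetricKey:
		fmt.Println("fetch from cluster_health payloads")
	default:
		fmt.Println("unknown metric key:", metricKey)
	}
}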
@ -1343,7 +1344,7 @@ func (h *APIHandler) getCircuitBreakerMetric(ctx context.Context, id string, min
|
|||
"query": util.MapStr{
|
||||
"bool": util.MapStr{
|
||||
"minimum_should_match": 1,
|
||||
"should": should,
|
||||
"should": should,
|
||||
"must": []util.MapStr{
|
||||
{
|
||||
"term": util.MapStr{
|
||||
|
|
|
@ -112,7 +112,7 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
|
|||
func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) (map[string]*common.MetricItem, error) {
|
||||
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
|
||||
queryDSL := util.MustToJSONBytes(query)
|
||||
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(),nil, queryDSL)
|
||||
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -205,12 +205,12 @@ func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{
				dataKey = dataKey + "_deriv"
			}
			line.Data = grpMetricData[dataKey][line.Metric.Label]
-			if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
+			if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 {
				// remove first metric dot
				temp := v[1:]
				// also remove the last dot
				if len(temp) > 0 {
-					temp = temp[0: len(temp)-1]
+					temp = temp[0 : len(temp)-1]
				}
				line.Data = temp
			}
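Note: the slicing above drops the first and last data points of a series when buckets are small, since the edge buckets are usually only partially filled and would render as artificial dips. A minimal sketch of the same edge-trimming idea; trimEdges is an illustrative helper, not a function from this codebase:

package main

import "fmt"

// trimEdges mirrors the trimming above: drop the first and last points,
// which correspond to partially filled edge buckets.
func trimEdges(points [][]interface{}) [][]interface{} {
	if len(points) == 0 {
		return points
	}
	trimmed := points[1:]
	if len(trimmed) > 0 {
		trimmed = trimmed[0 : len(trimmed)-1]
	}
	return trimmed
}

func main() {
	series := [][]interface{}{{1, 10}, {2, 20}, {3, 30}, {4, 5}}
	fmt.Println(trimEdges(series)) // [[2 20] [3 30]]
}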
@ -369,9 +369,9 @@ func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common
|
|||
},
|
||||
}
|
||||
queryDSL := util.MustToJSONBytes(query)
|
||||
response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
|
||||
response, err := elastic.GetClient(clusterID).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var minDate, maxDate int64
|
||||
|
@ -429,12 +429,12 @@ func (h *APIHandler) getSingleMetrics(ctx context.Context, metricItems []*common
|
|||
for _, line := range metricItem.Lines {
|
||||
line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
|
||||
line.Data = metricData[line.Metric.GetDataKey()]
|
||||
if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
|
||||
if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 {
|
||||
// remove first metric dot
|
||||
temp := v[1:]
|
||||
// // remove first last dot
|
||||
if len(temp) > 0 {
|
||||
temp = temp[0: len(temp)-1]
|
||||
temp = temp[0 : len(temp)-1]
|
||||
}
|
||||
line.Data = temp
|
||||
}
|
||||
|
@ -912,13 +912,13 @@ func parseGroupMetricData(buckets []elastic.BucketBase, isPercent bool) ([]inter
|
|||
if bkMap, ok := statusBk.(map[string]interface{}); ok {
|
||||
statusKey := bkMap["key"].(string)
|
||||
count := bkMap["doc_count"].(float64)
|
||||
if isPercent{
|
||||
if isPercent {
|
||||
metricData = append(metricData, map[string]interface{}{
|
||||
"x": dateTime,
|
||||
"y": count / totalCount * 100,
|
||||
"g": statusKey,
|
||||
})
|
||||
}else{
|
||||
} else {
|
||||
metricData = append(metricData, map[string]interface{}{
|
||||
"x": dateTime,
|
||||
"y": count,
|
||||
|
@ -950,12 +950,12 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
|
|||
"field": line.Metric.Field,
|
||||
},
|
||||
}
|
||||
var sumBucketPath = "term_node>"+ line.Metric.ID
|
||||
var sumBucketPath = "term_node>" + line.Metric.ID
|
||||
aggs[line.Metric.ID] = leafAgg
|
||||
|
||||
sumAggs[line.Metric.ID] = util.MapStr{
|
||||
"sum_bucket": util.MapStr{
|
||||
"buckets_path": sumBucketPath,
|
||||
"buckets_path": sumBucketPath,
|
||||
},
|
||||
}
|
||||
if line.Metric.Field2 != "" {
|
||||
|
@ -966,9 +966,9 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
|
|||
}
|
||||
|
||||
aggs[line.Metric.ID+"_field2"] = leafAgg2
|
||||
sumAggs[line.Metric.ID + "_field2"] = util.MapStr{
|
||||
sumAggs[line.Metric.ID+"_field2"] = util.MapStr{
|
||||
"sum_bucket": util.MapStr{
|
||||
"buckets_path": sumBucketPath+"_field2",
|
||||
"buckets_path": sumBucketPath + "_field2",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -991,10 +991,10 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
|
|||
}
|
||||
}
|
||||
|
||||
sumAggs["term_node"]= util.MapStr{
|
||||
sumAggs["term_node"] = util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "metadata.labels.node_id",
|
||||
"size": 1000,
|
||||
"size": 1000,
|
||||
},
|
||||
"aggs": aggs,
|
||||
}
|
||||
|
@ -1015,7 +1015,7 @@ func (h *APIHandler) getSingleIndexMetricsByNodeStats(ctx context.Context, metri
|
|||
"aggs": sumAggs,
|
||||
},
|
||||
}
|
||||
return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
|
||||
return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap)
|
||||
}
|
||||
|
||||
func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) (map[string]*common.MetricItem, error) {
|
||||
|
@ -1035,11 +1035,11 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
|
|||
"field": line.Metric.Field,
|
||||
},
|
||||
}
|
||||
var sumBucketPath = "term_shard>"+ line.Metric.ID
|
||||
var sumBucketPath = "term_shard>" + line.Metric.ID
|
||||
aggs[line.Metric.ID] = leafAgg
|
||||
sumAggs[line.Metric.ID] = util.MapStr{
|
||||
"sum_bucket": util.MapStr{
|
||||
"buckets_path": sumBucketPath,
|
||||
"buckets_path": sumBucketPath,
|
||||
},
|
||||
}
|
||||
if line.Metric.Field2 != "" {
|
||||
|
@ -1050,9 +1050,9 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
|
|||
}
|
||||
aggs[line.Metric.ID+"_field2"] = leafAgg2
|
||||
|
||||
sumAggs[line.Metric.ID + "_field2"] = util.MapStr{
|
||||
sumAggs[line.Metric.ID+"_field2"] = util.MapStr{
|
||||
"sum_bucket": util.MapStr{
|
||||
"buckets_path": sumBucketPath+"_field2",
|
||||
"buckets_path": sumBucketPath + "_field2",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -1075,10 +1075,10 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
|
|||
}
|
||||
}
|
||||
|
||||
sumAggs["term_shard"]= util.MapStr{
|
||||
sumAggs["term_shard"] = util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "metadata.labels.shard_id",
|
||||
"size": 100000,
|
||||
"size": 100000,
|
||||
},
|
||||
"aggs": aggs,
|
||||
}
|
||||
|
@ -1092,7 +1092,7 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
|
|||
if len(metricItems) > 0 && len(metricItems[0].Lines) > 0 && metricItems[0].Lines[0].Metric.OnlyPrimary {
|
||||
query["query"] = util.MapStr{
|
||||
"bool": util.MapStr{
|
||||
"must": []util.MapStr{
|
||||
"must": []util.MapStr{
|
||||
query["query"].(util.MapStr),
|
||||
{"term": util.MapStr{"payload.elasticsearch.shard_stats.routing.primary": true}},
|
||||
},
|
||||
|
@ -1109,7 +1109,7 @@ func (h *APIHandler) getSingleIndexMetrics(ctx context.Context, metricItems []*c
|
|||
"aggs": sumAggs,
|
||||
},
|
||||
}
|
||||
return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize,metricData, metricItemsMap)
|
||||
return parseSingleIndexMetrics(ctx, clusterID, metricItems, query, bucketSize, metricData, metricItemsMap)
|
||||
}
|
||||
|
||||
func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int, metricData map[string][][]interface{}, metricItemsMap map[string]*common.MetricLine) (map[string]*common.MetricItem, error) {
|
||||
|
@ -1174,12 +1174,12 @@ func parseSingleIndexMetrics(ctx context.Context, clusterID string, metricItems
|
|||
for _, line := range metricItem.Lines {
|
||||
line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
|
||||
line.Data = metricData[line.Metric.GetDataKey()]
|
||||
if v, ok := line.Data.([][]interface{}); ok && len(v)> 0 && bucketSize <= 60 {
|
||||
if v, ok := line.Data.([][]interface{}); ok && len(v) > 0 && bucketSize <= 60 {
|
||||
// remove first metric dot
|
||||
temp := v[1:]
|
||||
// // remove first last dot
|
||||
if len(temp) > 0 {
|
||||
temp = temp[0: len(temp)-1]
|
||||
temp = temp[0 : len(temp)-1]
|
||||
}
|
||||
line.Data = temp
|
||||
}
|
||||
|
|
|
@ -33,83 +33,81 @@ import (
|
|||
)
|
||||
|
||||
func TestGetMetricParams(t *testing.T) {
|
||||
handler:=APIHandler{}
|
||||
req, err :=http.NewRequest("GET","https://infinilabs.com/api/?bucket_size=1m",nil)
|
||||
handler := APIHandler{}
|
||||
req, err := http.NewRequest("GET", "https://infinilabs.com/api/?bucket_size=1m", nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bucketSize, min, max, err:=handler.GetMetricRangeAndBucketSize(req,"", "",15)
|
||||
bucketSize, min, max, err := handler.GetMetricRangeAndBucketSize(req, "", "", 15)
|
||||
|
||||
fmt.Println(bucketSize)
|
||||
fmt.Println(util.FormatUnixTimestamp(min/1000))//2022-01-27 15:28:57
|
||||
fmt.Println(util.FormatUnixTimestamp(max/1000))//2022-01-27 15:28:57
|
||||
fmt.Println(time.Now())//2022-01-27 15:28:57
|
||||
fmt.Println(util.FormatUnixTimestamp(min / 1000)) //2022-01-27 15:28:57
|
||||
fmt.Println(util.FormatUnixTimestamp(max / 1000)) //2022-01-27 15:28:57
|
||||
fmt.Println(time.Now()) //2022-01-27 15:28:57
|
||||
|
||||
fmt.Println(bucketSize, min, max, err)
|
||||
}
|
||||
|
||||
func TestConvertBucketItemsToAggQueryParams(t *testing.T) {
|
||||
bucketItem:=common.BucketItem{}
|
||||
bucketItem.Key="key1"
|
||||
bucketItem.Type=common.TermsBucket
|
||||
bucketItem.Parameters=map[string]interface{}{}
|
||||
bucketItem.Parameters["field"]="metadata.labels.cluster_id"
|
||||
bucketItem.Parameters["size"]=2
|
||||
bucketItem := common.BucketItem{}
|
||||
bucketItem.Key = "key1"
|
||||
bucketItem.Type = common.TermsBucket
|
||||
bucketItem.Parameters = map[string]interface{}{}
|
||||
bucketItem.Parameters["field"] = "metadata.labels.cluster_id"
|
||||
bucketItem.Parameters["size"] = 2
|
||||
|
||||
nestBucket := common.BucketItem{}
|
||||
nestBucket.Key = "key2"
|
||||
nestBucket.Type = common.DateHistogramBucket
|
||||
nestBucket.Parameters = map[string]interface{}{}
|
||||
nestBucket.Parameters["field"] = "timestamp"
|
||||
nestBucket.Parameters["calendar_interval"] = "1d"
|
||||
nestBucket.Parameters["time_zone"] = "+08:00"
|
||||
|
||||
nestBucket:=common.BucketItem{}
|
||||
nestBucket.Key="key2"
|
||||
nestBucket.Type=common.DateHistogramBucket
|
||||
nestBucket.Parameters=map[string]interface{}{}
|
||||
nestBucket.Parameters["field"]="timestamp"
|
||||
nestBucket.Parameters["calendar_interval"]="1d"
|
||||
nestBucket.Parameters["time_zone"]="+08:00"
|
||||
|
||||
leafBucket:=common.NewBucketItem(common.TermsBucket,util.MapStr{
|
||||
"size":5,
|
||||
"field":"payload.elasticsearch.cluster_health.status",
|
||||
leafBucket := common.NewBucketItem(common.TermsBucket, util.MapStr{
|
||||
"size": 5,
|
||||
"field": "payload.elasticsearch.cluster_health.status",
|
||||
})
|
||||
|
||||
leafBucket.Key="key3"
|
||||
leafBucket.Key = "key3"
|
||||
|
||||
metricItems:=[]*common.MetricItem{}
|
||||
var bucketSizeStr ="10s"
|
||||
metricItem:=newMetricItem("cluster_summary", 2, "cluster")
|
||||
metricItem.Key="key4"
|
||||
metricItem.AddLine("Indexing","Total Indexing","Number of documents being indexed for primary and replica shards.","group1",
|
||||
"payload.elasticsearch.index_stats.total.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
|
||||
metricItem.AddLine("Search","Total Search","Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!","group1",
|
||||
"payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
|
||||
metricItems=append(metricItems,metricItem)
|
||||
metricItems := []*common.MetricItem{}
|
||||
var bucketSizeStr = "10s"
|
||||
metricItem := newMetricItem("cluster_summary", 2, "cluster")
|
||||
metricItem.Key = "key4"
|
||||
metricItem.AddLine("Indexing", "Total Indexing", "Number of documents being indexed for primary and replica shards.", "group1",
|
||||
"payload.elasticsearch.index_stats.total.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
|
||||
metricItem.AddLine("Search", "Total Search", "Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!", "group1",
|
||||
"payload.elasticsearch.index_stats.total.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
|
||||
metricItems = append(metricItems, metricItem)
|
||||
|
||||
nestBucket.AddNestBucket(leafBucket)
|
||||
nestBucket.Metrics=metricItems
|
||||
nestBucket.Metrics = metricItems
|
||||
|
||||
bucketItem.Buckets=[]*common.BucketItem{}
|
||||
bucketItem.Buckets=append(bucketItem.Buckets,&nestBucket)
|
||||
bucketItem.Buckets = []*common.BucketItem{}
|
||||
bucketItem.Buckets = append(bucketItem.Buckets, &nestBucket)
|
||||
|
||||
|
||||
aggs:=ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem},nil)
|
||||
aggs := ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem}, nil)
|
||||
fmt.Println(util.MustToJSON(aggs))
|
||||
|
||||
response:="{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }"
|
||||
res:=SearchResponse{}
|
||||
util.FromJSONBytes([]byte(response),&res)
|
||||
response := "{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }"
|
||||
res := SearchResponse{}
|
||||
util.FromJSONBytes([]byte(response), &res)
|
||||
fmt.Println(response)
|
||||
groupKey:="key1"
|
||||
metricLabelKey:="key2"
|
||||
metricValueKey:="c7qi5hii4h935v9bs920"
|
||||
data:=ParseAggregationResult(int(10),res.Aggregations,groupKey,metricLabelKey,metricValueKey)
|
||||
groupKey := "key1"
|
||||
metricLabelKey := "key2"
|
||||
metricValueKey := "c7qi5hii4h935v9bs920"
|
||||
data := ParseAggregationResult(int(10), res.Aggregations, groupKey, metricLabelKey, metricValueKey)
|
||||
fmt.Println(data)
|
||||
|
||||
}
|
||||
|
||||
func TestConvertBucketItems(t *testing.T) {
|
||||
response:="{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }"
|
||||
res:=SearchResponse{}
|
||||
util.FromJSONBytes([]byte(response),&res)
|
||||
response := "{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }"
|
||||
res := SearchResponse{}
|
||||
util.FromJSONBytes([]byte(response), &res)
|
||||
|
||||
data:=ParseAggregationBucketResult(int(10),res.Aggregations,"c7v2gm3i7638vvo4pv80","c7v2gm3i7638vvo4pv8g","c7v2gm3i7638vvo4pv90", func() {
|
||||
data := ParseAggregationBucketResult(int(10), res.Aggregations, "c7v2gm3i7638vvo4pv80", "c7v2gm3i7638vvo4pv8g", "c7v2gm3i7638vvo4pv90", func() {
|
||||
|
||||
})
|
||||
|
||||
|
(File diff suppressed because it is too large)
@ -45,40 +45,40 @@ import (
|
|||
)
|
||||
|
||||
func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
|
||||
resBody:=util.MapStr{}
|
||||
reqBody := struct{
|
||||
Keyword string `json:"keyword"`
|
||||
Size int `json:"size"`
|
||||
From int `json:"from"`
|
||||
Aggregations []elastic.SearchAggParam `json:"aggs"`
|
||||
Highlight elastic.SearchHighlightParam `json:"highlight"`
|
||||
Filter elastic.SearchFilterParam `json:"filter"`
|
||||
Sort []string `json:"sort"`
|
||||
SearchField string `json:"search_field"`
|
||||
resBody := util.MapStr{}
|
||||
reqBody := struct {
|
||||
Keyword string `json:"keyword"`
|
||||
Size int `json:"size"`
|
||||
From int `json:"from"`
|
||||
Aggregations []elastic.SearchAggParam `json:"aggs"`
|
||||
Highlight elastic.SearchHighlightParam `json:"highlight"`
|
||||
Filter elastic.SearchFilterParam `json:"filter"`
|
||||
Sort []string `json:"sort"`
|
||||
SearchField string `json:"search_field"`
|
||||
}{}
|
||||
err := h.DecodeJSON(req, &reqBody)
|
||||
if err != nil {
|
||||
resBody["error"] = err.Error()
|
||||
h.WriteJSON(w,resBody, http.StatusInternalServerError )
|
||||
h.WriteJSON(w, resBody, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
|
||||
aggs["term_cluster_id"] = util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "metadata.cluster_id",
|
||||
"size": 1000,
|
||||
"size": 1000,
|
||||
},
|
||||
"aggs": util.MapStr{
|
||||
"term_cluster_name": util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "metadata.cluster_name",
|
||||
"size": 1,
|
||||
"size": 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var should =[]util.MapStr{}
|
||||
if reqBody.SearchField != ""{
|
||||
var should = []util.MapStr{}
|
||||
if reqBody.SearchField != "" {
|
||||
should = []util.MapStr{
|
||||
{
|
||||
"prefix": util.MapStr{
|
||||
|
@ -101,7 +101,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
|
|||
},
|
||||
},
|
||||
}
|
||||
}else{
|
||||
} else {
|
||||
should = []util.MapStr{
|
||||
{
|
||||
"prefix": util.MapStr{
|
||||
|
@ -143,30 +143,25 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
|
|||
}
|
||||
clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id")
|
||||
if !hasPrivilege && clusterFilter == nil {
|
||||
h.WriteJSON(w, elastic.SearchResponse{
|
||||
|
||||
}, http.StatusOK)
|
||||
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
|
||||
return
|
||||
}
|
||||
must := []interface{}{
|
||||
}
|
||||
must := []interface{}{}
|
||||
if !hasPrivilege && clusterFilter != nil {
|
||||
must = append(must, clusterFilter)
|
||||
}
|
||||
|
||||
|
||||
|
||||
query := util.MapStr{
|
||||
"aggs": aggs,
|
||||
"size": reqBody.Size,
|
||||
"from": reqBody.From,
|
||||
"from": reqBody.From,
|
||||
"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
|
||||
"query": util.MapStr{
|
||||
"bool": util.MapStr{
|
||||
"minimum_should_match": 1,
|
||||
"filter": elastic.BuildSearchTermFilter(reqBody.Filter),
|
||||
"should": should,
|
||||
"must": must,
|
||||
"filter": elastic.BuildSearchTermFilter(reqBody.Filter),
|
||||
"should": should,
|
||||
"must": must,
|
||||
},
|
||||
},
|
||||
"sort": []util.MapStr{
|
||||
|
@ -178,7 +173,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
|
|||
},
|
||||
}
|
||||
if len(reqBody.Sort) > 1 {
|
||||
query["sort"] = []util.MapStr{
|
||||
query["sort"] = []util.MapStr{
|
||||
{
|
||||
reqBody.Sort[0]: util.MapStr{
|
||||
"order": reqBody.Sort[1],
|
||||
|
@ -190,7 +185,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
|
|||
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.NodeConfig{}), dsl)
|
||||
if err != nil {
|
||||
resBody["error"] = err.Error()
|
||||
h.WriteJSON(w,resBody, http.StatusInternalServerError )
|
||||
h.WriteJSON(w, resBody, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Write(util.MustToJSONBytes(response))
|
||||
|
@ -299,7 +294,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
source["shard_info"] = shardInfo
|
||||
}
|
||||
if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok {
|
||||
if clusterID, ok := tempClusterID.(string); ok {
|
||||
if clusterID, ok := tempClusterID.(string); ok {
|
||||
if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil {
|
||||
source["is_master_node"] = meta.ClusterState.MasterNode == nodeID
|
||||
}
|
||||
|
@ -317,28 +312,28 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
return
|
||||
}
|
||||
// indexing rate
|
||||
indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
|
||||
indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
|
||||
indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
nodeMetricItems := []GroupMetricItem{}
|
||||
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
|
||||
Key: "indexing",
|
||||
Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
|
||||
ID: util.GetUUID(),
|
||||
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
|
||||
Key: "indexing",
|
||||
Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: indexMetric,
|
||||
FormatType: "num",
|
||||
Units: "Indexing/s",
|
||||
MetricItem: indexMetric,
|
||||
FormatType: "num",
|
||||
Units: "Indexing/s",
|
||||
})
|
||||
queryMetric:=newMetricItem("search", 2, OperationGroupKey)
|
||||
queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
|
||||
Key: "search",
|
||||
Field: "payload.elasticsearch.node_stats.indices.search.query_total",
|
||||
ID: util.GetUUID(),
|
||||
queryMetric := newMetricItem("search", 2, OperationGroupKey)
|
||||
queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
|
||||
Key: "search",
|
||||
Field: "payload.elasticsearch.node_stats.indices.search.query_total",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: queryMetric,
|
||||
FormatType: "num",
|
||||
Units: "Search/s",
|
||||
MetricItem: queryMetric,
|
||||
FormatType: "num",
|
||||
Units: "Search/s",
|
||||
})
|
||||
|
||||
bucketSize := GetMinBucketSize()
|
||||
|
@ -346,11 +341,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
bucketSize = 60
|
||||
}
|
||||
var metricLen = 15
|
||||
aggs:=map[string]interface{}{}
|
||||
query=map[string]interface{}{}
|
||||
query["query"]=util.MapStr{
|
||||
aggs := map[string]interface{}{}
|
||||
query = map[string]interface{}{}
|
||||
query["query"] = util.MapStr{
|
||||
"bool": util.MapStr{
|
||||
"must": []util.MapStr{
|
||||
"must": []util.MapStr{
|
||||
{
|
||||
"term": util.MapStr{
|
||||
"metadata.category": util.MapStr{
|
||||
|
@ -375,7 +370,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
{
|
||||
"range": util.MapStr{
|
||||
"timestamp": util.MapStr{
|
||||
"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
|
||||
"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -383,15 +378,15 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
},
|
||||
}
|
||||
|
||||
for _,metricItem:=range nodeMetricItems{
|
||||
aggs[metricItem.ID]=util.MapStr{
|
||||
"max":util.MapStr{
|
||||
for _, metricItem := range nodeMetricItems {
|
||||
aggs[metricItem.ID] = util.MapStr{
|
||||
"max": util.MapStr{
|
||||
"field": metricItem.Field,
|
||||
},
|
||||
}
|
||||
if metricItem.IsDerivative{
|
||||
aggs[metricItem.ID+"_deriv"]=util.MapStr{
|
||||
"derivative":util.MapStr{
|
||||
if metricItem.IsDerivative {
|
||||
aggs[metricItem.ID+"_deriv"] = util.MapStr{
|
||||
"derivative": util.MapStr{
|
||||
"buckets_path": metricItem.ID,
|
||||
},
|
||||
}
|
||||
|
@ -403,8 +398,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
query["size"]=0
|
||||
query["aggs"]= util.MapStr{
|
||||
query["size"] = 0
|
||||
query["aggs"] = util.MapStr{
|
||||
"group_by_level": util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": "metadata.labels.node_id",
|
||||
|
@ -412,11 +407,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
},
|
||||
"aggs": util.MapStr{
|
||||
"dates": util.MapStr{
|
||||
"date_histogram":util.MapStr{
|
||||
"field": "timestamp",
|
||||
"date_histogram": util.MapStr{
|
||||
"field": "timestamp",
|
||||
intervalField: bucketSizeStr,
|
||||
},
|
||||
"aggs":aggs,
|
||||
"aggs": aggs,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -430,9 +425,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
|
|||
indexMetrics := map[string]util.MapStr{}
|
||||
for key, item := range metrics {
|
||||
for _, line := range item.Lines {
|
||||
if _, ok := indexMetrics[line.Metric.Label]; !ok{
|
||||
indexMetrics[line.Metric.Label] = util.MapStr{
|
||||
}
|
||||
if _, ok := indexMetrics[line.Metric.Label]; !ok {
|
||||
indexMetrics[line.Metric.Label] = util.MapStr{}
|
||||
}
|
||||
indexMetrics[line.Metric.Label][key] = line.Data
|
||||
}
|
||||
|
@ -493,7 +487,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
|
|||
// return
|
||||
//}
|
||||
q1 := orm.Query{
|
||||
Size: 1,
|
||||
Size: 1,
|
||||
WildcardIndex: true,
|
||||
}
|
||||
q1.Conds = orm.And(
|
||||
|
@ -518,7 +512,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
|
|||
tt, _ := time.Parse(time.RFC3339, ts)
|
||||
if time.Now().Sub(tt).Seconds() > 30 {
|
||||
kvs["status"] = "unavailable"
|
||||
}else{
|
||||
} else {
|
||||
kvs["status"] = "available"
|
||||
}
|
||||
}
|
||||
|
@ -536,9 +530,9 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
|
|||
|
||||
jvm, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_stats", "jvm"}, vresult)
|
||||
if ok {
|
||||
if jvmVal, ok := jvm.(map[string]interface{});ok {
|
||||
if jvmVal, ok := jvm.(map[string]interface{}); ok {
|
||||
kvs["jvm"] = util.MapStr{
|
||||
"mem": jvmVal["mem"],
|
||||
"mem": jvmVal["mem"],
|
||||
"uptime": jvmVal["uptime_in_millis"],
|
||||
}
|
||||
}
|
||||
|
@ -559,7 +553,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
|
|||
}
|
||||
}
|
||||
}
|
||||
if len( response.Hits.Hits) > 0 {
|
||||
if len(response.Hits.Hits) > 0 {
|
||||
hit := response.Hits.Hits[0]
|
||||
innerMetaData, _ := util.GetMapValueByKeys([]string{"metadata", "labels"}, hit.Source)
|
||||
if mp, ok := innerMetaData.(map[string]interface{}); ok {
|
||||
|
@ -593,15 +587,15 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
|
|||
}
|
||||
should := []util.MapStr{
|
||||
{
|
||||
"term":util.MapStr{
|
||||
"metadata.labels.cluster_id":util.MapStr{
|
||||
"term": util.MapStr{
|
||||
"metadata.labels.cluster_id": util.MapStr{
|
||||
"value": clusterID,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"term":util.MapStr{
|
||||
"metadata.labels.cluster_uuid":util.MapStr{
|
||||
"term": util.MapStr{
|
||||
"metadata.labels.cluster_uuid": util.MapStr{
|
||||
"value": clusterUUID,
|
||||
},
|
||||
},
|
||||
|
@ -632,19 +626,19 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
|
|||
},
|
||||
}
|
||||
resBody := map[string]interface{}{}
|
||||
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req,clusterID, v1.MetricTypeNodeStats,60)
|
||||
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, v1.MetricTypeNodeStats, 60)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
resBody["error"] = err
|
||||
h.WriteJSON(w, resBody, http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
query:=map[string]interface{}{}
|
||||
query["query"]=util.MapStr{
|
||||
query := map[string]interface{}{}
|
||||
query["query"] = util.MapStr{
|
||||
"bool": util.MapStr{
|
||||
"must": must,
|
||||
"must": must,
|
||||
"minimum_should_match": 1,
|
||||
"should": should,
|
||||
"should": should,
|
||||
"filter": []util.MapStr{
|
||||
{
|
||||
"range": util.MapStr{
|
||||
|
@ -658,8 +652,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
|
|||
},
|
||||
}
|
||||
|
||||
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
|
||||
metricItems:=[]*common.MetricItem{}
|
||||
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
|
||||
metricItems := []*common.MetricItem{}
|
||||
metricKey := h.GetParameter(req, "key")
|
||||
timeout := h.GetParameterOrDefault(req, "timeout", "60s")
|
||||
du, err := time.ParseDuration(timeout)
|
||||
|
@ -679,13 +673,13 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
|
|||
return
|
||||
}
|
||||
metrics["node_health"] = healthMetric
|
||||
}else if metricKey == ShardStateMetricKey {
|
||||
} else if metricKey == ShardStateMetricKey {
|
||||
query = util.MapStr{
|
||||
"size": 0,
|
||||
"query": util.MapStr{
|
||||
"bool": util.MapStr{
|
||||
"minimum_should_match": 1,
|
||||
"should": should,
|
||||
"should": should,
|
||||
"must": []util.MapStr{
|
||||
{
|
||||
"term": util.MapStr{
|
||||
|
@@ -729,74 +723,74 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
return
}
metrics["shard_state"] = shardStateMetric
}else{
} else {
switch metricKey {
case NodeProcessCPUMetricKey:
metricItem:=newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
metricItem := newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Process CPU", "Process CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.process.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("OS CPU", "OS CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.os.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItems = append(metricItems, metricItem)
case NodeCPUJVMMetricKey:
metricItem := newMetricItem("jvm", 2, SystemGroupKey)
metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
metricItem.AddAxi("JVM Heap", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Max Heap", "Max Heap", "JVM max Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Used Heap", "Used Heap", "JVM used Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItems = append(metricItems, metricItem)
case v1.IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 3, OperationGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Rate", "Total Shards", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case v1.SearchThroughputMetricKey:
metricItem := newMetricItem("search_throughput", 4, OperationGroupKey)
metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Search Rate","Total Shards",
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Total Shards",
"Number of search requests being executed.",
"group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
"group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
case v1.IndexLatencyMetricKey:
metricItem := newMetricItem("index_latency", 5, LatencyGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)

metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItems=append(metricItems,metricItem)
metricItems = append(metricItems, metricItem)
case v1.SearchLatencyMetricKey:
metricItem := newMetricItem("search_latency", 6, LatencyGroupKey)
metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)

metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItems=append(metricItems,metricItem)
metricItems = append(metricItems, metricItem)
case ParentBreakerMetricKey:
metricItem := newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem.AddLine("Parent Breaker Tripped", "Parent Breaker Tripped", "Rate of the circuit breaker has been triggered and prevented an out of memory error.", "group1", "payload.elasticsearch.node_stats.breakers.parent.tripped", "max", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
}

metrics, err = h.getSingleMetrics(ctx, metricItems,query, bucketSize)
metrics, err = h.getSingleMetrics(ctx, metricItems, query, bucketSize)
if err != nil {
log.Error(err)
h.WriteError(w, err, http.StatusInternalServerError)
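Note: the latency cases above all follow the same pattern, pairing a cumulative time counter (`Field`) with a matching operation counter (`Field2`) and deriving the plotted value in `Calc`. A minimal self-contained sketch of that pattern follows; the `MetricLine` type here is a stand-in for illustration, not the framework's real `common.MetricItem` types.

```go
package main

import "fmt"

// MetricLine is a hypothetical, simplified stand-in for the per-line
// metric config seen in the diff: Field holds the cumulative time
// counter, Field2 the matching op counter, and Calc derives the
// plotted value from the two aggregated bucket values.
type MetricLine struct {
	Field, Field2 string
	Calc          func(value, value2 float64) float64
}

func main() {
	line := MetricLine{
		Field:  "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis",
		Field2: "payload.elasticsearch.node_stats.indices.indexing.index_total",
		// average latency per operation = total time spent / number of ops
		Calc: func(value, value2 float64) float64 { return value / value2 },
	}
	// e.g. 1500ms spent indexing 300 docs in one bucket -> 5 ms per doc
	fmt.Println(line.Calc(1500, 300), "ms/doc")
}
```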
@@ -808,7 +802,7 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
minBucketSize, err := v1.GetMetricMinBucketSize(clusterID, v1.MetricTypeNodeStats)
if err != nil {
log.Error(err)
}else{
} else {
metrics[metricKey].MinBucketSize = int64(minBucketSize)
}
}

@@ -818,8 +812,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
h.WriteJSON(w, resBody, http.StatusOK)
}

func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err

@@ -828,14 +822,14 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize
query["aggs"] = util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"groups": util.MapStr{
"terms": util.MapStr{
"field": "payload.elasticsearch.shard_stats.routing.state",
"size": 10,
"size": 10,
},
},
},

@@ -848,8 +842,8 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize
return nil, err
}

metricItem:=newMetricItem("shard_state", 0, "")
metricItem.AddLine("Shard State","Shard State","","group1","payload.elasticsearch.shard_stats.routing.state","count",bucketSizeStr,"","ratio","0.[00]","0.[00]",false,false)
metricItem := newMetricItem("shard_state", 0, "")
metricItem.AddLine("Shard State", "Shard State", "", "group1", "payload.elasticsearch.shard_stats.routing.state", "count", bucketSizeStr, "", "ratio", "0.[00]", "0.[00]", false, false)

metricData := []interface{}{}
if response.StatusCode == 200 {

@@ -864,8 +858,8 @@ func getNodeShardStateMetric(ctx context.Context, query util.MapStr, bucketSize
return metricItem, nil
}

func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err

@@ -873,7 +867,7 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)
query["aggs"] = util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{

@@ -886,14 +880,14 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)
},
}
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
log.Error(err)
return nil, err
}

metricItem:=newMetricItem("node_health", 0, "")
metricItem.AddLine("Node health","Node Health","","group1","payload.elasticsearch.node_stats.jvm.uptime_in_millis","min",bucketSizeStr,"%","ratio","0.[00]","0.[00]",false,false)
metricItem := newMetricItem("node_health", 0, "")
metricItem.AddLine("Node health", "Node Health", "", "group1", "payload.elasticsearch.node_stats.jvm.uptime_in_millis", "min", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false)

metricData := []interface{}{}
if response.StatusCode == 200 {

@@ -923,7 +917,7 @@ func getNodeHealthMetric(ctx context.Context, query util.MapStr, bucketSize int)
return metricItem, nil
}

func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, error){
func getNodeOnlineStatusOfRecentDay(nodeIDs []string) (map[string][]interface{}, error) {
q := orm.Query{
WildcardIndex: true,
}
@@ -932,64 +926,64 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
"group_by_node_id": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.node_id",
"size": 100,
"size": 100,
},
"aggs": util.MapStr{
"uptime_histogram": util.MapStr{
"date_range": util.MapStr{
"date_range": util.MapStr{
"field": "timestamp",
"format": "yyyy-MM-dd",
"time_zone": "+08:00",
"ranges": []util.MapStr{
{
"from": "now-13d/d",
"to": "now-12d/d",
"to": "now-12d/d",
}, {
"from": "now-12d/d",
"to": "now-11d/d",
"to": "now-11d/d",
},
{
"from": "now-11d/d",
"to": "now-10d/d",
"to": "now-10d/d",
},
{
"from": "now-10d/d",
"to": "now-9d/d",
"to": "now-9d/d",
}, {
"from": "now-9d/d",
"to": "now-8d/d",
"to": "now-8d/d",
},
{
"from": "now-8d/d",
"to": "now-7d/d",
"to": "now-7d/d",
},
{
"from": "now-7d/d",
"to": "now-6d/d",
"to": "now-6d/d",
},
{
"from": "now-6d/d",
"to": "now-5d/d",
"to": "now-5d/d",
}, {
"from": "now-5d/d",
"to": "now-4d/d",
"to": "now-4d/d",
},
{
"from": "now-4d/d",
"to": "now-3d/d",
},{
"to": "now-3d/d",
}, {
"from": "now-3d/d",
"to": "now-2d/d",
"to": "now-2d/d",
}, {
"from": "now-2d/d",
"to": "now-1d/d",
"to": "now-1d/d",
}, {
"from": "now-1d/d",
"to": "now/d",
"to": "now/d",
},
{
"from": "now/d",
"to": "now",
"to": "now",
},
},
},
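Note: the aggregation above spells out fourteen day-aligned buckets by hand (thirteen full days plus the still-open current day). A loop can generate the same list; the sketch below is a hypothetical alternative for illustration, `buildDailyRanges` is not a helper in this commit.

```go
package main

import "fmt"

// buildDailyRanges produces the same day-aligned buckets the
// aggregation lists literally: days full day buckets, then a
// trailing partial bucket for the current, still-open day.
func buildDailyRanges(days int) []map[string]string {
	ranges := make([]map[string]string, 0, days+1)
	for i := days; i >= 1; i-- {
		to := fmt.Sprintf("now-%dd/d", i-1)
		if i == 1 {
			// yesterday's bucket ends at the start of today
			to = "now/d"
		}
		ranges = append(ranges, map[string]string{
			"from": fmt.Sprintf("now-%dd/d", i),
			"to":   to,
		})
	}
	// the final bucket covers the current day up to "now"
	return append(ranges, map[string]string{"from": "now/d", "to": "now"})
}

func main() {
	for _, r := range buildDailyRanges(13) {
		fmt.Println(r["from"], "->", r["to"])
	}
}
```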
@@ -1018,7 +1012,7 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte":"now-15d",
"gte": "now-15d",
"lte": "now",
},
},

@@ -1056,13 +1050,13 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
recentStatus[nodeKey] = []interface{}{}
if histogramAgg, ok := bk["uptime_histogram"].(map[string]interface{}); ok {
if bks, ok := histogramAgg["buckets"].([]interface{}); ok {
for _, bkItem := range bks {
for _, bkItem := range bks {
if bkVal, ok := bkItem.(map[string]interface{}); ok {
if minUptime, ok := util.GetMapValueByKeys([]string{"min_uptime", "value"}, bkVal); ok {
//mark node status as offline when uptime less than 10m
if v, ok := minUptime.(float64); ok && v >= 600000 {
recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "online"})
}else{
} else {
recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "offline"})
}
}

@@ -1080,10 +1074,10 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
max = h.GetParameterOrDefault(req, "max", "now")
)

resBody := map[string] interface{}{}
resBody := map[string]interface{}{}
id := ps.ByName("id")
nodeUUID := ps.ByName("node_id")
q := &orm.Query{ Size: 1}
q := &orm.Query{Size: 1}
q.AddSort("timestamp", orm.DESC)
q.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),

@@ -1095,16 +1089,16 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(event.Event{}, q)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
namesM := util.MapStr{}
if len(result.Result) > 0 {
if data, ok := result.Result[0].(map[string]interface{}); ok {
if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_routing_table"}, data); exists {
if rows, ok := routingTable.([]interface{}); ok{
if rows, ok := routingTable.([]interface{}); ok {
for _, row := range rows {
if v, ok := row.(map[string]interface{}); ok {
if indexName, ok := v["index"].(string); ok{
if indexName, ok := v["index"].(string); ok {
namesM[indexName] = true
}
}

@@ -1114,12 +1108,12 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
}
}

indexNames := make([]interface{}, 0, len(namesM) )
indexNames := make([]interface{}, 0, len(namesM))
for name, _ := range namesM {
indexNames = append(indexNames, name)
}

q1 := &orm.Query{ Size: 100}
q1 := &orm.Query{Size: 100}
q1.AddSort("timestamp", orm.DESC)
q1.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),

@@ -1130,28 +1124,29 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
err, result = orm.Search(elastic.IndexConfig{}, q1)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}

indices, err := h.getLatestIndices(req, min, max, id, &result)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}

h.WriteJSON(w, indices, http.StatusOK)
}

type ShardsSummary struct {
Index string `json:"index"`
Shards int `json:"shards"`
Replicas int `json:"replicas"`
DocsCount int64 `json:"docs_count"`
DocsDeleted int64 `json:"docs_deleted"`
StoreInBytes int64 `json:"store_in_bytes"`
PriStoreInBytes int64 `json:"pri_store_in_bytes"`
Timestamp interface{} `json:"timestamp"`
Index string `json:"index"`
Shards int `json:"shards"`
Replicas int `json:"replicas"`
DocsCount int64 `json:"docs_count"`
DocsDeleted int64 `json:"docs_deleted"`
StoreInBytes int64 `json:"store_in_bytes"`
PriStoreInBytes int64 `json:"pri_store_in_bytes"`
Timestamp interface{} `json:"timestamp"`
}

func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, clusterID string, result *orm.Result) ([]interface{}, error) {
//filter indices
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
@@ -1165,7 +1160,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,

query := util.MapStr{
"size": 10000,
"_source": []string{"metadata.labels.index_name", "payload.elasticsearch.shard_stats.docs","payload.elasticsearch.shard_stats.store", "payload.elasticsearch.shard_stats.routing", "timestamp"},
"_source": []string{"metadata.labels.index_name", "payload.elasticsearch.shard_stats.docs", "payload.elasticsearch.shard_stats.store", "payload.elasticsearch.shard_stats.routing", "timestamp"},
"collapse": util.MapStr{
"field": "metadata.labels.shard_id",
},

@@ -1240,7 +1235,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
if primary == true {
indexInfo.Shards++
}else{
} else {
indexInfo.Replicas++
}
indexInfo.Timestamp = hitM["timestamp"]

@@ -1249,7 +1244,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
indices := []interface{}{}
var indexPattern *radix.Pattern
if !hasAllPrivilege{
if !hasAllPrivilege {
indexPattern = radix.Compile(allowedIndices...)
}

@@ -1273,21 +1268,21 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
if indexInfos[v] != nil {
indices = append(indices, util.MapStr{
"index": v,
"status": state,
"health": health,
"timestamp": indexInfos[v].Timestamp,
"docs_count": indexInfos[v].DocsCount,
"shards": indexInfos[v].Shards,
"replicas": replicasNum,
"unassigned_shards": (replicasNum + 1) * shardsNum - indexInfos[v].Shards - replicasNum,
"store_size": util.FormatBytes(float64(indexInfos[v].StoreInBytes), 1),
"index": v,
"status": state,
"health": health,
"timestamp": indexInfos[v].Timestamp,
"docs_count": indexInfos[v].DocsCount,
"shards": indexInfos[v].Shards,
"replicas": replicasNum,
"unassigned_shards": (replicasNum+1)*shardsNum - indexInfos[v].Shards - replicasNum,
"store_size": util.FormatBytes(float64(indexInfos[v].StoreInBytes), 1),
})
} else {
indices = append(indices, util.MapStr{
"index": v,
"status": state,
"health": health,
"health": health,
"timestamp": hitM["timestamp"],
})
}

@@ -1297,7 +1292,6 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
return indices, nil
}

func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
if GetMonitorState(clusterID) == elastic.ModeAgentless {

@@ -1306,7 +1300,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
}
nodeID := ps.MustGetParameter("node_id")
q1 := orm.Query{
Size: 1000,
Size: 1000,
WildcardIndex: true,
CollapseField: "metadata.labels.shard_id",
}

@@ -1327,7 +1321,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(&event.Event{}, &q1)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError )
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
var shards = []interface{}{}

@@ -1360,7 +1354,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
primary, _ := shardM.GetValue("routing.primary")
if primary == true {
shardInfo["prirep"] = "p"
}else{
} else {
shardInfo["prirep"] = "r"
}
shardInfo["state"], _ = shardM.GetValue("routing.state")

@@ -1380,7 +1374,7 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
h.WriteJSON(w, shards, http.StatusOK)
}

//deleteNodeMetadata used to clean node metadata after node is offline and not active within 7 days
// deleteNodeMetadata used to clean node metadata after node is offline and not active within 7 days
func (h APIHandler) deleteNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
indexName := orm.GetIndexName(elastic.NodeConfig{})

@@ -278,5 +278,3 @@ func rewriteTableNamesOfSqlRequest(req *http.Request, distribution string) (stri
}
return strings.Join(unescapedTableNames, ","), nil
}

@@ -38,11 +38,10 @@ import (
"time"
)

func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
exists, client, err := h.GetClusterClient(targetClusterID)

if err != nil {
log.Error(err)
@@ -51,8 +50,8 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
return
}

if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return

@@ -69,7 +68,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
}
var body = map[string]interface{}{
"script": map[string]interface{}{
"lang": "mustache",
"lang": "mustache",
"source": template.Source,
},
}

@@ -89,7 +88,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
template.Created = time.Now()
template.Updated = template.Created
template.ClusterID = targetClusterID
index:=orm.GetIndexName(elastic.SearchTemplate{})
index := orm.GetIndexName(elastic.SearchTemplate{})
insertRes, err := esClient.Index(index, "", id, template, "wait_for")
if err != nil {
log.Error(err)

@@ -102,14 +101,13 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
resBody["_id"] = id
resBody["result"] = insertRes.Result

h.WriteJSON(w, resBody,http.StatusOK)
h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
exists, client, err := h.GetClusterClient(targetClusterID)

if err != nil {
log.Error(err)

@@ -118,8 +116,8 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
return
}

if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return

@@ -136,8 +134,8 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
}
templateID := ps.ByName("template_id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
index:=orm.GetIndexName(elastic.SearchTemplate{})
getRes, err := esClient.Get(index, "",templateID)
index := orm.GetIndexName(elastic.SearchTemplate{})
getRes, err := esClient.Get(index, "", templateID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()

@@ -197,9 +195,9 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req

ht := &elastic.SearchTemplateHistory{
TemplateID: templateID,
Action: "update",
Content: originTemplate,
Created: time.Now(),
Action: "update",
Content: originTemplate,
Created: time.Now(),
}
esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "")

@@ -207,14 +205,13 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
resBody["_id"] = templateID
resBody["result"] = insertRes.Result

h.WriteJSON(w, resBody,http.StatusOK)
h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()

@@ -222,8 +219,8 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req
return
}

if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return

@@ -231,7 +228,7 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req

templateID := ps.ByName("template_id")

index:=orm.GetIndexName(elastic.SearchTemplate{})
index := orm.GetIndexName(elastic.SearchTemplate{})
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
res, err := esClient.Get(index, "", templateID)
if err != nil {

@@ -258,9 +255,9 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req

ht := &elastic.SearchTemplateHistory{
TemplateID: templateID,
Action: "delete",
Content: res.Source,
Created: time.Now(),
Action: "delete",
Content: res.Source,
Created: time.Now(),
}
_, err = esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "wait_for")
if err != nil {

@@ -273,21 +270,20 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req

}

func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
var (
name = h.GetParameterOrDefault(req, "name", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
name = h.GetParameterOrDefault(req, "name", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
mustBuilder = &strings.Builder{}
)
from, _ := strconv.Atoi(strFrom)
size, _ := strconv.Atoi(strSize)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"match":{"cluster_id": "%s"}}`, targetClusterID))
if name != ""{
if name != "" {
mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"name": "%s"}}`, name))
}

@@ -305,8 +301,8 @@ func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req
h.WriteJSON(w, res, http.StatusOK)
}

func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{}
func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}

id := ps.ByName("template_id")
indexName := orm.GetIndexName(elastic.SearchTemplate{})

@@ -314,31 +310,30 @@ func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *h
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
if getResponse!=nil{
if getResponse != nil {
h.WriteJSON(w, resBody, getResponse.StatusCode)
}else{
} else {
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
return
}
h.WriteJSON(w,getResponse,200)
h.WriteJSON(w, getResponse, 200)
}

func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
var (
templateID = h.GetParameterOrDefault(req, "template_id", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
templateID = h.GetParameterOrDefault(req, "template_id", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
mustBuilder = &strings.Builder{}
)
from, _ := strconv.Atoi(strFrom)
size, _ := strconv.Atoi(strSize)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"match":{"content.cluster_id": "%s"}}`, targetClusterID))
if templateID != ""{
if templateID != "" {
mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"template_id": "%s"}}`, templateID))
}

@@ -356,11 +351,10 @@ func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWrit
h.WriteJSON(w, res, http.StatusOK)
}

func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()

@@ -368,8 +362,8 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http
return
}

if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return

@@ -394,11 +388,10 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http
h.WriteJSON(w, string(res), http.StatusOK)
}

func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()

@@ -406,8 +399,8 @@ func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http
return
}

if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return

@@ -36,8 +36,7 @@ import (
)

func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")

esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))

@@ -58,12 +57,11 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, []byte(queryDSL))
if len(searchRes.Hits.Hits) > 0 {
_, err = esClient.Index(indexName, "", searchRes.Hits.Hits[0].ID, reqParams, "wait_for")
}else{
} else {
reqParams.ID = util.GetUUID()
_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
}

if err != nil {
log.Error(err)
resBody["error"] = err

@@ -71,12 +69,11 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
return
}
resBody["acknowledged"] = true
h.WriteJSON(w, resBody ,http.StatusOK)
h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")

esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))

@@ -94,8 +91,8 @@ func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Req
var value interface{}
if len(searchRes.Hits.Hits) > 0 {
value = searchRes.Hits.Hits[0].Source["value"]
}else{
} else {
value = ""
}
h.WriteJSON(w, value ,http.StatusOK)
h.WriteJSON(w, value, http.StatusOK)
}

@@ -28,12 +28,12 @@
package api

import (
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/event"
"infini.sh/framework/core/orm"
"infini.sh/framework/modules/elastic/adapter"
"net/http"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
)

func (h *APIHandler) GetShardInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -36,7 +36,7 @@ import (
"src/github.com/buger/jsonparser"
)

func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
esClient := elastic.GetClient(clusterID)
templates, err := esClient.GetTemplate("")

@@ -48,7 +48,7 @@ func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Re
h.WriteJSON(w, templates, http.StatusOK)
}

func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
templateName := ps.MustGetParameter("template_name")
esClient := elastic.GetClient(clusterID)

@@ -66,7 +66,7 @@ func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.R
}
resErr, _, _, _ := jsonparser.Get(esResBody, "error")
if resErr != nil {
errStr := string(resErr)
errStr := string(resErr)
log.Errorf("put template error: %s", errStr)
h.WriteError(w, errStr, http.StatusInternalServerError)
return

@@ -35,57 +35,57 @@ import (
)

const (
ThreadPoolGetGroupKey = "thread_pool_get"
ThreadPoolSearchGroupKey = "thread_pool_search"
ThreadPoolFlushGroupKey = "thread_pool_flush"
ThreadPoolRefreshGroupKey = "thread_pool_refresh"
ThreadPoolGetGroupKey = "thread_pool_get"
ThreadPoolSearchGroupKey = "thread_pool_search"
ThreadPoolFlushGroupKey = "thread_pool_flush"
ThreadPoolRefreshGroupKey = "thread_pool_refresh"
ThreadPoolWriteGroupKey = "thread_pool_write"
ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
ThreadPoolIndexGroupKey = "thread_pool_index"
ThreadPoolBulkGroupKey = "thread_pool_bulk"
ThreadPoolIndexGroupKey = "thread_pool_index"
ThreadPoolBulkGroupKey = "thread_pool_bulk"
)

const (
SearchThreadsMetricKey = "search_threads"
IndexThreadsMetricKey = "index_threads"
BulkThreadsMetricKey = "bulk_threads"
FlushThreadsMetricKey = "flush_threads"
RefreshThreadsMetricKey = "refresh_threads"
WriteThreadsMetricKey = "write_threads"
ForceMergeThreadsMetricKey = "force_merge_threads"
SearchQueueMetricKey = "search_queue"
IndexQueueMetricKey = "index_queue"
BulkQueueMetricKey = "bulk_queue"
FlushQueueMetricKey = "flush_queue"
RefreshQueueMetricKey = "refresh_queue"
WriteQueueMetricKey = "write_queue"
SearchActiveMetricKey = "search_active"
IndexActiveMetricKey = "index_active"
BulkActiveMetricKey = "bulk_active"
FlushActiveMetricKey = "flush_active"
WriteActiveMetricKey = "write_active"
ForceMergeActiveMetricKey = "force_merge_active"
SearchRejectedMetricKey = "search_rejected"
IndexRejectedMetricKey = "index_rejected"
BulkRejectedMetricKey = "bulk_rejected"
FlushRejectedMetricKey = "flush_rejected"
WriteRejectedMetricKey = "write_rejected"
SearchThreadsMetricKey = "search_threads"
IndexThreadsMetricKey = "index_threads"
BulkThreadsMetricKey = "bulk_threads"
FlushThreadsMetricKey = "flush_threads"
RefreshThreadsMetricKey = "refresh_threads"
WriteThreadsMetricKey = "write_threads"
ForceMergeThreadsMetricKey = "force_merge_threads"
SearchQueueMetricKey = "search_queue"
IndexQueueMetricKey = "index_queue"
BulkQueueMetricKey = "bulk_queue"
FlushQueueMetricKey = "flush_queue"
RefreshQueueMetricKey = "refresh_queue"
WriteQueueMetricKey = "write_queue"
SearchActiveMetricKey = "search_active"
IndexActiveMetricKey = "index_active"
BulkActiveMetricKey = "bulk_active"
FlushActiveMetricKey = "flush_active"
WriteActiveMetricKey = "write_active"
ForceMergeActiveMetricKey = "force_merge_active"
SearchRejectedMetricKey = "search_rejected"
IndexRejectedMetricKey = "index_rejected"
BulkRejectedMetricKey = "bulk_rejected"
FlushRejectedMetricKey = "flush_rejected"
WriteRejectedMetricKey = "write_rejected"
ForceMergeRejectedMetricKey = "force_merge_rejected"
GetThreadsMetricKey = "get_threads"
GetQueueMetricKey = "get_queue"
GetActiveMetricKey = "get_active"
GetRejectedMetricKey = "get_rejected"
RefreshActiveMetricKey = "refresh_active"
RefreshRejectedMetricKey = "refresh_rejected"
ForceMergeQueueMetricKey = "force_merge_queue"
GetThreadsMetricKey = "get_threads"
GetQueueMetricKey = "get_queue"
GetActiveMetricKey = "get_active"
GetRejectedMetricKey = "get_rejected"
RefreshActiveMetricKey = "refresh_active"
RefreshRejectedMetricKey = "refresh_rejected"
ForceMergeQueueMetricKey = "force_merge_queue"
)

func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error){
func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string, bucketSize int, min, max int64, nodeName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
clusterUUID, err := h.getClusterUUID(clusterID)
if err != nil {
return nil, err
}
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
var must = []util.MapStr{
{
"term": util.MapStr{

@@ -108,7 +108,7 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
if nodeName != "" {
nodeNames = strings.Split(nodeName, ",")
top = len(nodeNames)
}else{
} else {
nodeNames, err = h.getTopNodeName(clusterID, top, 15)
if err != nil {
log.Error(err)

@@ -131,10 +131,9 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
},
},
},

})
}
should := []util.MapStr{
should := []util.MapStr{
{
"term": util.MapStr{
"metadata.labels.cluster_id": util.MapStr{

@@ -143,20 +142,20 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
},
},
{
"term":util.MapStr{
"metadata.labels.cluster_uuid":util.MapStr{
"term": util.MapStr{
"metadata.labels.cluster_uuid": util.MapStr{
"value": clusterUUID,
},
},
},
}

query:=map[string]interface{}{}
query["query"]=util.MapStr{
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": must,
"must": must,
"minimum_should_match": 1,
"should": should,
"should": should,
"filter": []util.MapStr{
{
"range": util.MapStr{
|
@ -173,159 +172,159 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
|
|||
switch metricKey {
|
||||
case SearchThreadsMetricKey:
|
||||
searchThreadsMetric := newMetricItem(SearchThreadsMetricKey, 1, ThreadPoolSearchGroupKey)
|
||||
searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
searchThreadsMetric.AddAxi("Search Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "search_threads",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
|
||||
ID: util.GetUUID(),
|
||||
Key: "search_threads",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: searchThreadsMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: searchThreadsMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case SearchQueueMetricKey:
|
||||
searchQueueMetric := newMetricItem(SearchQueueMetricKey, 1, ThreadPoolSearchGroupKey)
|
||||
searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
searchQueueMetric.AddAxi("Search Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "search_queue",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
|
||||
ID: util.GetUUID(),
|
||||
Key: "search_queue",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: searchQueueMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: searchQueueMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case SearchActiveMetricKey:
|
||||
searchActiveMetric := newMetricItem(SearchActiveMetricKey, 1, ThreadPoolSearchGroupKey)
|
||||
searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
searchActiveMetric.AddAxi("Search Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "search_active",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
|
||||
ID: util.GetUUID(),
|
||||
Key: "search_active",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: searchActiveMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: searchActiveMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case SearchRejectedMetricKey:
|
||||
searchRejectedMetric := newMetricItem(SearchRejectedMetricKey, 1, ThreadPoolSearchGroupKey)
|
||||
searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
searchRejectedMetric.AddAxi("Search Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "search_rejected",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
|
||||
ID: util.GetUUID(),
|
||||
Key: "search_rejected",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: searchRejectedMetric,
|
||||
FormatType: "num",
|
||||
Units: "rejected/s",
|
||||
MetricItem: searchRejectedMetric,
|
||||
FormatType: "num",
|
||||
Units: "rejected/s",
|
||||
})
|
||||
case GetThreadsMetricKey:
|
||||
getThreadsMetric := newMetricItem(GetThreadsMetricKey, 1, ThreadPoolGetGroupKey)
|
||||
getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
getThreadsMetric.AddAxi("Get Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "get_threads",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
|
||||
ID: util.GetUUID(),
|
||||
Key: "get_threads",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: getThreadsMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: getThreadsMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case GetQueueMetricKey:
|
||||
getQueueMetric := newMetricItem(GetQueueMetricKey, 1, ThreadPoolGetGroupKey)
|
||||
getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
getQueueMetric.AddAxi("Get Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "get_queue",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
|
||||
ID: util.GetUUID(),
|
||||
Key: "get_queue",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: getQueueMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: getQueueMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case GetActiveMetricKey:
|
||||
getActiveMetric := newMetricItem(GetActiveMetricKey, 1, ThreadPoolGetGroupKey)
|
||||
getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
getActiveMetric.AddAxi("Get Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "get_active",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
|
||||
ID: util.GetUUID(),
|
||||
Key: "get_active",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: getActiveMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: getActiveMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case GetRejectedMetricKey:
|
||||
getRejectedMetric := newMetricItem(GetRejectedMetricKey, 1, ThreadPoolGetGroupKey)
|
||||
getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
getRejectedMetric.AddAxi("Get Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "get_rejected",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
|
||||
ID: util.GetUUID(),
|
||||
Key: "get_rejected",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: getRejectedMetric,
|
||||
FormatType: "num",
|
||||
Units: "rejected/s",
|
||||
MetricItem: getRejectedMetric,
|
||||
FormatType: "num",
|
||||
Units: "rejected/s",
|
||||
})
|
||||
case FlushThreadsMetricKey:
|
||||
flushThreadsMetric := newMetricItem(FlushThreadsMetricKey, 1, ThreadPoolFlushGroupKey)
|
||||
flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
flushThreadsMetric.AddAxi("Flush Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "flush_threads",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
|
||||
ID: util.GetUUID(),
|
||||
Key: "flush_threads",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: flushThreadsMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: flushThreadsMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case FlushQueueMetricKey:
|
||||
flushQueueMetric := newMetricItem(FlushQueueMetricKey, 1, ThreadPoolFlushGroupKey)
|
||||
flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
flushQueueMetric.AddAxi("Get Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "flush_queue",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
|
||||
ID: util.GetUUID(),
|
||||
Key: "flush_queue",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: flushQueueMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: flushQueueMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case FlushActiveMetricKey:
|
||||
flushActiveMetric := newMetricItem(FlushActiveMetricKey, 1, ThreadPoolFlushGroupKey)
|
||||
flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
flushActiveMetric.AddAxi("Flush Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "flush_active",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
|
||||
ID: util.GetUUID(),
|
||||
Key: "flush_active",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: flushActiveMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: flushActiveMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
|
||||
case FlushRejectedMetricKey:
|
||||
flushRejectedMetric := newMetricItem(FlushRejectedMetricKey, 1, ThreadPoolFlushGroupKey)
|
||||
flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
flushRejectedMetric.AddAxi("Flush Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
|
||||
queueMetricItems = append(queueMetricItems, GroupMetricItem{
|
||||
Key: "flush_rejected",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
|
||||
ID: util.GetUUID(),
|
||||
Key: "flush_rejected",
|
||||
Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: flushRejectedMetric,
|
||||
FormatType: "num",
|
||||
Units: "rejected/s",
|
||||
MetricItem: flushRejectedMetric,
|
||||
FormatType: "num",
|
||||
Units: "rejected/s",
|
||||
})
|
||||
case IndexThreadsMetricKey:
|
||||
indexThreadsMetric := newMetricItem(IndexThreadsMetricKey, 1, ThreadPoolIndexGroupKey)
|
||||
|
@ -485,137 +484,136 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
    })
case RefreshThreadsMetricKey:
    refreshThreadsMetric := newMetricItem(RefreshThreadsMetricKey, 1, ThreadPoolRefreshGroupKey)
    refreshThreadsMetric.AddAxi("Refresh Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "refresh_threads",
        Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
        ID:           util.GetUUID(),
        IsDerivative: false,
        MetricItem:   refreshThreadsMetric,
        FormatType:   "num",
        Units:        "",
    })
case RefreshQueueMetricKey:
    refreshQueueMetric := newMetricItem(RefreshQueueMetricKey, 1, ThreadPoolRefreshGroupKey)
    refreshQueueMetric.AddAxi("Refresh Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "refresh_queue",
        Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
        ID:           util.GetUUID(),
        IsDerivative: false,
        MetricItem:   refreshQueueMetric,
        FormatType:   "num",
        Units:        "",
    })
case RefreshActiveMetricKey:
    refreshActiveMetric := newMetricItem(RefreshActiveMetricKey, 1, ThreadPoolRefreshGroupKey)
    refreshActiveMetric.AddAxi("Refresh Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "refresh_active",
        Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.active",
        ID:           util.GetUUID(),
        IsDerivative: false,
        MetricItem:   refreshActiveMetric,
        FormatType:   "num",
        Units:        "",
    })
case RefreshRejectedMetricKey:
    refreshRejectedMetric := newMetricItem(RefreshRejectedMetricKey, 1, ThreadPoolRefreshGroupKey)
    refreshRejectedMetric.AddAxi("Refresh Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "refresh_rejected",
        Field:        "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
        ID:           util.GetUUID(),
        IsDerivative: true,
        MetricItem:   refreshRejectedMetric,
        FormatType:   "num",
        Units:        "rejected/s",
    })
case ForceMergeThreadsMetricKey:
    forceMergeThreadsMetric := newMetricItem(ForceMergeThreadsMetricKey, 1, ThreadPoolForceMergeGroupKey)
    forceMergeThreadsMetric.AddAxi("Force Merge Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "force_merge_threads",
        Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
        ID:           util.GetUUID(),
        IsDerivative: false,
        MetricItem:   forceMergeThreadsMetric,
        FormatType:   "num",
        Units:        "",
    })
case ForceMergeQueueMetricKey:
    forceMergeQueueMetric := newMetricItem(ForceMergeQueueMetricKey, 1, ThreadPoolForceMergeGroupKey)
    forceMergeQueueMetric.AddAxi("Force Merge Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "force_merge_queue",
        Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
        ID:           util.GetUUID(),
        IsDerivative: false,
        MetricItem:   forceMergeQueueMetric,
        FormatType:   "num",
        Units:        "",
    })
case ForceMergeActiveMetricKey:
    forceMergeActiveMetric := newMetricItem(ForceMergeActiveMetricKey, 1, ThreadPoolForceMergeGroupKey)
    forceMergeActiveMetric.AddAxi("Force Merge Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "force_merge_active",
        Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
        ID:           util.GetUUID(),
        IsDerivative: false,
        MetricItem:   forceMergeActiveMetric,
        FormatType:   "num",
        Units:        "",
    })
case ForceMergeRejectedMetricKey:
    forceMergeRejectedMetric := newMetricItem(ForceMergeRejectedMetricKey, 1, ThreadPoolForceMergeGroupKey)
    forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)

    queueMetricItems = append(queueMetricItems, GroupMetricItem{
        Key:          "force_merge_rejected",
        Field:        "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
        ID:           util.GetUUID(),
        IsDerivative: true,
        MetricItem:   forceMergeRejectedMetric,
        FormatType:   "num",
        Units:        "rejected/s",
    })
}

// Get Thread Pool queue
aggs := map[string]interface{}{}

for _, metricItem := range queueMetricItems {
    aggs[metricItem.ID] = util.MapStr{
        "max": util.MapStr{
            "field": metricItem.Field,
        },
    }
    if metricItem.Field2 != "" {
        aggs[metricItem.ID+"_field2"] = util.MapStr{
            "max": util.MapStr{
                "field": metricItem.Field2,
            },
        }
    }

    if metricItem.IsDerivative {
        aggs[metricItem.ID+"_deriv"] = util.MapStr{
            "derivative": util.MapStr{
                "buckets_path": metricItem.ID,
            },
        }
        if metricItem.Field2 != "" {
            aggs[metricItem.ID+"_field2_deriv"] = util.MapStr{
                "derivative": util.MapStr{
                    "buckets_path": metricItem.ID + "_field2",
                },
            }
        }
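To make the shape of the generated aggregation concrete, here is a minimal, self-contained sketch of what this loop emits for a derivative metric such as get_rejected. It uses plain map[string]interface{} as a stand-in for util.MapStr and a fixed placeholder ID in place of util.GetUUID():

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Hypothetical stand-ins for one GroupMetricItem.
    id := "metric-1"
    field := "payload.elasticsearch.node_stats.thread_pool.get.rejected"

    aggs := map[string]interface{}{}
    // Per-bucket max of the raw cumulative counter.
    aggs[id] = map[string]interface{}{
        "max": map[string]interface{}{"field": field},
    }
    // Pipeline aggregation: per-bucket delta derived from the counter.
    aggs[id+"_deriv"] = map[string]interface{}{
        "derivative": map[string]interface{}{"buckets_path": id},
    }

    out, _ := json.MarshalIndent(aggs, "", "  ")
    fmt.Println(string(out))
}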
@ -628,8 +626,8 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
panic(err)
}

query["size"] = 0
query["aggs"] = util.MapStr{
    "group_by_level": util.MapStr{
        "terms": util.MapStr{
            "field": "metadata.labels.transport_address",
@ -637,11 +635,11 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
        },
        "aggs": util.MapStr{
            "dates": util.MapStr{
                "date_histogram": util.MapStr{
                    "field":       "timestamp",
                    intervalField: bucketSizeStr,
                },
                "aggs": aggs,
            },
        },
    },
},
@ -38,10 +38,9 @@ import (
)

func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
    resBody := map[string]interface{}{}
    targetClusterID := ps.ByName("id")
    exists, client, err := h.GetClusterClient(targetClusterID)

    if err != nil {
        log.Error(err)
@ -50,16 +49,14 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
    return
}

if !exists {
    resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
    log.Error(resBody["error"])
    h.WriteJSON(w, resBody, http.StatusNotFound)
    return
}

var traceReq = &elastic.TraceTemplate{}

err = h.DecodeJSON(req, traceReq)
if err != nil {
@ -84,22 +81,21 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
resBody["_id"] = insertRes.ID
resBody["result"] = insertRes.Result

h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
    resBody := map[string]interface{}{}
    var (
        name        = h.GetParameterOrDefault(req, "name", "")
        queryDSL    = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
        strSize     = h.GetParameterOrDefault(req, "size", "20")
        strFrom     = h.GetParameterOrDefault(req, "from", "0")
        mustBuilder = &strings.Builder{}
    )
    targetClusterID := ps.ByName("id")
    mustBuilder.WriteString(fmt.Sprintf(`{"term":{"cluster_id":{"value": "%s"}}}`, targetClusterID))
    if name != "" {
        mustBuilder.WriteString(fmt.Sprintf(`,{"prefix":{"name": "%s"}}`, name))
    }
    size, _ := strconv.Atoi(strSize)
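The handler assembles the search DSL with fmt.Sprintf and a strings.Builder rather than building a struct. A self-contained sketch of the same assembly with placeholder inputs, printing the final query body:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Placeholder inputs; in the handler these come from the route and
    // the request's query parameters.
    targetClusterID := "cluster-1"
    name := "my-template"
    queryDSL := `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`

    mustBuilder := &strings.Builder{}
    mustBuilder.WriteString(fmt.Sprintf(`{"term":{"cluster_id":{"value": "%s"}}}`, targetClusterID))
    if name != "" {
        // Optional prefix filter, appended only when a name was supplied.
        mustBuilder.WriteString(fmt.Sprintf(`,{"prefix":{"name": "%s"}}`, name))
    }

    fmt.Printf(queryDSL+"\n", mustBuilder.String(), 20, 0)
}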
@ -126,8 +122,7 @@ func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req
}

func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
    resBody := map[string]interface{}{}

    reqParams := elastic.TraceTemplate{}
    err := h.DecodeJSON(req, &reqParams)
@ -140,7 +135,7 @@ func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *h
reqParams.ID = ps.ByName("template_id")
reqParams.Updated = time.Now()
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
_, err = esClient.Index(orm.GetIndexName(reqParams), "", reqParams.ID, reqParams, "wait_for")
if err != nil {
    log.Error(err)
    resBody["error"] = err.Error()
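The trailing "wait_for" argument maps to Elasticsearch's refresh policy: the indexing call does not return until a refresh has made the document visible to search. At the REST level this is just a query parameter; a sketch of the equivalent raw request, with placeholder index and id values:

package main

import "fmt"

func main() {
    // Hypothetical values; the handler derives them from orm.GetIndexName
    // and the template_id route parameter.
    indexName := "trace_template"
    docID := "tpl-1"
    // refresh=wait_for blocks the response until the doc is searchable.
    fmt.Printf("PUT /%s/_doc/%s?refresh=wait_for\n", indexName, docID)
}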
@ -152,11 +147,11 @@ func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *h
resBody["result"] = "updated"
resBody["_source"] = reqParams

h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
    resBody := map[string]interface{}{}

    id := ps.ByName("template_id")
    indexName := orm.GetIndexName(elastic.TraceTemplate{})
@ -166,7 +161,7 @@ func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *ht
    resBody["error"] = err.Error()
    h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
h.WriteJSON(w, getResponse, getResponse.StatusCode)
}

func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -177,9 +172,9 @@ func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req
if err != nil {
    log.Error(err)
    resBody["error"] = err.Error()
    if delRes != nil {
        h.WriteJSON(w, resBody, delRes.StatusCode)
    } else {
        h.WriteJSON(w, resBody, http.StatusInternalServerError)
    }
}
@ -178,7 +178,7 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
}
histgram := common.NewBucketItem(
    common.DateHistogramBucket, util.MapStr{
        "field":       "timestamp",
        intervalField: bucketSizeStr,
    })
histgram.AddMetricItems(metricItems...)
@ -669,8 +669,8 @@ type RealtimeNodeInfo struct {
func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map[string]util.MapStr, error) {
    ver := h.Client().GetVersion()
    bucketSizeStr := fmt.Sprintf("%ds", bucketSizeInSeconds)
    intervalField, err := elastic.GetDateHistogramIntervalField(ver.Distribution, ver.Number, bucketSizeStr)
    if err != nil {
        return nil, err
    }
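elastic.GetDateHistogramIntervalField comes from the framework; the sketch below is a simplified, hypothetical stand-in that captures the underlying idea, namely that Elasticsearch 7.x replaced the date_histogram "interval" parameter with "fixed_interval"/"calendar_interval", so the parameter name has to be chosen per version:

package main

import (
    "fmt"
    "strings"
)

// pickIntervalField is a hypothetical, simplified stand-in for
// elastic.GetDateHistogramIntervalField: pre-7.x Elasticsearch uses
// "interval", newer versions use "fixed_interval" for duration buckets.
func pickIntervalField(distribution, version string) string {
    if distribution == "" || distribution == "elasticsearch" {
        if strings.HasPrefix(version, "5.") || strings.HasPrefix(version, "6.") {
            return "interval"
        }
    }
    return "fixed_interval"
}

func main() {
    fmt.Println(pickIntervalField("elasticsearch", "6.8.23")) // interval
    fmt.Println(pickIntervalField("elasticsearch", "7.10.2")) // fixed_interval
}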
@ -685,7 +685,7 @@ func (h *APIHandler) getIndexQPS(clusterID string, bucketSizeInSeconds int) (map
"aggs": util.MapStr{
    "date": util.MapStr{
        "date_histogram": util.MapStr{
            "field":       "timestamp",
            intervalField: "10s",
        },
        "aggs": util.MapStr{
@ -775,9 +775,9 @@ func (h *APIHandler) QueryQPS(query util.MapStr, bucketSizeInSeconds int) (map[s
    maxIndexRate      float64
    maxQueryRate      float64
    maxIndexBytesRate float64
    preIndexTotal     float64
    dropNext          bool
    maxTimestamp      float64
)
for _, dateBk := range bks {
    if dateBkVal, ok := dateBk.(map[string]interface{}); ok {
@ -786,11 +786,11 @@ func (h *APIHandler) QueryQPS(query util.MapStr, bucketSizeInSeconds int) (map[s
if preIndexTotal > 0 {
    // if the value of indexTotal is decreasing (e.g. a counter reset),
    // drop both the current and the next qps value
    if indexTotalVal-preIndexTotal < 0 {
        dropNext = true
        preIndexTotal = indexTotalVal
        continue
    } else {
        dropNext = false
    }
}
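A compact, self-contained sketch of the same counter-reset handling, turning cumulative totals into per-step deltas and skipping the sample that follows a reset (inputs are assumed values):

package main

import "fmt"

// rates turns cumulative counter samples into per-step deltas, dropping a
// step whenever the counter goes backwards (e.g. a node restart), in the
// same spirit as the dropNext handling above.
func rates(totals []float64) []float64 {
    var out []float64
    var pre float64
    dropNext := false
    for i, v := range totals {
        if i == 0 {
            pre = v
            continue
        }
        if v-pre < 0 { // counter reset: drop current and next delta
            dropNext = true
            pre = v
            continue
        }
        if dropNext {
            dropNext = false
            pre = v
            continue
        }
        out = append(out, v-pre)
        pre = v
    }
    return out
}

func main() {
    fmt.Println(rates([]float64{100, 150, 30, 60, 90})) // [50 30]
}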
@ -866,11 +866,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
{
    "match": util.MapStr{
        reqBody.SearchField: util.MapStr{
            "query":          reqBody.Keyword,
            "fuzziness":      "AUTO",
            "max_expansions": 10,
            "prefix_length":  2,
            "boost":          2,
        },
    },
},
@ -912,11 +912,11 @@ func (h *APIHandler) SearchClusterMetadata(w http.ResponseWriter, req *http.Requ
{
    "match": util.MapStr{
        "search_text": util.MapStr{
            "query":          reqBody.Keyword,
            "fuzziness":      "AUTO",
            "max_expansions": 10,
            "prefix_length":  2,
            "boost":          2,
        },
    },
},
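The same fuzzy match clause as a standalone sketch, with plain map[string]interface{} in place of util.MapStr, a placeholder keyword, and the role of each parameter noted:

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    keyword := "elastic" // placeholder for reqBody.Keyword
    clause := map[string]interface{}{
        "match": map[string]interface{}{
            "search_text": map[string]interface{}{
                "query":          keyword,
                "fuzziness":      "AUTO", // edit distance scales with term length
                "max_expansions": 10,     // cap on fuzzy term expansions
                "prefix_length":  2,      // first two characters must match exactly
                "boost":          2,
            },
        },
    }
    out, _ := json.Marshal(clause)
    fmt.Println(string(out))
}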
@ -39,54 +39,53 @@ import (
)

const (
    IndexStorageMetricKey             = "index_storage"
    SegmentCountMetricKey             = "segment_count"
    DocCountMetricKey                 = "doc_count"
    DocsDeletedMetricKey              = "docs_deleted"
    QueryTimesMetricKey               = "query_times"
    FetchTimesMetricKey               = "fetch_times"
    ScrollTimesMetricKey              = "scroll_times"
    MergeTimesMetricKey               = "merge_times"
    RefreshTimesMetricKey             = "refresh_times"
    FlushTimesMetricKey               = "flush_times"
    IndexingRateMetricKey             = "indexing_rate"
    IndexingBytesMetricKey            = "indexing_bytes"
    IndexingLatencyMetricKey          = "indexing_latency"
    QueryLatencyMetricKey             = "query_latency"
    FetchLatencyMetricKey             = "fetch_latency"
    MergeLatencyMetricKey             = "merge_latency"
    RefreshLatencyMetricKey           = "refresh_latency"
    ScrollLatencyMetricKey            = "scroll_latency"
    FlushLatencyMetricKey             = "flush_latency"
    QueryCacheMetricKey               = "query_cache"
    RequestCacheMetricKey             = "request_cache"
    RequestCacheHitMetricKey          = "request_cache_hit"
    RequestCacheMissMetricKey         = "request_cache_miss"
    QueryCacheCountMetricKey          = "query_cache_count"
    QueryCacheHitMetricKey            = "query_cache_hit"
    QueryCacheMissMetricKey           = "query_cache_miss"
    FielddataCacheMetricKey           = "fielddata_cache"
    SegmentMemoryMetricKey            = "segment_memory"
    SegmentDocValuesMemoryMetricKey   = "segment_doc_values_memory"
    SegmentTermsMemoryMetricKey       = "segment_terms_memory"
    SegmentFieldsMemoryMetricKey      = "segment_fields_memory"
    SegmentIndexWriterMemoryMetricKey = "segment_index_writer_memory"
    SegmentTermVectorsMemoryMetricKey = "segment_term_vectors_memory"
    DocPercentMetricKey               = "doc_percent"
    SegmentNormsMetricKey             = "segment_norms_memory"
    SegmentPointsMetricKey            = "segment_points_memory"
    VersionMapMetricKey               = "segment_version_map"
    FixedBitSetMetricKey              = "segment_fixed_bit_set"
)

func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
    bucketSizeStr := fmt.Sprintf("%vs", bucketSize)

    var must = []util.MapStr{
        {
            "term": util.MapStr{
                "metadata.labels.cluster_id": util.MapStr{
                    "value": clusterID,
                },
            },
        },
@ -108,7 +107,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
}
var (
    indexNames []string
    err        error
)
if indexName != "" {
    indexNames = strings.Split(indexName, ",")
@ -116,11 +115,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
if !hasAllPrivilege && len(allowedIndices) == 0 {
    return nil, nil
}
if !hasAllPrivilege {
    namePattern := radix.Compile(allowedIndices...)
    var filterNames []string
    for _, name := range indexNames {
        if namePattern.Match(name) {
            filterNames = append(filterNames, name)
        }
    }
@ -131,7 +130,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
    }
    top = len(indexNames)

} else {
    indexNames, err = h.getTopIndexName(req, clusterID, top, 15)
    if err != nil {
        log.Error(err)
@ -146,8 +145,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
    })
}

query := map[string]interface{}{}
query["query"] = util.MapStr{
    "bool": util.MapStr{
        "must": must,
        "must_not": []util.MapStr{
@ -295,32 +294,32 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
case FlushTimesMetricKey:
    // flush count
    flushTimesMetric := newMetricItem(FlushTimesMetricKey, 6, OperationGroupKey)
    flushTimesMetric.AddAxi("flush times", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
    indexMetricItems = append(indexMetricItems, GroupMetricItem{
        Key:          "flush_times",
        Field:        "payload.elasticsearch.index_stats.total.flush.total",
        ID:           util.GetUUID(),
        IsDerivative: true,
        MetricItem:   flushTimesMetric,
        FormatType:   "num",
        Units:        "requests/s",
    })
case IndexingRateMetricKey:
    // indexing rate
    indexingRateMetric := newMetricItem(IndexingRateMetricKey, 1, OperationGroupKey)
    indexingRateMetric.AddAxi("Indexing rate", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
    indexMetricItems = append(indexMetricItems, GroupMetricItem{
        Key:          "indexing_rate",
        Field:        "payload.elasticsearch.index_stats.primaries.indexing.index_total",
        ID:           util.GetUUID(),
        IsDerivative: true,
        MetricItem:   indexingRateMetric,
        FormatType:   "num",
        Units:        "doc/s",
    })
case IndexingBytesMetricKey:
    indexingBytesMetric := newMetricItem(IndexingBytesMetricKey, 2, OperationGroupKey)
    indexingBytesMetric.AddAxi("Indexing bytes", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
    indexMetricItems = append(indexMetricItems, GroupMetricItem{
        Key:   "indexing_bytes",
        Field: "payload.elasticsearch.index_stats.primaries.store.size_in_bytes",
@ -333,382 +332,381 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
|
|||
case IndexingLatencyMetricKey:
|
||||
//写入时延
|
||||
indexingLatencyMetric := newMetricItem(IndexingLatencyMetricKey, 1, LatencyGroupKey)
|
||||
indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
indexingLatencyMetric.AddAxi("Indexing latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "indexing_latency",
|
||||
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
|
||||
Key: "indexing_latency",
|
||||
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: indexingLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: indexingLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case QueryLatencyMetricKey:
|
||||
//查询时延
|
||||
queryLatencyMetric := newMetricItem(QueryLatencyMetricKey, 2, LatencyGroupKey)
|
||||
queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
queryLatencyMetric.AddAxi("Query latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
|
||||
Key: "query_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.total.search.query_total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: queryLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: queryLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case FetchLatencyMetricKey:
|
||||
//fetch时延
|
||||
fetchLatencyMetric := newMetricItem(FetchLatencyMetricKey, 3, LatencyGroupKey)
|
||||
fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
fetchLatencyMetric.AddAxi("Fetch latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "fetch_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
|
||||
Key: "fetch_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.total.search.fetch_total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: fetchLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: fetchLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case MergeLatencyMetricKey:
|
||||
//merge时延
|
||||
mergeLatencyMetric := newMetricItem(MergeLatencyMetricKey, 7, LatencyGroupKey)
|
||||
mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
mergeLatencyMetric.AddAxi("Merge latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "merge_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
|
||||
Key: "merge_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.total.merges.total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: mergeLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: mergeLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case RefreshLatencyMetricKey:
|
||||
|
||||
//refresh时延
|
||||
refreshLatencyMetric := newMetricItem(RefreshLatencyMetricKey, 5, LatencyGroupKey)
|
||||
refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
refreshLatencyMetric.AddAxi("Refresh latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "refresh_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
|
||||
Key: "refresh_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.total.refresh.total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: refreshLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: refreshLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case ScrollLatencyMetricKey:
|
||||
//scroll时延
|
||||
scrollLatencyMetric := newMetricItem(ScrollLatencyMetricKey, 4, LatencyGroupKey)
|
||||
scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
scrollLatencyMetric.AddAxi("Scroll Latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "scroll_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
|
||||
Key: "scroll_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.total.search.scroll_total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: scrollLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: scrollLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case FlushLatencyMetricKey:
|
||||
//flush 时延
|
||||
flushLatencyMetric := newMetricItem(FlushLatencyMetricKey, 6, LatencyGroupKey)
|
||||
flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
|
||||
flushLatencyMetric.AddAxi("Flush latency", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "flush_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
|
||||
Key: "flush_latency",
|
||||
Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
|
||||
Field2: "payload.elasticsearch.index_stats.total.flush.total",
|
||||
Calc: func(value, value2 float64) float64 {
|
||||
return value/value2
|
||||
return value / value2
|
||||
},
|
||||
ID: util.GetUUID(),
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: flushLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
MetricItem: flushLatencyMetric,
|
||||
FormatType: "num",
|
||||
Units: "ms",
|
||||
})
|
||||
case QueryCacheMetricKey:
|
||||
//queryCache
|
||||
queryCacheMetric := newMetricItem(QueryCacheMetricKey, 1, CacheGroupKey)
|
||||
queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
queryCacheMetric.AddAxi("Query cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: "query_cache",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: queryCacheMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: queryCacheMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case RequestCacheMetricKey:
|
||||
//requestCache
|
||||
requestCacheMetric := newMetricItem(RequestCacheMetricKey, 2, CacheGroupKey)
|
||||
requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
requestCacheMetric.AddAxi("request cache", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "request_cache",
|
||||
Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: "request_cache",
|
||||
Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: requestCacheMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: requestCacheMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case RequestCacheHitMetricKey:
|
||||
// Request Cache Hit
|
||||
requestCacheHitMetric:=newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
|
||||
requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "request_cache_hit",
|
||||
Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
|
||||
ID: util.GetUUID(),
|
||||
requestCacheHitMetric := newMetricItem(RequestCacheHitMetricKey, 6, CacheGroupKey)
|
||||
requestCacheHitMetric.AddAxi("request cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "request_cache_hit",
|
||||
Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: requestCacheHitMetric,
|
||||
FormatType: "num",
|
||||
Units: "hits",
|
||||
MetricItem: requestCacheHitMetric,
|
||||
FormatType: "num",
|
||||
Units: "hits",
|
||||
})
|
||||
case RequestCacheMissMetricKey:
|
||||
// Request Cache Miss
|
||||
requestCacheMissMetric:=newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
|
||||
requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "request_cache_miss",
|
||||
Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
|
||||
ID: util.GetUUID(),
|
||||
requestCacheMissMetric := newMetricItem(RequestCacheMissMetricKey, 8, CacheGroupKey)
|
||||
requestCacheMissMetric.AddAxi("request cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "request_cache_miss",
|
||||
Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: requestCacheMissMetric,
|
||||
FormatType: "num",
|
||||
Units: "misses",
|
||||
MetricItem: requestCacheMissMetric,
|
||||
FormatType: "num",
|
||||
Units: "misses",
|
||||
})
|
||||
case QueryCacheCountMetricKey:
|
||||
// Query Cache Count
|
||||
queryCacheCountMetric:=newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
|
||||
queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache_count",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
|
||||
ID: util.GetUUID(),
|
||||
queryCacheCountMetric := newMetricItem(QueryCacheCountMetricKey, 4, CacheGroupKey)
|
||||
queryCacheCountMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache_count",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: queryCacheCountMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
MetricItem: queryCacheCountMetric,
|
||||
FormatType: "num",
|
||||
Units: "",
|
||||
})
|
||||
case QueryCacheHitMetricKey:
|
||||
// Query Cache Miss
|
||||
queryCacheHitMetric:=newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
|
||||
queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache_hit",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
|
||||
ID: util.GetUUID(),
|
||||
queryCacheHitMetric := newMetricItem(QueryCacheHitMetricKey, 5, CacheGroupKey)
|
||||
queryCacheHitMetric.AddAxi("query cache hit", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache_hit",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: queryCacheHitMetric,
|
||||
FormatType: "num",
|
||||
Units: "hits",
|
||||
MetricItem: queryCacheHitMetric,
|
||||
FormatType: "num",
|
||||
Units: "hits",
|
||||
})
|
||||
case QueryCacheMissMetricKey:
|
||||
// Query Cache Miss
|
||||
queryCacheMissMetric:=newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
|
||||
queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache_miss",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
|
||||
ID: util.GetUUID(),
|
||||
queryCacheMissMetric := newMetricItem(QueryCacheMissMetricKey, 7, CacheGroupKey)
|
||||
queryCacheMissMetric.AddAxi("query cache miss", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "query_cache_miss",
|
||||
Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: true,
|
||||
MetricItem: queryCacheMissMetric,
|
||||
FormatType: "num",
|
||||
Units: "misses",
|
||||
MetricItem: queryCacheMissMetric,
|
||||
FormatType: "num",
|
||||
Units: "misses",
|
||||
})
|
||||
case FielddataCacheMetricKey:
|
||||
// Fielddata内存占用大小
|
||||
fieldDataCacheMetric:=newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
|
||||
fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "fielddata_cache",
|
||||
Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
fieldDataCacheMetric := newMetricItem(FielddataCacheMetricKey, 3, CacheGroupKey)
|
||||
fieldDataCacheMetric.AddAxi("FieldData Cache", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "fielddata_cache",
|
||||
Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: fieldDataCacheMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: fieldDataCacheMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentMemoryMetricKey:
|
||||
//segment memory
|
||||
segmentMemoryMetric := newMetricItem(SegmentMemoryMetricKey, 13, MemoryGroupKey)
|
||||
segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
segmentMemoryMetric.AddAxi("Segment memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: "segment_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: segmentMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: segmentMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentDocValuesMemoryMetricKey:
|
||||
//segment doc values memory
|
||||
docValuesMemoryMetric := newMetricItem(SegmentDocValuesMemoryMetricKey, 13, MemoryGroupKey)
|
||||
docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
docValuesMemoryMetric.AddAxi("Segment Doc values Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_doc_values_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: "segment_doc_values_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: docValuesMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: docValuesMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentTermsMemoryMetricKey:
|
||||
//segment terms memory
|
||||
termsMemoryMetric := newMetricItem(SegmentTermsMemoryMetricKey, 13, MemoryGroupKey)
|
||||
termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
termsMemoryMetric.AddAxi("Segment Terms Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_terms_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: "segment_terms_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: termsMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: termsMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentFieldsMemoryMetricKey:
|
||||
//segment fields memory
|
||||
fieldsMemoryMetric := newMetricItem(SegmentFieldsMemoryMetricKey, 13, MemoryGroupKey)
|
||||
fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
fieldsMemoryMetric.AddAxi("Segment Fields Memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_fields_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: "segment_fields_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: fieldsMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: fieldsMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentIndexWriterMemoryMetricKey:
|
||||
// segment index writer memory
|
||||
segmentIndexWriterMemoryMetric:=newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
|
||||
segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_index_writer_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
segmentIndexWriterMemoryMetric := newMetricItem(SegmentIndexWriterMemoryMetricKey, 16, MemoryGroupKey)
|
||||
segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_index_writer_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: segmentIndexWriterMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: segmentIndexWriterMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentTermVectorsMemoryMetricKey:
|
||||
// segment term vectors memory
|
||||
segmentTermVectorsMemoryMetric:=newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
|
||||
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
|
||||
indexMetricItems=append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_term_vectors_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
segmentTermVectorsMemoryMetric := newMetricItem(SegmentTermVectorsMemoryMetricKey, 16, MemoryGroupKey)
|
||||
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory", "group1", common.PositionLeft, "bytes", "0,0", "0,0.[00]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: "segment_term_vectors_memory",
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: segmentTermVectorsMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: segmentTermVectorsMemoryMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentNormsMetricKey:
|
||||
segmentNormsMetric := newMetricItem(SegmentNormsMetricKey, 17, MemoryGroupKey)
|
||||
segmentNormsMetric.AddAxi("Segment norms memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
segmentNormsMetric.AddAxi("Segment norms memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
|
||||
indexMetricItems = append(indexMetricItems, GroupMetricItem{
|
||||
Key: SegmentNormsMetricKey,
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.norms_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
Key: SegmentNormsMetricKey,
|
||||
Field: "payload.elasticsearch.index_stats.total.segments.norms_memory_in_bytes",
|
||||
ID: util.GetUUID(),
|
||||
IsDerivative: false,
|
||||
MetricItem: segmentNormsMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
MetricItem: segmentNormsMetric,
|
||||
FormatType: "bytes",
|
||||
Units: "",
|
||||
})
|
||||
case SegmentPointsMetricKey:
|
||||
segmentPointsMetric := newMetricItem(SegmentPointsMetricKey, 18, MemoryGroupKey)
|
||||
segmentPointsMetric.AddAxi("Segment points memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
|
||||
segmentPointsMetric.AddAxi("Segment points memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: SegmentPointsMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.points_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentPointsMetric,
FormatType: "bytes",
Units: "",
})
case VersionMapMetricKey:
segmentVersionMapMetric := newMetricItem(VersionMapMetricKey, 18, MemoryGroupKey)
segmentVersionMapMetric.AddAxi("Segment version map memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
segmentVersionMapMetric.AddAxi("Segment version map memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: VersionMapMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.version_map_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentVersionMapMetric,
FormatType: "bytes",
Units: "",
})
case FixedBitSetMetricKey:
segmentFixedBitSetMetric := newMetricItem(FixedBitSetMetricKey, 18, MemoryGroupKey)
segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
segmentFixedBitSetMetric.AddAxi("Segment fixed bit set memory", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: FixedBitSetMetricKey,
Field: "payload.elasticsearch.index_stats.total.segments.fixed_bit_set_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentFixedBitSetMetric,
FormatType: "bytes",
Units: "",
})
}

aggs:=map[string]interface{}{}
aggs := map[string]interface{}{}

for _,metricItem:=range indexMetricItems {
aggs[metricItem.ID]=util.MapStr{
"max":util.MapStr{
for _, metricItem := range indexMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}

if metricItem.Field2 != ""{
aggs[metricItem.ID + "_field2"]=util.MapStr{
"max":util.MapStr{
if metricItem.Field2 != "" {
aggs[metricItem.ID+"_field2"] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field2,
},
}
}

if metricItem.IsDerivative{
aggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID + "_deriv_field2"]=util.MapStr{
"derivative":util.MapStr{
aggs[metricItem.ID+"_deriv_field2"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
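For reference, the loop above yields one max aggregation per metric and, for derivative items, a derivative pipeline aggregation chained to it through buckets_path. A minimal sketch of the shape produced for a single hypothetical metric ID "m1" (the field path is illustrative, not taken from this diff):

aggs := util.MapStr{
	"m1": util.MapStr{
		"max": util.MapStr{"field": "payload.elasticsearch.index_stats.total.search.query_total"},
	},
	"m1_deriv": util.MapStr{
		"derivative": util.MapStr{"buckets_path": "m1"}, // per-bucket delta of the max above
	},
}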
@ -720,8 +718,8 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
return nil, err
}

query["size"]=0
query["aggs"]= util.MapStr{
query["size"] = 0
query["aggs"] = util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",

@ -732,11 +730,11 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":aggs,
"aggs": aggs,
},
"max_store": util.MapStr{
"max": util.MapStr{

@ -750,7 +748,7 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu

}

func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error){
func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error) {
ver := h.Client().GetVersion()
cr, _ := util.VersionCompare(ver.Number, "6.1")
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 {

@ -758,8 +756,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
}
var (
now = time.Now()
max = now.UnixNano()/1e6
min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6
max = now.UnixNano() / 1e6
min = now.Add(-time.Duration(lastMinutes)*time.Minute).UnixNano() / 1e6
)
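A note on the window arithmetic above: UnixNano()/1e6 is Unix time in milliseconds. On Go 1.17 and later the same pair could be written with time.Time.UnixMilli, for example:

now := time.Now()
maxMs := now.UnixMilli()                                                // window end, epoch ms
minMs := now.Add(-time.Duration(lastMinutes) * time.Minute).UnixMilli() // window start, epoch ms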
var must = []util.MapStr{
{

@ -791,8 +789,8 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
if !hasAllPrivilege {
must = append(must, util.MapStr{
"query_string": util.MapStr{
"query": strings.Join(allowedIndices, " "),
"fields": []string{"metadata.labels.index_name"},
"default_operator": "OR",
},
})

@ -850,7 +848,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{

@ -889,7 +887,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{

@ -909,20 +907,20 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
},
},
}
response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query))
if err!=nil{
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
if err != nil {
log.Error(err)
return nil, err
}
var maxQpsKVS = map[string] float64{}
var maxQpsKVS = map[string]float64{}
for _, agg := range response.Aggregations {
for _, bk := range agg.Buckets {
key := bk["key"].(string)
if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok {
val := maxQps["value"].(float64)
if _, ok = maxQpsKVS[key] ; ok {
if _, ok = maxQpsKVS[key]; ok {
maxQpsKVS[key] = maxQpsKVS[key] + val
}else{
} else {
maxQpsKVS[key] = val
}
}

@ -943,7 +941,7 @@ func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top in
length = len(qpsValues)
}
indexNames := []string{}
for i := 0; i <length; i++ {
for i := 0; i < length; i++ {
indexNames = append(indexNames, qpsValues[i].Key)
}
return indexNames, nil

@ -954,12 +952,13 @@ type TopTerm struct {
Value float64
}
type TopTermOrder []TopTerm

func (t TopTermOrder) Len() int{
func (t TopTermOrder) Len() int {
return len(t)
}
func (t TopTermOrder) Less(i, j int) bool{
func (t TopTermOrder) Less(i, j int) bool {
return t[i].Value > t[j].Value //desc
}
func (t TopTermOrder) Swap(i, j int){
func (t TopTermOrder) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
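Since TopTermOrder implements sort.Interface with a descending Less, ranking the collected per-index QPS values is a single sort call; a sketch assuming the maxQpsKVS map built earlier and the standard library sort package:

qpsValues := TopTermOrder{}
for k, v := range maxQpsKVS {
	qpsValues = append(qpsValues, TopTerm{Key: k, Value: v})
}
sort.Sort(qpsValues) // highest QPS first, per the descending Less above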
@ -72,7 +72,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
for _, v := range results.Result {
result, ok := v.(map[string]interface{})
if ok {
if indexID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "index_id"}, result); ok {
summary := map[string]interface{}{}
if docs, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_stats", "total", "docs"}, result); ok {
if docsM, ok := docs.(map[string]interface{}); ok {

@ -97,7 +97,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
if sinfo, ok := shardInfo.([]interface{}); ok {
unassignedCount := 0
for _, item := range sinfo {
if itemMap, ok := item.(map[string]interface{}); ok{
if itemMap, ok := item.(map[string]interface{}); ok {
if itemMap["state"] == "UNASSIGNED" {
unassignedCount++
}

@ -121,7 +121,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
return
}
firstClusterID, firstIndexName = parts[0], parts[1]
}else{
} else {
h.WriteError(w, fmt.Sprintf("invalid index_id: %v", indexID), http.StatusInternalServerError)
return
}

@ -137,35 +137,35 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
}
var metricLen = 15
// indexing rate
indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems := []GroupMetricItem{}
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "indexing",
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexMetric,
FormatType: "num",
Units: "Indexing/s",
})
queryMetric:=newMetricItem("search", 2, OperationGroupKey)
queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
queryMetric := newMetricItem("search", 2, OperationGroupKey)
queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "search",
Field: "payload.elasticsearch.index_stats.total.search.query_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryMetric,
FormatType: "num",
Units: "Search/s",
})

aggs:=map[string]interface{}{}
query :=map[string]interface{}{}
query["query"]=util.MapStr{
aggs := map[string]interface{}{}
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": []util.MapStr{
{
"term": util.MapStr{
"metadata.category": util.MapStr{

@ -190,7 +190,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": fmt.Sprintf("now-%ds", metricLen * bucketSize),
"gte": fmt.Sprintf("now-%ds", metricLen*bucketSize),
},
},
},

@ -198,15 +198,15 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
},
}

for _,metricItem:=range nodeMetricItems{
aggs[metricItem.ID]=util.MapStr{
"max":util.MapStr{
for _, metricItem := range nodeMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.IsDerivative{
aggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}

@ -218,8 +218,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
if err != nil {
panic(err)
}
query["size"]=0
query["aggs"]= util.MapStr{
query["size"] = 0
query["aggs"] = util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_id",

@ -227,11 +227,11 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":aggs,
"aggs": aggs,
},
},
},

@ -245,9 +245,8 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok{
indexMetrics[line.Metric.Label] = util.MapStr{
}
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
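The loop above pivots the per-metric results into a per-index map keyed by the line label, with one inner entry per metric key. Roughly, the resulting shape (index name and series below are hypothetical placeholders):

var indexingPoints, searchPoints interface{} // placeholder series data
byIndex := map[string]util.MapStr{
	"logs-2024": {"indexing": indexingPoints, "search": searchPoints},
}
_ = byIndex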
@ -292,11 +291,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
indexID := ps.MustGetParameter("index")
parts := strings.Split(indexID, ":")
if len(parts) > 1 && !h.IsIndexAllowed(req, clusterID, parts[1]) {
h.WriteError(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
return
}
if len(parts) < 2 {
h.WriteError(w, "invalid index id: "+ indexID, http.StatusInternalServerError)
h.WriteError(w, "invalid index id: "+indexID, http.StatusInternalServerError)
return
}

@ -320,7 +319,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
return
}
q1 := orm.Query{
Size: 1,
WildcardIndex: true,
}
q1.Conds = orm.And(

@ -340,7 +339,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
summary["aliases"] = aliases
summary["timestamp"] = hit["timestamp"]
summary["index_info"] = util.MapStr{
"health":health,
"health": health,
"status": state,
}
}

@ -361,11 +360,11 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
if tm, ok := result["timestamp"].(string); ok {
issueTime, _ := time.Parse(time.RFC3339, tm)
if time.Now().Sub(issueTime).Seconds() > 30 {
health, _:= util.GetMapValueByKeys([]string{"metadata", "labels", "health_status"}, response.Hits.Hits[0].Source)
health, _ := util.GetMapValueByKeys([]string{"metadata", "labels", "health_status"}, response.Hits.Hits[0].Source)
infoM["health"] = health
}
}
state, _:= util.GetMapValueByKeys([]string{"metadata", "labels", "state"}, response.Hits.Hits[0].Source)
state, _ := util.GetMapValueByKeys([]string{"metadata", "labels", "state"}, response.Hits.Hits[0].Source)
if state == "delete" {
infoM["status"] = "delete"
infoM["health"] = "N/A"

@ -377,7 +376,7 @@ func (h *APIHandler) GetIndexInfo(w http.ResponseWriter, req *http.Request, ps h
if sinfo, ok := shardInfo.([]interface{}); ok {
unassignedCount := 0
for _, item := range sinfo {
if itemMap, ok := item.(map[string]interface{}); ok{
if itemMap, ok := item.(map[string]interface{}); ok {
if itemMap["state"] == "UNASSIGNED" {
unassignedCount++
}

@ -398,7 +397,7 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
clusterID := ps.MustGetParameter("id")
indexName := ps.MustGetParameter("index")
q1 := orm.Query{
Size: 1,
WildcardIndex: true,
}
q1.Conds = orm.And(

@ -411,9 +410,9 @@ func (h *APIHandler) GetIndexShards(w http.ResponseWriter, req *http.Request, ps
q1.AddSort("timestamp", orm.DESC)
err, result := orm.Search(&event.Event{}, &q1)
if err != nil {
h.WriteJSON(w,util.MapStr{
h.WriteJSON(w, util.MapStr{
"error": err.Error(),
}, http.StatusInternalServerError )
}, http.StatusInternalServerError)
return
}
var shardInfo interface{} = []interface{}{}

@ -512,7 +511,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
log.Error(err)
}
metrics["index_health"] = healthMetric
}else {
} else {
switch metricKey {
case IndexThroughputMetricKey:
metricItem := newMetricItem("index_throughput", 1, OperationGroupKey)

@ -582,7 +581,7 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
minBucketSize, err := GetMetricMinBucketSize(clusterID, MetricTypeIndexStats)
if err != nil {
log.Error(err)
}else{
} else {
metrics[metricKey].MinBucketSize = int64(minBucketSize)
}
}

@ -591,8 +590,8 @@ func (h *APIHandler) GetSingleIndexMetrics(w http.ResponseWriter, req *http.Requ
h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName string, min, max int64, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err

@ -645,14 +644,14 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"group_status": util.MapStr{
"terms": util.MapStr{
"field": "payload.elasticsearch.index_health.status",
"size": 5,
},
},
},

@ -666,8 +665,8 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
return nil, err
}

metricItem:=newMetricItem("index_health", 1, "")
metricItem.AddLine("health","Health","","group1","payload.elasticsearch.index_health.status","max",bucketSizeStr,"%","ratio","0.[00]","0.[00]",false,false)
metricItem := newMetricItem("index_health", 1, "")
metricItem.AddLine("health", "Health", "", "group1", "payload.elasticsearch.index_health.status", "max", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false)

metricData := []interface{}{}
if response.StatusCode == 200 {

@ -683,8 +682,7 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
return metricItem, nil
}

func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[string][]interface{}, error){
func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string) (map[string][]interface{}, error) {
q := orm.Query{
WildcardIndex: true,
}

@ -698,53 +696,53 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[
"ranges": []util.MapStr{
{
"from": "now-13d/d",
"to": "now-12d/d",
}, {
"from": "now-12d/d",
"to": "now-11d/d",
},
{
"from": "now-11d/d",
"to": "now-10d/d",
},
{
"from": "now-10d/d",
"to": "now-9d/d",
}, {
"from": "now-9d/d",
"to": "now-8d/d",
},
{
"from": "now-8d/d",
"to": "now-7d/d",
},
{
"from": "now-7d/d",
"to": "now-6d/d",
},
{
"from": "now-6d/d",
"to": "now-5d/d",
}, {
"from": "now-5d/d",
"to": "now-4d/d",
},
{
"from": "now-4d/d",
"to": "now-3d/d",
},{
}, {
"from": "now-3d/d",
"to": "now-2d/d",
}, {
"from": "now-2d/d",
"to": "now-1d/d",
}, {
"from": "now-1d/d",
"to": "now/d",
},
{
"from": "now/d",
"to": "now",
},
},
},
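The fourteen hand-written day buckets above could equally be generated; a sketch (not the committed code) that produces the same now-13d/d through now/d ranges plus the trailing now/d-to-now bucket:

ranges := []util.MapStr{}
for i := 13; i >= 1; i-- {
	to := "now/d"
	if i > 1 {
		to = fmt.Sprintf("now-%dd/d", i-1)
	}
	ranges = append(ranges, util.MapStr{"from": fmt.Sprintf("now-%dd/d", i), "to": to})
}
ranges = append(ranges, util.MapStr{"from": "now/d", "to": "now"}) // the partial current day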
@ -817,16 +815,16 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[
}
healthMap := map[string]int{}
status := "unknown"
for _, hbkItem := range healthBks {
if hitem, ok := hbkItem.(map[string]interface{}); ok {
healthMap[hitem["key"].(string)] = 1
}
}
if _, ok = healthMap["red"]; ok {
status = "red"
}else if _, ok = healthMap["yellow"]; ok {
} else if _, ok = healthMap["yellow"]; ok {
status = "yellow"
}else if _, ok = healthMap["green"]; ok {
} else if _, ok = healthMap["green"]; ok {
status = "green"
}
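The if/else chain above encodes a fixed severity order, red over yellow over green; the same pick can be written as a loop over that order, for example:

status := "unknown"
for _, s := range []string{"red", "yellow", "green"} { // worst status wins
	if _, ok := healthMap[s]; ok {
		status = s
		break
	}
}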
key := fmt.Sprintf("%s:%s", clusterID, indexName)
@ -838,7 +836,7 @@ func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string)(map[
}

func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{}
resBody := map[string]interface{}{}
id := ps.ByName("id")
indexName := ps.ByName("index")
if !h.IsIndexAllowed(req, id, indexName) {

@ -847,7 +845,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
}, http.StatusForbidden)
return
}
q := &orm.Query{ Size: 1}
q := &orm.Query{Size: 1}
q.AddSort("timestamp", orm.DESC)
q.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),

@ -859,13 +857,13 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(event.Event{}, q)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
namesM := util.MapStr{}
if len(result.Result) > 0 {
if data, ok := result.Result[0].(map[string]interface{}); ok {
if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "index_routing_table"}, data); exists {
if table, ok := routingTable.(map[string]interface{}); ok{
if table, ok := routingTable.(map[string]interface{}); ok {
if shardsM, ok := table["shards"].(map[string]interface{}); ok {
for _, rows := range shardsM {
if rowsArr, ok := rows.([]interface{}); ok {

@ -887,12 +885,12 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
}

//node uuid
nodeIds := make([]interface{}, 0, len(namesM) )
nodeIds := make([]interface{}, 0, len(namesM))
for name, _ := range namesM {
nodeIds = append(nodeIds, name)
}

q1 := &orm.Query{ Size: 100}
q1 := &orm.Query{Size: 100}
q1.AddSort("timestamp", orm.DESC)
q1.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),

@ -902,7 +900,7 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps
err, result = orm.Search(elastic.NodeConfig{}, q1)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
nodes := []interface{}{}
for _, hit := range result.Result {

@ -922,11 +920,11 @@ func (h *APIHandler) getIndexNodes(w http.ResponseWriter, req *http.Request, ps

if v, ok := nodeId.(string); ok {
ninfo := util.MapStr{
"id": v,
"name": nodeName,
"ip": ip,
"port": port,
"status": status,
"timestamp": hitM["timestamp"],
}
nodes = append(nodes, ninfo)

@ -947,7 +945,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
}
var must = []util.MapStr{}

if !util.StringInArray(ids, "*"){
if !util.StringInArray(ids, "*") {

must = append(must, util.MapStr{
"terms": util.MapStr{

@ -958,9 +956,8 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr

if keyword != "" {
must = append(must, util.MapStr{
"wildcard":util.MapStr{
"metadata.index_name":
util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
"wildcard": util.MapStr{
"metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
},
})
}

@ -986,7 +983,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
},
}

esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
indexName := orm.GetIndexName(elastic.IndexConfig{})
resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl))

@ -545,7 +545,7 @@ func (h *APIHandler) HandleClusterMetricsAction(w http.ResponseWriter, req *http
minBucketSize, err := GetMetricMinBucketSize(id, metricType)
if err != nil {
log.Error(err)
}else{
} else {
metrics[key].MinBucketSize = int64(minBucketSize)
}
}

@ -648,7 +648,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
}
}
}
}else{
} else {
metrics, err = h.getIndexMetrics(ctx, req, id, bucketSize, min, max, indexName, top, key)
if err != nil {
h.WriteError(w, err, http.StatusInternalServerError)

@ -660,7 +660,7 @@ func (h *APIHandler) HandleIndexMetricsAction(w http.ResponseWriter, req *http.R
minBucketSize, err := GetMetricMinBucketSize(id, MetricTypeNodeStats)
if err != nil {
log.Error(err)
}else{
} else {
metrics[key].MinBucketSize = int64(minBucketSize)
}
}

@ -788,19 +788,20 @@ const (
)

const (
ClusterStorageMetricKey = "cluster_storage"
ClusterDocumentsMetricKey = "cluster_documents"
ClusterIndicesMetricKey = "cluster_indices"
ClusterNodeCountMetricKey = "node_count"
ClusterHealthMetricKey = "cluster_health"
ShardCountMetricKey = "shard_count"
CircuitBreakerMetricKey = "circuit_breaker"
)

func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {

var (
clusterMetricsResult = map[string]*common.MetricItem {}
clusterMetricsResult = map[string]*common.MetricItem{}
err error
)
switch metricKey {
case ClusterDocumentsMetricKey,

@ -915,12 +916,14 @@ func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, buck
}
return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}

const (
IndexThroughputMetricKey = "index_throughput"
SearchThroughputMetricKey = "search_throughput"
IndexLatencyMetricKey = "index_latency"
SearchLatencyMetricKey = "search_latency"
)

func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}

@ -113,7 +113,7 @@ func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{}
func (h *APIHandler) getMetrics(ctx context.Context, query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) (map[string]*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
queryDSL := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).QueryDSL(ctx, getAllMetricsIndex(), nil, queryDSL)
if err != nil {
return nil, err
}

@ -229,11 +229,12 @@ func GetMinBucketSize() int {

const (
MetricTypeClusterHealth = "cluster_health"
MetricTypeClusterStats = "cluster_stats"
MetricTypeNodeStats = "node_stats"
MetricTypeIndexStats = "index_stats"
)

//GetMetricMinBucketSize returns twice the metrics collection interval based on the cluster ID and metric type
// GetMetricMinBucketSize returns twice the metrics collection interval based on the cluster ID and metric type
func GetMetricMinBucketSize(clusterID, metricType string) (int, error) {
meta := elastic.GetMetadata(clusterID)
if meta == nil {

@ -243,19 +244,19 @@ func GetMetricMinBucketSize(clusterID, metricType string) (int, error) {
switch metricType {
case MetricTypeClusterHealth:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.ClusterHealth.Interval
}
case MetricTypeClusterStats:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.ClusterStats.Interval
}
case MetricTypeNodeStats:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.NodeStats.Interval
}
case MetricTypeIndexStats:
if meta.Config.MonitorConfigs != nil {
interval = meta.Config.MonitorConfigs.IndexStats.Interval
}
default:
return 0, fmt.Errorf("invalid metric name: %s", metricType)
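Per its doc comment, GetMetricMinBucketSize returns twice the collection interval resolved in the switch above; the tail of the function, which this hunk does not show, presumably reduces to something like:

// interval carries the per-metric collection interval picked above;
// doubling it gives the smallest bucket that always spans one sample.
return 2 * interval, nil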
@ -278,7 +279,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m
}
bucketSize := 0

bucketSizeStr := h.GetParameterOrDefault(req, "bucket_size", "") // default 10; the time span of each bucket, in seconds
if bucketSizeStr != "" {
du, err := util.ParseDuration(bucketSizeStr)
if err != nil {

@ -293,7 +294,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m
maxStr := h.Get(req, "max", "")
var (
minBucketSize = 0
err error
)
//clusterID may be empty when querying host metrics
if clusterID != "" {

@ -301,7 +302,7 @@ func (h *APIHandler) GetMetricRangeAndBucketSize(req *http.Request, clusterID, m
if err != nil {
return 0, 0, 0, fmt.Errorf("failed to get min bucket size for cluster [%s]:%w", clusterID, err)
}
}else{
} else {
//default to 20
minBucketSize = 20
}

@ -45,40 +45,40 @@ import (
)

func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody:=util.MapStr{}
reqBody := struct{
resBody := util.MapStr{}
reqBody := struct {
Keyword string `json:"keyword"`
Size int `json:"size"`
From int `json:"from"`
Aggregations []elastic.SearchAggParam `json:"aggs"`
Highlight elastic.SearchHighlightParam `json:"highlight"`
Filter elastic.SearchFilterParam `json:"filter"`
Sort []string `json:"sort"`
SearchField string `json:"search_field"`
}{}
err := h.DecodeJSON(req, &reqBody)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
aggs["term_cluster_id"] = util.MapStr{
"terms": util.MapStr{
"field": "metadata.cluster_id",
"size": 1000,
},
"aggs": util.MapStr{
"term_cluster_name": util.MapStr{
"terms": util.MapStr{
"field": "metadata.cluster_name",
"size": 1,
},
},
},
}
var should =[]util.MapStr{}
if reqBody.SearchField != ""{
var should = []util.MapStr{}
if reqBody.SearchField != "" {
should = []util.MapStr{
{
"prefix": util.MapStr{

@ -101,7 +101,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
},
},
}
}else{
} else {
should = []util.MapStr{
{
"prefix": util.MapStr{

@ -143,30 +143,25 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
}
clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id")
if !hasPrivilege && clusterFilter == nil {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
must := []interface{}{
}
must := []interface{}{}
if !hasPrivilege && clusterFilter != nil {
must = append(must, clusterFilter)
}

query := util.MapStr{
"aggs": aggs,
"size": reqBody.Size,
"from": reqBody.From,
"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
"query": util.MapStr{
"bool": util.MapStr{
"minimum_should_match": 1,
"filter": elastic.BuildSearchTermFilter(reqBody.Filter),
"should": should,
"must": must,
},
},
"sort": []util.MapStr{

@ -178,7 +173,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
},
}
if len(reqBody.Sort) > 1 {
query["sort"] = []util.MapStr{
{
reqBody.Sort[0]: util.MapStr{
"order": reqBody.Sort[1],

@ -190,7 +185,7 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetIndexName(elastic.NodeConfig{}), dsl)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
w.Write(util.MustToJSONBytes(response))

@ -293,7 +288,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
source["shard_info"] = shardInfo
}
if tempClusterID, ok := util.GetMapValueByKeys([]string{"metadata", "labels", "cluster_id"}, result); ok {
if clusterID, ok = tempClusterID.(string); ok {
if meta := elastic.GetMetadata(clusterID); meta != nil && meta.ClusterState != nil {
source["is_master_node"] = meta.ClusterState.MasterNode == nodeID
}

@ -316,35 +311,35 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
return
}
// indexing rate
indexMetric:=newMetricItem("indexing", 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetric := newMetricItem("indexing", 1, OperationGroupKey)
indexMetric.AddAxi("indexing rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems := []GroupMetricItem{}
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "indexing",
Field: "payload.elasticsearch.node_stats.indices.indexing.index_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexMetric,
FormatType: "num",
Units: "Indexing/s",
})
queryMetric:=newMetricItem("search", 2, OperationGroupKey)
queryMetric.AddAxi("query rate","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
nodeMetricItems=append(nodeMetricItems, GroupMetricItem{
queryMetric := newMetricItem("search", 2, OperationGroupKey)
queryMetric.AddAxi("query rate", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
nodeMetricItems = append(nodeMetricItems, GroupMetricItem{
Key: "search",
Field: "payload.elasticsearch.node_stats.indices.search.query_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryMetric,
FormatType: "num",
Units: "Search/s",
})

aggs:=map[string]interface{}{}
query=map[string]interface{}{}
query["query"]=util.MapStr{
aggs := map[string]interface{}{}
query = map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": []util.MapStr{
{
"term": util.MapStr{
"metadata.category": util.MapStr{

@ -378,15 +373,15 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
}

for _,metricItem:=range nodeMetricItems{
aggs[metricItem.ID]=util.MapStr{
"max":util.MapStr{
for _, metricItem := range nodeMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.IsDerivative{
aggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}

@ -398,8 +393,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
if err != nil {
panic(err)
}
query["size"]=0
query["aggs"]= util.MapStr{
query["size"] = 0
query["aggs"] = util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.node_id",

@ -407,11 +402,11 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":aggs,
"aggs": aggs,
},
},
},

@ -425,9 +420,8 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
indexMetrics := map[string]util.MapStr{}
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok{
indexMetrics[line.Metric.Label] = util.MapStr{
}
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}

@ -487,7 +481,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
// return
//}
q1 := orm.Query{
Size: 1,
WildcardIndex: true,
}
q1.Conds = orm.And(

@ -512,7 +506,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
tt, _ := time.Parse(time.RFC3339, ts)
if time.Now().Sub(tt).Seconds() > 30 {
kvs["status"] = "unavailable"
}else{
} else {
kvs["status"] = "available"
}
}

@ -530,9 +524,9 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht

jvm, ok := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_stats", "jvm"}, vresult)
if ok {
if jvmVal, ok := jvm.(map[string]interface{});ok {
if jvmVal, ok := jvm.(map[string]interface{}); ok {
kvs["jvm"] = util.MapStr{
"mem": jvmVal["mem"],
"uptime": jvmVal["uptime_in_millis"],
}
}

@ -553,7 +547,7 @@ func (h *APIHandler) GetNodeInfo(w http.ResponseWriter, req *http.Request, ps ht
}
}
}
if len( response.Hits.Hits) > 0 {
if len(response.Hits.Hits) > 0 {
hit := response.Hits.Hits[0]
innerMetaData, _ := util.GetMapValueByKeys([]string{"metadata", "labels"}, hit.Source)
if mp, ok := innerMetaData.(map[string]interface{}); ok {

@ -583,8 +577,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
nodeID := ps.MustGetParameter("node_id")
var must = []util.MapStr{
{
"term":util.MapStr{
"metadata.labels.cluster_uuid":util.MapStr{
"term": util.MapStr{
"metadata.labels.cluster_uuid": util.MapStr{
"value": clusterUUID,
},
},

@ -612,15 +606,15 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
},
}
resBody := map[string]interface{}{}
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req,clusterID, MetricTypeNodeStats,60)
bucketSize, min, max, err := h.GetMetricRangeAndBucketSize(req, clusterID, MetricTypeNodeStats, 60)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
query:=map[string]interface{}{}
query["query"]=util.MapStr{
query := map[string]interface{}{}
query["query"] = util.MapStr{
"bool": util.MapStr{
"must": must,
"filter": []util.MapStr{

@ -636,67 +630,67 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
},
}

bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
metricItems:=[]*common.MetricItem{}
metricItem:=newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu","group1",common.PositionLeft,"ratio","0.[0]","0.[0]",5,true)
metricItem.AddLine("Process CPU","Process CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.process.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("OS CPU","OS CPU","process cpu used percent of node.","group1","payload.elasticsearch.node_stats.os.cpu.percent","max",bucketSizeStr,"%","num","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
metricItem =newMetricItem("jvm", 2, SystemGroupKey)
metricItem.AddAxi("JVM Heap","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
metricItem.AddLine("Max Heap","Max Heap","JVM max Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItem.AddLine("Used Heap","Used Heap","JVM used Heap of node.","group1","payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes","max",bucketSizeStr,"","bytes","0,0.[00]","0,0.[00]",false,false)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("index_throughput", 3, OperationGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem.AddLine("Indexing Rate","Total Shards","Number of documents being indexed for node.","group1","payload.elasticsearch.node_stats.indices.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metricItem=newMetricItem("search_throughput", 4, OperationGroupKey)
metricItem.AddAxi("searching","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem.AddLine("Search Rate","Total Shards",
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}
metricItem := newMetricItem("cpu", 1, SystemGroupKey)
metricItem.AddAxi("cpu", "group1", common.PositionLeft, "ratio", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Process CPU", "Process CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.process.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("OS CPU", "OS CPU", "process cpu used percent of node.", "group1", "payload.elasticsearch.node_stats.os.cpu.percent", "max", bucketSizeStr, "%", "num", "0,0.[00]", "0,0.[00]", false, false)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("jvm", 2, SystemGroupKey)
metricItem.AddAxi("JVM Heap", "group1", common.PositionLeft, "bytes", "0.[0]", "0.[0]", 5, true)
metricItem.AddLine("Max Heap", "Max Heap", "JVM max Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_max_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItem.AddLine("Used Heap", "Used Heap", "JVM used Heap of node.", "group1", "payload.elasticsearch.node_stats.jvm.mem.heap_used_in_bytes", "max", bucketSizeStr, "", "bytes", "0,0.[00]", "0,0.[00]", false, false)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("index_throughput", 3, OperationGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)
metricItem.AddLine("Indexing Rate", "Total Shards", "Number of documents being indexed for node.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_total", "max", bucketSizeStr, "doc/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("search_throughput", 4, OperationGroupKey)
metricItem.AddAxi("searching", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)
metricItem.AddLine("Search Rate", "Total Shards",
"Number of search requests being executed.",
"group1","payload.elasticsearch.node_stats.indices.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
"group1", "payload.elasticsearch.node_stats.indices.search.query_total", "max", bucketSizeStr, "query/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)

metricItem=newMetricItem("index_latency", 5, LatencyGroupKey)
metricItem.AddAxi("indexing","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
metricItem = newMetricItem("index_latency", 5, LatencyGroupKey)
metricItem.AddAxi("indexing", "group1", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, true)

metricItem.AddLine("Indexing","Indexing Latency","Average latency for indexing documents.","group1","payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Indexing", "Indexing Latency", "Average latency for indexing documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.index_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.index_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Indexing","Delete Latency","Average latency for delete documents.","group1","payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Indexing", "Delete Latency", "Average latency for delete documents.", "group1", "payload.elasticsearch.node_stats.indices.indexing.delete_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.indexing.delete_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItems=append(metricItems,metricItem)
metricItems = append(metricItems, metricItem)

metricItem=newMetricItem("search_latency", 6, LatencyGroupKey)
metricItem.AddAxi("searching","group2",common.PositionLeft,"num","0,0","0,0.[00]",5,false)
metricItem = newMetricItem("search_latency", 6, LatencyGroupKey)
metricItem.AddAxi("searching", "group2", common.PositionLeft, "num", "0,0", "0,0.[00]", 5, false)

metricItem.AddLine("Searching","Query Latency","Average latency for searching query.","group2","payload.elasticsearch.node_stats.indices.search.query_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Query Latency", "Average latency for searching query.", "group2", "payload.elasticsearch.node_stats.indices.search.query_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[0].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.query_total"
metricItem.Lines[0].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Searching","Fetch Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Fetch Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.fetch_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[1].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.fetch_total"
metricItem.Lines[1].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItem.AddLine("Searching","Scroll Latency","Average latency for searching fetch.","group2","payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis","max",bucketSizeStr,"ms","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Searching", "Scroll Latency", "Average latency for searching fetch.", "group2", "payload.elasticsearch.node_stats.indices.search.scroll_time_in_millis", "max", bucketSizeStr, "ms", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItem.Lines[2].Metric.Field2 = "payload.elasticsearch.node_stats.indices.search.scroll_total"
metricItem.Lines[2].Metric.Calc = func(value, value2 float64) float64 {
return value/value2
return value / value2
}
metricItems=append(metricItems,metricItem)
metricItem =newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped","Parent Breaker Tripped","Rate of the circuit breaker has been triggered and prevented an out of memory error.","group1","payload.elasticsearch.node_stats.breakers.parent.tripped","max",bucketSizeStr,"times/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
metrics, err := h.getSingleMetrics(context.Background(), metricItems,query, bucketSize)
metricItems = append(metricItems, metricItem)
metricItem = newMetricItem("parent_breaker", 8, SystemGroupKey)
metricItem.AddLine("Parent Breaker Tripped", "Parent Breaker Tripped", "Rate of the circuit breaker has been triggered and prevented an out of memory error.", "group1", "payload.elasticsearch.node_stats.breakers.parent.tripped", "max", bucketSizeStr, "times/s", "num", "0,0.[00]", "0,0.[00]", false, true)
metricItems = append(metricItems, metricItem)
metrics, err := h.getSingleMetrics(context.Background(), metricItems, query, bucketSize)
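Each latency line above pairs a cumulative *_time_in_millis field (Field) with its operation counter (Field2) and divides them in Calc; both series are taken as derivatives over the same buckets, so the quotient is the average per-operation latency within a bucket. The idea, in isolation:

// average latency for one bucket, given the two per-bucket deltas
avgLatencyMs := func(deltaTimeMs, deltaOps float64) float64 {
	return deltaTimeMs / deltaOps // ms per operation
}
_ = avgLatencyMs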
if err != nil {
log.Error(err)
h.WriteError(w, err, http.StatusInternalServerError)

@ -713,8 +707,8 @@ func (h *APIHandler) GetSingleNodeMetrics(w http.ResponseWriter, req *http.Reque
h.WriteJSON(w, resBody, http.StatusOK)
}

func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
func getNodeHealthMetric(query util.MapStr, bucketSize int) (*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err

@ -722,7 +716,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
query["aggs"] = util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{

@ -740,8 +734,8 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
return nil, err
}

metricItem:=newMetricItem("node_health", 0, "")
metricItem.AddLine("Node health","Node Health","","group1","payload.elasticsearch.node_stats.jvm.uptime_in_millis","min",bucketSizeStr,"%","ratio","0.[00]","0.[00]",false,false)
metricItem := newMetricItem("node_health", 0, "")
metricItem.AddLine("Node health", "Node Health", "", "group1", "payload.elasticsearch.node_stats.jvm.uptime_in_millis", "min", bucketSizeStr, "%", "ratio", "0.[00]", "0.[00]", false, false)

metricData := []interface{}{}
if response.StatusCode == 200 {

@ -770,7 +764,7 @@ func getNodeHealthMetric(query util.MapStr, bucketSize int)(*common.MetricItem,
return metricItem, nil
}

func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{}, error){
func getNodeOnlineStatusOfRecentDay(nodeIDs []string) (map[string][]interface{}, error) {
q := orm.Query{
WildcardIndex: true,
}

@ -779,64 +773,64 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
"group_by_node_id": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.node_id",
"size": 100,
},
"aggs": util.MapStr{
"uptime_histogram": util.MapStr{
"date_range": util.MapStr{
"field": "timestamp",
"format": "yyyy-MM-dd",
"time_zone": "+08:00",
"ranges": []util.MapStr{
{
"from": "now-13d/d",
"to": "now-12d/d",
}, {
"from": "now-12d/d",
"to": "now-11d/d",
},
{
"from": "now-11d/d",
"to": "now-10d/d",
},
{
"from": "now-10d/d",
"to": "now-9d/d",
}, {
"from": "now-9d/d",
"to": "now-8d/d",
},
{
"from": "now-8d/d",
"to": "now-7d/d",
},
{
"from": "now-7d/d",
"to": "now-6d/d",
},
{
"from": "now-6d/d",
"to": "now-5d/d",
}, {
"from": "now-5d/d",
"to": "now-4d/d",
},
{
"from": "now-4d/d",
"to": "now-3d/d",
},{
}, {
"from": "now-3d/d",
"to": "now-2d/d",
}, {
"from": "now-2d/d",
"to": "now-1d/d",
}, {
"from": "now-1d/d",
"to": "now/d",
},
{
"from": "now/d",
"to": "now",
},
},
},

@ -865,7 +859,7 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte":"now-15d",
"gte": "now-15d",
"lte": "now",
},
},

@ -903,13 +897,13 @@ func getNodeOnlineStatusOfRecentDay(nodeIDs []string)(map[string][]interface{},
recentStatus[nodeKey] = []interface{}{}
if histogramAgg, ok := bk["uptime_histogram"].(map[string]interface{}); ok {
if bks, ok := histogramAgg["buckets"].([]interface{}); ok {
for _, bkItem := range bks {
if bkVal, ok := bkItem.(map[string]interface{}); ok {
if minUptime, ok := util.GetMapValueByKeys([]string{"min_uptime", "value"}, bkVal); ok {
//mark node status as offline when uptime less than 10m
if v, ok := minUptime.(float64); ok && v >= 600000 {
recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "online"})
}else{
} else {
recentStatus[nodeKey] = append(recentStatus[nodeKey], []interface{}{bkVal["key"], "offline"})
}
}
}
|
||||
|
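Note: the fourteen hardcoded date_range buckets above could equally be generated in a loop; a minimal sketch under that assumption (helper name hypothetical, util.MapStr as in the diff). The 600000 threshold in the loop above is ten minutes in milliseconds, matching the inline comment.

// buildRecentDayRanges is a hypothetical helper producing the same
// now-13d/d .. now buckets that the query above spells out by hand.
func buildRecentDayRanges() []util.MapStr {
	ranges := make([]util.MapStr, 0, 14)
	for i := 13; i >= 1; i-- {
		to := "now/d"
		if i > 1 {
			to = fmt.Sprintf("now-%dd/d", i-1)
		}
		ranges = append(ranges, util.MapStr{"from": fmt.Sprintf("now-%dd/d", i), "to": to})
	}
	// the day in progress: from the start of today until now
	return append(ranges, util.MapStr{"from": "now/d", "to": "now"})
}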
@@ -927,10 +921,10 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
max = h.GetParameterOrDefault(req, "max", "now")
)

resBody := map[string] interface{}{}
resBody := map[string]interface{}{}
id := ps.ByName("id")
nodeUUID := ps.ByName("node_id")
q := &orm.Query{ Size: 1}
q := &orm.Query{Size: 1}
q.AddSort("timestamp", orm.DESC)
q.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),
@@ -942,16 +936,16 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
err, result := orm.Search(event.Event{}, q)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
namesM := util.MapStr{}
if len(result.Result) > 0 {
if data, ok := result.Result[0].(map[string]interface{}); ok {
if routingTable, exists := util.GetMapValueByKeys([]string{"payload", "elasticsearch", "node_routing_table"}, data); exists {
if rows, ok := routingTable.([]interface{}); ok{
if rows, ok := routingTable.([]interface{}); ok {
for _, row := range rows {
if v, ok := row.(map[string]interface{}); ok {
if indexName, ok := v["index"].(string); ok{
if indexName, ok := v["index"].(string); ok {
namesM[indexName] = true
}
}
@@ -961,12 +955,12 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
}
}

indexNames := make([]interface{}, 0, len(namesM) )
indexNames := make([]interface{}, 0, len(namesM))
for name, _ := range namesM {
indexNames = append(indexNames, name)
}

q1 := &orm.Query{ Size: 100}
q1 := &orm.Query{Size: 100}
q1.AddSort("timestamp", orm.DESC)
q1.Conds = orm.And(
orm.Eq("metadata.category", "elasticsearch"),
@@ -977,13 +971,13 @@ func (h *APIHandler) getNodeIndices(w http.ResponseWriter, req *http.Request, ps
err, result = orm.Search(elastic.IndexConfig{}, q1)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}

indices, err := h.getLatestIndices(req, min, max, id, &result)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}

h.WriteJSON(w, indices, http.StatusOK)
@@ -1069,7 +1063,7 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
}
indices := []interface{}{}
var indexPattern *radix.Pattern
if !hasAllPrivilege{
if !hasAllPrivilege {
indexPattern = radix.Compile(allowedIndices...)
}

@@ -1102,12 +1096,11 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
return indices, nil
}

func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
nodeID := ps.MustGetParameter("node_id")
q1 := orm.Query{
Size: 1,
Size: 1,
WildcardIndex: true,
}
q1.Conds = orm.And(
@@ -1119,9 +1112,9 @@ func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps
q1.AddSort("timestamp", orm.DESC)
err, result := orm.Search(&event.Event{}, &q1)
if err != nil {
h.WriteJSON(w,util.MapStr{
h.WriteJSON(w, util.MapStr{
"error": err.Error(),
}, http.StatusInternalServerError )
}, http.StatusInternalServerError)
return
}
var shardInfo interface{} = []interface{}{}
@@ -28,15 +28,15 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/crewjam/saml"
"net/http"
"net/url"
"github.com/crewjam/saml"

"github.com/crewjam/saml/samlsp"
)

var metdataurl = "https://sso.infini.ltd/metadata" //Metadata of the IDP
var sessioncert = "./sessioncert" //Key pair used for creating a signed session
var sessioncert = "./sessioncert" //Key pair used for creating a signed session
var sessionkey = "./sessionkey"
var serverkey = "./serverkey" //Server TLS
var servercert = "./servercert"
@@ -68,13 +68,13 @@ func main() {
rootURL, err := url.Parse(serverurl)
panicIfError(err)
samlSP, _ := samlsp.New(samlsp.Options{
URL: *rootURL,
Key: keyPair.PrivateKey.(*rsa.PrivateKey),
Certificate: keyPair.Leaf,
URL: *rootURL,
Key: keyPair.PrivateKey.(*rsa.PrivateKey),
Certificate: keyPair.Leaf,
IDPMetadata: &saml.EntityDescriptor{
//EntityID:
}, // you can also have Metadata XML instead of URL
EntityID: entityId,
//EntityID:
}, // you can also have Metadata XML instead of URL
EntityID: entityId,
})
app := http.HandlerFunc(hello)
http.Handle("/hello", samlSP.RequireAccount(app))
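Note: the SAML sample above references a keyPair whose private key and leaf certificate feed samlsp.Options; a minimal sketch of loading it from the sessioncert/sessionkey files declared earlier (standard crypto/tls and crypto/x509 usage; this step is assumed, not shown in the hunk):

keyPair, err := tls.LoadX509KeyPair(sessioncert, sessionkey)
panicIfError(err)
// LoadX509KeyPair may leave Leaf unset, so parse it explicitly for samlsp
keyPair.Leaf, err = x509.ParseCertificate(keyPair.Certificate[0])
panicIfError(err)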
@@ -49,7 +49,7 @@ func (h *AlertAPI) getAlert(w http.ResponseWriter, req *http.Request, ps httprou

q := orm.Query{
WildcardIndex: true,
Size: 1,
Size: 1,
}
q.Conds = orm.And(orm.Eq("id", id))
err, result := orm.Search(obj, &q)
@@ -76,16 +76,16 @@ func (h *AlertAPI) getAlert(w http.ResponseWriter, req *http.Request, ps httprou
func (h *AlertAPI) searchAlert(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

var (
keyword = h.GetParameterOrDefault(req, "keyword", "")
keyword = h.GetParameterOrDefault(req, "keyword", "")
queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
state = h.GetParameterOrDefault(req, "state", "")
priority = h.GetParameterOrDefault(req, "priority", "")
sort = h.GetParameterOrDefault(req, "sort", "")
ruleID = h.GetParameterOrDefault(req, "rule_id", "")
min = h.GetParameterOrDefault(req, "min", "")
max = h.GetParameterOrDefault(req, "max", "")
state = h.GetParameterOrDefault(req, "state", "")
priority = h.GetParameterOrDefault(req, "priority", "")
sort = h.GetParameterOrDefault(req, "sort", "")
ruleID = h.GetParameterOrDefault(req, "rule_id", "")
min = h.GetParameterOrDefault(req, "min", "")
max = h.GetParameterOrDefault(req, "max", "")
mustBuilder = &strings.Builder{}
sortBuilder = strings.Builder{}
)
@@ -160,13 +160,13 @@ func (h *AlertAPI) getAlertStats(w http.ResponseWriter, req *http.Request, ps ht
"terms_by_state": util.MapStr{
"terms": util.MapStr{
"field": "priority",
"size": 5,
"size": 5,
},
},
},
}

searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl) )
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl))
if err != nil {
h.WriteJSON(w, util.MapStr{
"error": err.Error(),

@@ -209,17 +209,17 @@ func (h *AlertAPI) deleteChannel(w http.ResponseWriter, req *http.Request, ps ht
func (h *AlertAPI) searchChannel(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

var (
keyword = h.GetParameterOrDefault(req, "keyword", "")
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
subType = h.GetParameterOrDefault(req, "sub_type", "")
keyword = h.GetParameterOrDefault(req, "keyword", "")
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
subType = h.GetParameterOrDefault(req, "sub_type", "")
typ = h.GetParameterOrDefault(req, "type", "")
sort = h.GetParameterOrDefault(req, "sort", "updated:desc")
sort = h.GetParameterOrDefault(req, "sort", "updated:desc")
)
mustQ := []interface{}{}
if keyword != "" {
mustQ = append(mustQ, util.MapStr{
"query_string": util.MapStr{"default_field":"*","query": keyword},
"query_string": util.MapStr{"default_field": "*", "query": keyword},
})
}
if typ != "" {
@@ -249,7 +249,7 @@ func (h *AlertAPI) searchChannel(w http.ResponseWriter, req *http.Request, ps ht
from = 0
}
var (
sortField string
sortField string
sortDirection string
)
sortParts := strings.Split(sort, ":")
@@ -306,26 +306,26 @@ func (h *AlertAPI) testChannel(w http.ResponseWriter, req *http.Request, ps http
}
firstGrpValue := global.MustLookupString(elastic.GlobalSystemElasticsearchID)
ctx := map[string]interface{}{
"title": "INFINI platform test alert message",
"message": "This is just a test message, do not reply!",
"objects": []string{".infini_metrics"},
"trigger_at": time.Now().UnixMilli(),
"duration": "5m10s",
"rule_id": util.GetUUID(),
"rule_name": "test rule",
"resource_id": util.GetUUID(),
"resource_name": "test resource",
"event_id": util.GetUUID(),
"timestamp": time.Now().UnixMilli(),
"title": "INFINI platform test alert message",
"message": "This is just a test message, do not reply!",
"objects": []string{".infini_metrics"},
"trigger_at": time.Now().UnixMilli(),
"duration": "5m10s",
"rule_id": util.GetUUID(),
"rule_name": "test rule",
"resource_id": util.GetUUID(),
"resource_name": "test resource",
"event_id": util.GetUUID(),
"timestamp": time.Now().UnixMilli(),
"first_group_value": firstGrpValue,
"first_threshold": "90",
"priority": "critical",
"first_threshold": "90",
"priority": "critical",
"results": []util.MapStr{
{"threshold": "90",
"priority": "critical",
"group_values": []string{firstGrpValue, "group_value2" },
"issue_timestamp": time.Now().UnixMilli()-500,
"result_value": 90,
"priority": "critical",
"group_values": []string{firstGrpValue, "group_value2"},
"issue_timestamp": time.Now().UnixMilli() - 500,
"result_value": 90,
"relation_values": util.MapStr{"a": 100, "b": 90},
},
},
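Note: searchAlert above assembles its request by substituting the accumulated sort and must clauses plus paging into the queryDSL template; a minimal sketch of that final step under those assumptions (the Atoi parsing of strSize/strFrom is presumed, not shown in these hunks):

size, _ := strconv.Atoi(strSize)
from, _ := strconv.Atoi(strFrom)
// fill `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
q := fmt.Sprintf(queryDSL, sortBuilder.String(), mustBuilder.String(), size, from)
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), []byte(q))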
@@ -47,11 +47,11 @@ import (

func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
body := struct {
Messages []alerting.AlertMessage `json:"messages"`
IgnoredReason string `json:"ignored_reason"`
IsReset bool `json:"is_reset"`
Messages []alerting.AlertMessage `json:"messages"`
IgnoredReason string `json:"ignored_reason"`
IsReset bool `json:"is_reset"`
}{}
err := h.DecodeJSON(req, &body)
err := h.DecodeJSON(req, &body)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
@@ -83,7 +83,7 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request,
},
})
source = fmt.Sprintf("ctx._source['status'] = '%s'", alerting.MessageStateAlerting)
}else {
} else {
must = append(must, util.MapStr{
"term": util.MapStr{
"status": util.MapStr{
@@ -114,9 +114,8 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request,
_ = kv.DeleteKey(alerting2.KVLastMessageState, []byte(msg.RuleID))
}

h.WriteJSON(w, util.MapStr{
"ids": messageIDs,
"ids": messageIDs,
"result": "updated",
}, 200)
}
@@ -138,7 +137,7 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
return
}
if !hasAllPrivilege {
must = append(must,clusterFilter)
must = append(must, clusterFilter)
}
queryDsl := util.MapStr{
"size": 0,
@@ -151,13 +150,13 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
"terms_by_priority": util.MapStr{
"terms": util.MapStr{
"field": "priority",
"size": 5,
"size": 5,
},
},
},
}
indexName := orm.GetWildcardIndexName(alerting.AlertMessage{})
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl) )
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl))
if err != nil {
h.WriteJSON(w, util.MapStr{
"error": err.Error(),
@@ -172,7 +171,7 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
}
}
}
for _, status := range []string{"info", "low","medium","high", "critical"} {
for _, status := range []string{"info", "low", "medium", "high", "critical"} {
if _, ok := statusCounts[status]; !ok {
statusCounts[status] = 0
}
@@ -206,18 +205,18 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
"terms_by_category": util.MapStr{
"terms": util.MapStr{
"field": "category",
"size": 100,
"size": 100,
},
},
"terms_by_tags": util.MapStr{
"terms": util.MapStr{
"field": "tags",
"size": 100,
"size": 100,
},
},
},
}
searchRes, err = esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl) )
searchRes, err = esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(queryDsl))
if err != nil {
h.WriteJSON(w, util.MapStr{
"error": err.Error(),
@@ -245,15 +244,14 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
"current": statusCounts,
},
"categories": categories,
"tags": tags,
"tags": tags,
}, http.StatusOK)
}

func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

var (
queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d,"aggs": {
queryDSL = `{"sort":[%s],"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d,"aggs": {
"max_updated": {
"max": {
"field": "updated"
@@ -267,16 +265,16 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request,
}}`
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
status = h.GetParameterOrDefault(req, "status", "")
priority = h.GetParameterOrDefault(req, "priority", "")
sort = h.GetParameterOrDefault(req, "sort", "")
ruleID = h.GetParameterOrDefault(req, "rule_id", "")
min = h.GetParameterOrDefault(req, "min", "")
max = h.GetParameterOrDefault(req, "max", "")
status = h.GetParameterOrDefault(req, "status", "")
priority = h.GetParameterOrDefault(req, "priority", "")
sort = h.GetParameterOrDefault(req, "sort", "")
ruleID = h.GetParameterOrDefault(req, "rule_id", "")
min = h.GetParameterOrDefault(req, "min", "")
max = h.GetParameterOrDefault(req, "max", "")
mustBuilder = &strings.Builder{}
sortBuilder = strings.Builder{}
category = h.GetParameterOrDefault(req, "category", "")
tags = h.GetParameterOrDefault(req, "tags", "")
category = h.GetParameterOrDefault(req, "category", "")
tags = h.GetParameterOrDefault(req, "tags", "")
)
timeRange := util.MapStr{}
if min != "" {
@@ -292,7 +290,7 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request,
},
}
mustBuilder.Write(util.MustToJSONBytes(timeFilter))
}else{
} else {
mustBuilder.WriteString(`{"match_all":{}}`)
}

@@ -374,7 +372,7 @@ func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request,
h.WriteJSON(w, esRes, http.StatusOK)
}

func parseTime( t interface{}, layout string) (time.Time, error){
func parseTime(t interface{}, layout string) (time.Time, error) {
switch t.(type) {
case string:
return time.Parse(layout, t.(string))
@@ -384,7 +382,7 @@ func parseTime( t interface{}, layout string) (time.Time, error){
}

func (h *AlertAPI) getAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
message := &alerting.AlertMessage{
message := &alerting.AlertMessage{
ID: ps.ByName("message_id"),
}
exists, err := orm.Get(message)
@@ -417,36 +415,36 @@ func (h *AlertAPI) getAlertMessage(w http.ResponseWriter, req *http.Request, ps
var duration time.Duration
if message.Status == alerting.MessageStateRecovered {
duration = message.Updated.Sub(message.Created)
}else{
} else {
duration = time.Now().Sub(message.Created)
}
detailObj := util.MapStr{
"message_id": message.ID,
"rule_id": message.RuleID,
"rule_name": rule.Name,
"rule_enabled": rule.Enabled,
"title": message.Title,
"message": message.Message,
"priority": message.Priority,
"created": message.Created,
"updated": message.Updated,
"resource_name": rule.Resource.Name,
"resource_id": rule.Resource.ID,
"message_id": message.ID,
"rule_id": message.RuleID,
"rule_name": rule.Name,
"rule_enabled": rule.Enabled,
"title": message.Title,
"message": message.Message,
"priority": message.Priority,
"created": message.Created,
"updated": message.Updated,
"resource_name": rule.Resource.Name,
"resource_id": rule.Resource.ID,
"resource_objects": rule.Resource.Objects,
"conditions": rule.Conditions,
"duration": duration.Milliseconds(),
"ignored_time": message.IgnoredTime,
"ignored_reason": message.IgnoredReason,
"ignored_user": message.IgnoredUser,
"status": message.Status,
"expression": rule.Metrics.Expression,
"hit_condition": hitCondition,
"conditions": rule.Conditions,
"duration": duration.Milliseconds(),
"ignored_time": message.IgnoredTime,
"ignored_reason": message.IgnoredReason,
"ignored_user": message.IgnoredUser,
"status": message.Status,
"expression": rule.Metrics.Expression,
"hit_condition": hitCondition,
}
h.WriteJSON(w, detailObj, http.StatusOK)
}

func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
message := &alerting.AlertMessage{
message := &alerting.AlertMessage{
ID: ps.ByName("message_id"),
}
exists, err := orm.Get(message)
@@ -481,12 +479,12 @@ func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.R
}
if rule.NotificationConfig != nil {
notificationInfo["alerting"] = util.MapStr{
"accept_time_range": rule.NotificationConfig.AcceptTimeRange,
"throttle_period": rule.NotificationConfig.ThrottlePeriod,
"escalation_enabled": rule.NotificationConfig.EscalationEnabled,
"accept_time_range": rule.NotificationConfig.AcceptTimeRange,
"throttle_period": rule.NotificationConfig.ThrottlePeriod,
"escalation_enabled": rule.NotificationConfig.EscalationEnabled,
"escalation_throttle_period": rule.NotificationConfig.EscalationThrottlePeriod,
"normal_stats": stats["normal"],
"escalation_stats": stats["escalation"],
"normal_stats": stats["normal"],
"escalation_stats": stats["escalation"],
}
}
if rule.RecoveryNotificationConfig != nil {
@@ -497,7 +495,7 @@ func (h *AlertAPI) getMessageNotificationInfo(w http.ResponseWriter, req *http.R
h.WriteJSON(w, notificationInfo, http.StatusOK)
}

func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error){
func getMessageNotificationStats(msg *alerting.AlertMessage) (util.MapStr, error) {
rangeQ := util.MapStr{
"gte": msg.Created.UnixMilli(),
}
@@ -508,7 +506,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
"grp_normal_channel": util.MapStr{
"terms": util.MapStr{
"field": "action_execution_results.channel_type",
"size": 20,
"size": 20,
},
"aggs": util.MapStr{
"top": util.MapStr{
@@ -531,7 +529,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
"grp_escalation_channel": util.MapStr{
"terms": util.MapStr{
"field": "escalation_action_results.channel_type",
"size": 20,
"size": 20,
},
"aggs": util.MapStr{
"top": util.MapStr{
@@ -556,7 +554,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
aggs["grp_recover_channel"] = util.MapStr{
"terms": util.MapStr{
"field": "recover_action_results.channel_type",
"size": 20,
"size": 20,
},
"aggs": util.MapStr{
"top": util.MapStr{
@@ -610,7 +608,7 @@ func getMessageNotificationStats(msg *alerting.AlertMessage )(util.MapStr, error
var normalStats = extractStatsFromRaw(result.Raw, "grp_normal_channel", "action_execution_results")
var escalationStats = extractStatsFromRaw(result.Raw, "grp_escalation_channel", "escalation_action_results")
stats := util.MapStr{
"normal": normalStats,
"normal": normalStats,
"escalation": escalationStats,
}
if msg.Status == alerting.MessageStateRecovered {
@@ -627,14 +625,14 @@ func extractStatsFromRaw(searchRawRes []byte, grpKey string, actionKey string) [
statsItem["channel_type"], _ = jsonparser.GetString(value, "key")
statsItem["count"], _ = jsonparser.GetInt(value, "doc_count")
jsonparser.ArrayEach(value, func(v []byte, dataType jsonparser.ValueType, offset int, err error) {
ck, _ := jsonparser.GetString(v, "channel_type")
cn, _ := jsonparser.GetString(v, "channel_name")
ck, _ := jsonparser.GetString(v, "channel_type")
cn, _ := jsonparser.GetString(v, "channel_name")
if ck == statsItem["channel_type"] {
statsItem["channel_name"] = cn
statsItem["error"], _ = jsonparser.GetString(v, "error")
statsItem["error"], _ = jsonparser.GetString(v, "error")
}
}, "top", "hits","hits", "[0]", "_source",actionKey)
statsItem["last_time"], _ = jsonparser.GetString(value, "top", "hits","hits", "[0]", "_source","created")
}, "top", "hits", "hits", "[0]", "_source", actionKey)
statsItem["last_time"], _ = jsonparser.GetString(value, "top", "hits", "hits", "[0]", "_source", "created")
stats = append(stats, statsItem)
}, "aggregations", grpKey, "buckets")
return stats
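Note: extractStatsFromRaw above reads the aggregation response with github.com/buger/jsonparser instead of unmarshalling the whole body; a minimal standalone sketch of the same traversal pattern (field names mirror the diff; raw and grpKey are assumed inputs):

// Walk aggregations.<grpKey>.buckets and pull one nested field per bucket.
jsonparser.ArrayEach(raw, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
	channelType, _ := jsonparser.GetString(value, "key")
	count, _ := jsonparser.GetInt(value, "doc_count")
	// top.hits.hits[0]._source.created of the top-hits sub-aggregation
	lastTime, _ := jsonparser.GetString(value, "top", "hits", "hits", "[0]", "_source", "created")
	log.Debugf("channel=%s count=%d last=%s", channelType, count, lastTime)
}, "aggregations", grpKey, "buckets")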
@@ -48,9 +48,9 @@ func InitAPI() {
api.HandleAPIMethod(api.POST, "/email/server/_test", email.RequirePermission(email.testEmailServer, enum.PermissionSmtpServerRead))
api.HandleAPIMethod(api.GET, "/email/server/:email_server_id", email.RequirePermission(email.getEmailServer, enum.PermissionAlertRuleRead))
api.HandleAPIMethod(api.POST, "/email/server", email.RequirePermission(email.createEmailServer, enum.PermissionSmtpServerWrite))
api.HandleAPIMethod(api.PUT, "/email/server/:email_server_id", email.RequirePermission(email.updateEmailServer, enum.PermissionSmtpServerWrite))
api.HandleAPIMethod(api.DELETE, "/email/server/:email_server_id", email.RequirePermission(email.deleteEmailServer, enum.PermissionSmtpServerWrite))
api.HandleAPIMethod(api.GET, "/email/server/_search", email.RequirePermission(email.searchEmailServer, enum.PermissionSmtpServerRead))
api.HandleAPIMethod(api.PUT, "/email/server/:email_server_id", email.RequirePermission(email.updateEmailServer, enum.PermissionSmtpServerWrite))
api.HandleAPIMethod(api.DELETE, "/email/server/:email_server_id", email.RequirePermission(email.deleteEmailServer, enum.PermissionSmtpServerWrite))
api.HandleAPIMethod(api.GET, "/email/server/_search", email.RequirePermission(email.searchEmailServer, enum.PermissionSmtpServerRead))

credential.RegisterChangeEvent(func(cred *credential.Credential) {
query := util.MapStr{
@@ -38,8 +38,7 @@ import (
)

func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}

reqParams := elastic.CommonCommand{}
err := h.DecodeJSON(req, &reqParams)
@@ -54,8 +53,8 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
reqParams.ID = util.GetUUID()
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))

queryDSL :=[]byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title))
var indexName = orm.GetIndexName(reqParams)
queryDSL := []byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title))
var indexName = orm.GetIndexName(reqParams)
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, queryDSL)
if err != nil {
log.Error(err)
@@ -63,13 +62,13 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
h.WriteJSON(w, resBody, http.StatusOK)
return
}
if len(searchRes.Hits.Hits) > 0 {
if len(searchRes.Hits.Hits) > 0 {
resBody["error"] = "title already exists"
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusOK)
return
}
_, err = esClient.Index(indexName,"", reqParams.ID, reqParams, "wait_for")
_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
@@ -81,12 +80,11 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
resBody["result"] = "created"
resBody["_source"] = reqParams

h.WriteJSON(w, resBody,http.StatusOK)
h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}

reqParams := elastic.CommonCommand{}
err := h.DecodeJSON(req, &reqParams)
@@ -99,8 +97,8 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
reqParams.ID = ps.ByName("cid")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))

queryDSL :=[]byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title))
var indexName = orm.GetIndexName(reqParams)
queryDSL := []byte(fmt.Sprintf(`{"size":1, "query":{"bool":{"must":{"match":{"title.keyword":"%s"}}}}}`, reqParams.Title))
var indexName = orm.GetIndexName(reqParams)
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, queryDSL)
if err != nil {
log.Error(err)
@@ -108,13 +106,13 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if len(searchRes.Hits.Hits) > 0 && searchRes.Hits.Hits[0].ID != reqParams.ID {
if len(searchRes.Hits.Hits) > 0 && searchRes.Hits.Hits[0].ID != reqParams.ID {
resBody["error"] = "title already exists"
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
_, err = esClient.Index(indexName,"", reqParams.ID, reqParams, "wait_for")
_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
@@ -126,21 +124,20 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
resBody["result"] = "updated"
resBody["_source"] = reqParams

h.WriteJSON(w, resBody,http.StatusOK)
h.WriteJSON(w, resBody, http.StatusOK)
}

func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}

var (
keyword = h.GetParameterOrDefault(req, "keyword", "")
keyword = h.GetParameterOrDefault(req, "keyword", "")
queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
filterBuilder = &strings.Builder{}
)
if keyword != ""{
if keyword != "" {
filterBuilder.WriteString(fmt.Sprintf(`{"query_string": {
"default_field": "*",
"query": "%s"
@@ -167,7 +164,7 @@ func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req *
return
}

h.WriteJSON(w, searchRes,http.StatusOK)
h.WriteJSON(w, searchRes, http.StatusOK)
}

func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@@ -178,9 +175,9 @@ func (h *APIHandler) HandleDeleteCommonCommandAction(w http.ResponseWriter, req
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
if delRes!=nil{
if delRes != nil {
h.WriteJSON(w, resBody, delRes.StatusCode)
}else{
} else {
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
return
@@ -34,8 +34,8 @@ import (
)

type docReqBody struct {
From int `json:"from"`
Size int `json:"size"`
From int `json:"from"`
Size int `json:"size"`
Filter string `json:"filter"`
Cluster string `json:"cluster"`
Keyword string `json:"keyword"`
@@ -155,7 +155,7 @@ func (handler APIHandler) HandleSearchDocumentAction(w http.ResponseWriter, req
}
indexName := ps.ByName("index")
var (
sort = ""
sort = ""
)
if reqBody.From < 0 {
reqBody.From = 0
@@ -206,7 +206,7 @@ func (handler APIHandler) ValidateDocIDAction(w http.ResponseWriter, req *http.R
var (
index = handler.GetParameter(req, "index")
docID = handler.GetParameter(req, "doc_id")
typ = handler.GetParameter(req, "type")
typ = handler.GetParameter(req, "type")
)
getRes, err := client.Get(index, typ, docID)
if err != nil {
@@ -28,13 +28,13 @@
package insight

import (
"net/http"
"strconv"
log "github.com/cihub/seelog"
insight2 "infini.sh/console/model/insight"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"strconv"
)

func (h *InsightAPI) createDashboard(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

@@ -70,8 +70,8 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
kv := strings.Split(part, "=")
if len(kv) == 2 {
k := strings.TrimSpace(kv[0])
kvs[k]= strings.TrimSpace(kv[1])
}else{
kvs[k] = strings.TrimSpace(kv[1])
} else {
log.Debugf("got unexpected directory part: %s", part)
}
}
@@ -93,7 +93,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
}

}
valueField = kvs["property"]
valueField = kvs["property"]
if indexName == "" || keyField == "" || valueField == "" {
return kvs["default"]
}
@@ -107,7 +107,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
}
var (
cacheLabels map[string]string
ok bool
ok bool
)
if cacheLabels, ok = cacheLabelsMap[cacheKey]; !ok {
var keyFieldValues []string
@@ -120,7 +120,7 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req
cacheLabels, err = common2.GetLabelMaps(indexName, keyField, valueField, client, keyFieldValues, len(keyFieldValues))
if err != nil {
log.Error(err)
}else{
} else {
cacheLabelsMap[cacheKey] = cacheLabels
}
}
@@ -155,10 +155,10 @@ func (h *InsightAPI) renderMapLabelTemplate(w http.ResponseWriter, req *http.Req

type RenderTemplateRequest struct {
Contexts []RenderTemplateContext `json:"contexts"`
Template string `json:"template"`
Template string `json:"template"`
}

type RenderTemplateContext struct {
Key string `json:"key"`
Key string `json:"key"`
Value map[string]interface{} `json:"value"`
}
@@ -280,7 +280,7 @@ func getMetricData(metric *insight.Metric) (interface{}, error) {
params := map[string]interface{}{}
if metric.BucketSize != "" {
bucketSize := metric.BucketSize
if metric.BucketSize == "auto" && interval != "" {
if metric.BucketSize == "auto" && interval != "" {
bucketSize = interval
}
if interval != "" || bucketSize != "auto" {
@@ -348,13 +348,13 @@ func getMetricData(metric *insight.Metric) (interface{}, error) {
}
}
retMetricDataItem.Timestamp = timestamp
if len(metric.Formulas) <= 1 && metric.Formula != ""{
if len(metric.Formulas) <= 1 && metric.Formula != "" {
//support older versions by returning the result for a single formula.
retMetricDataItem.Value = result
} else {
if v, ok := retMetricDataItem.Value.(map[string]interface{}); ok {
v[formula] = result
}else{
} else {
retMetricDataItem.Value = map[string]interface{}{formula: result}
}
}
@@ -375,7 +375,7 @@ func getMetricData(metric *insight.Metric) (interface{}, error) {
}
}
return util.MapStr{
"data": result,
"data": result,
"request": string(queryDSL),
}, nil
}
@@ -115,8 +115,8 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
}
}
var (
useDateHistogram = false
dateHistogramAgg util.MapStr
useDateHistogram = false
dateHistogramAgg util.MapStr
dateHistogramAggName string
)
if metric.BucketSize != "" && metric.TimeField != "" {
@@ -128,10 +128,10 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
buckets = 2
}
dateHistogramAgg = util.MapStr{
"field": metric.TimeField,
"field": metric.TimeField,
"buckets": buckets,
}
}else{
} else {
dateHistogramAggName = "date_histogram"
verInfo := elastic.GetClient(metric.ClusterId).GetVersion()

@@ -144,7 +144,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
return nil, fmt.Errorf("get interval field error: %w", err)
}
dateHistogramAgg = util.MapStr{
"field": metric.TimeField,
"field": metric.TimeField,
intervalField: metric.BucketSize,
}
}
@@ -154,7 +154,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
basicAggs = util.MapStr{
"time_buckets": util.MapStr{
dateHistogramAggName: dateHistogramAgg,
"aggs": basicAggs,
"aggs": basicAggs,
},
}
}
@@ -179,7 +179,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
"field": groups[i].Field,
"size": limit,
}
if i == grpLength - 1 && len(metric.Sort) > 0 {
if i == grpLength-1 && len(metric.Sort) > 0 {
//use bucket sort instead of terms order when time after group
if !timeBeforeGroup && len(metric.Sort) > 0 {
basicAggs["sort_field"] = util.MapStr{
@@ -197,7 +197,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
},
},
}
}else{
} else {
var termsOrder []interface{}
percentAggs := []string{"p99", "p95", "p90", "p80", "p50"}
for _, sortItem := range metric.Sort {
@@ -250,7 +250,7 @@ func GenerateQuery(metric *insight.Metric) (interface{}, error) {
basicAggs = util.MapStr{
"time_buckets": util.MapStr{
dateHistogramAggName: dateHistogramAgg,
"aggs": basicAggs,
"aggs": basicAggs,
},
}
}
@@ -288,7 +288,7 @@ func CollectMetricData(agg interface{}, timeBeforeGroup bool) ([]insight.MetricD
}

// timeBeforeGroup => false
func collectMetricData(agg interface{}, groupValues []string, metricData *[]insight.MetricData) (interval string){
func collectMetricData(agg interface{}, groupValues []string, metricData *[]insight.MetricData) (interval string) {
if aggM, ok := agg.(map[string]interface{}); ok {
if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok {
interval, _ = timeBks["interval"].(string)
@@ -351,7 +351,7 @@ func collectMetricData(agg interface{}, groupValues []string, metricData *[]insi
}

// timeBeforeGroup => true
func collectMetricDataOther(agg interface{}, groupValues []string, metricData *[]insight.MetricData, timeKey interface{}) (interval string){
func collectMetricDataOther(agg interface{}, groupValues []string, metricData *[]insight.MetricData, timeKey interface{}) (interval string) {
if aggM, ok := agg.(map[string]interface{}); ok {
if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok {
interval, _ = timeBks["interval"].(string)
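Note: GenerateQuery above resolves the date_histogram parameter name through a version-aware helper because Elasticsearch renamed it; a hedged sketch of that switch (the 7.2 split into fixed_interval/calendar_interval is documented Elasticsearch behavior, but the real helper also accounts for the distribution, which this sketch omits):

// intervalFieldFor is a hypothetical reduction of GetDateHistogramIntervalField:
// before 7.2 the parameter was "interval"; 7.2+ uses "fixed_interval"
// (or "calendar_interval" for calendar units).
func intervalFieldFor(major, minor int) string {
	if major > 7 || (major == 7 && minor >= 2) {
		return "fixed_interval"
	}
	return "interval"
}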
@ -34,8 +34,8 @@ import (
|
|||
"strings"
|
||||
|
||||
log "github.com/cihub/seelog"
|
||||
httprouter "infini.sh/framework/core/api/router"
|
||||
"infini.sh/console/model/insight"
|
||||
httprouter "infini.sh/framework/core/api/router"
|
||||
"infini.sh/framework/core/orm"
|
||||
"infini.sh/framework/core/util"
|
||||
)
|
||||
|
|
|
@ -213,7 +213,7 @@ func GetCollectionMetas() map[string]CollectionMeta {
|
|||
MatchObject: &alerting.Rule{},
|
||||
},
|
||||
"metric": {
|
||||
Name: "metric",
|
||||
Name: "metric",
|
||||
MatchObject: &insight.MetricBase{},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -29,13 +29,13 @@ package server
|
|||
|
||||
import (
|
||||
log "github.com/cihub/seelog"
|
||||
"infini.sh/framework/modules/configs/common"
|
||||
"infini.sh/framework/modules/configs/config"
|
||||
httprouter "infini.sh/framework/core/api/router"
|
||||
config3 "infini.sh/framework/core/config"
|
||||
"infini.sh/framework/core/global"
|
||||
"infini.sh/framework/core/model"
|
||||
"infini.sh/framework/core/util"
|
||||
"infini.sh/framework/modules/configs/common"
|
||||
"infini.sh/framework/modules/configs/config"
|
||||
"net/http"
|
||||
"path"
|
||||
"sync"
|
||||
|
|
|
@ -37,13 +37,13 @@ import (
|
|||
|
||||
log "github.com/cihub/seelog"
|
||||
"infini.sh/console/core/security/enum"
|
||||
"infini.sh/framework/modules/configs/common"
|
||||
"infini.sh/framework/core/api"
|
||||
httprouter "infini.sh/framework/core/api/router"
|
||||
elastic2 "infini.sh/framework/core/elastic"
|
||||
"infini.sh/framework/core/model"
|
||||
"infini.sh/framework/core/orm"
|
||||
"infini.sh/framework/core/util"
|
||||
"infini.sh/framework/modules/configs/common"
|
||||
"infini.sh/framework/modules/elastic"
|
||||
common2 "infini.sh/framework/modules/elastic/common"
|
||||
)
|
||||
|
|
|
@ -32,11 +32,11 @@ import (
|
|||
"fmt"
|
||||
log "github.com/cihub/seelog"
|
||||
"infini.sh/console/core"
|
||||
"infini.sh/framework/modules/configs/common"
|
||||
"infini.sh/framework/core/api"
|
||||
"infini.sh/framework/core/errors"
|
||||
"infini.sh/framework/core/global"
|
||||
"infini.sh/framework/core/util"
|
||||
"infini.sh/framework/modules/configs/common"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
|
|
@ -35,27 +35,27 @@ import (
|
|||
)
|
||||
|
||||
type EmailAction struct {
|
||||
Data *alerting.Email
|
||||
Data *alerting.Email
|
||||
Subject string
|
||||
Body string
|
||||
Body string
|
||||
}
|
||||
|
||||
const EmailQueueName = "email_messages"
|
||||
|
||||
func (act *EmailAction) Execute()([]byte, error){
|
||||
func (act *EmailAction) Execute() ([]byte, error) {
|
||||
queueCfg := queue.GetOrInitConfig(EmailQueueName)
|
||||
if act.Data.ServerID == "" {
|
||||
return nil, fmt.Errorf("parameter server_id must not be empty")
|
||||
}
|
||||
emailMsg := util.MapStr{
|
||||
"server_id": act.Data.ServerID,
|
||||
"email": act.Data.Recipients.To,
|
||||
"template": "raw",
|
||||
"email": act.Data.Recipients.To,
|
||||
"template": "raw",
|
||||
"variables": util.MapStr{
|
||||
"subject": act.Subject,
|
||||
"body": act.Body,
|
||||
"subject": act.Subject,
|
||||
"body": act.Body,
|
||||
"content_type": act.Data.ContentType,
|
||||
"cc": act.Data.Recipients.CC,
|
||||
"cc": act.Data.Recipients.CC,
|
||||
},
|
||||
}
|
||||
emailMsgBytes := util.MustToJSONBytes(emailMsg)
|
||||
|
|
|
@ -40,7 +40,7 @@ type Action interface {
|
|||
}
|
||||
|
||||
type WebhookAction struct {
|
||||
Data *alerting.CustomWebhook
|
||||
Data *alerting.CustomWebhook
|
||||
Message string
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,7 @@ var actionClient = http.Client{
|
|||
},
|
||||
}
|
||||
|
||||
func (act *WebhookAction) Execute()([]byte, error){
|
||||
func (act *WebhookAction) Execute() ([]byte, error) {
|
||||
var reqURL = act.Data.URL
|
||||
reqBody := strings.NewReader(act.Message)
|
||||
req, err := http.NewRequest(http.MethodPost, reqURL, reqBody)
|
||||
|
@ -67,4 +67,3 @@ func (act *WebhookAction) Execute()([]byte, error){
|
|||
defer res.Body.Close()
|
||||
return ioutil.ReadAll(res.Body)
|
||||
}
|
||||
|
||||
|
|
|
@ -42,9 +42,9 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by
|
|||
return nil, fmt.Errorf("empty channel"), nil
|
||||
}
|
||||
var (
|
||||
act action.Action
|
||||
act action.Action
|
||||
message []byte
|
||||
err error
|
||||
err error
|
||||
)
|
||||
switch channel.Type {
|
||||
|
||||
|
@ -75,7 +75,7 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by
|
|||
act = &action.EmailAction{
|
||||
Data: channel.Email,
|
||||
Subject: string(subjectBytes),
|
||||
Body: string(message),
|
||||
Body: string(message),
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported action type: %s", channel.Type), message
|
||||
|
@ -84,10 +84,10 @@ func PerformChannel(channel *alerting.Channel, ctx map[string]interface{}) ([]by
|
|||
return executeResult, err, message
|
||||
}
|
||||
|
||||
func ResolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error){
|
||||
msg := messageTemplate
|
||||
func ResolveMessage(messageTemplate string, ctx map[string]interface{}) ([]byte, error) {
|
||||
msg := messageTemplate
|
||||
tmpl, err := template.New("alert-message").Funcs(funcs.GenericFuncMap()).Parse(msg)
|
||||
if err !=nil {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse message temlate error: %w", err)
|
||||
}
|
||||
msgBuffer := &bytes.Buffer{}
|
||||
|
@ -120,14 +120,14 @@ func RetrieveChannel(ch *alerting.Channel, raiseChannelEnabledErr bool) (*alerti
|
|||
case alerting.ChannelEmail:
|
||||
if ch.Email == nil {
|
||||
ch.Email = refCh.Email
|
||||
}else{
|
||||
} else {
|
||||
ch.Email.ServerID = refCh.Email.ServerID
|
||||
ch.Email.Recipients = refCh.Email.Recipients
|
||||
}
|
||||
case alerting.ChannelWebhook:
|
||||
if ch.Webhook == nil {
|
||||
ch.Webhook = refCh.Webhook
|
||||
}else {
|
||||
} else {
|
||||
ch.Webhook.URL = refCh.Webhook.URL
|
||||
}
|
||||
}
|
||||
|
|
|
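Note: ResolveMessage above renders alert templates with text/template plus a generic function map; a minimal usage sketch under those assumptions (the template string and context keys here are illustrative, echoing the testChannel ctx earlier in this diff):

msg, err := ResolveMessage("[{{.priority}}] {{.rule_name}}: {{.title}}", map[string]interface{}{
	"priority":  "critical",
	"rule_name": "test rule",
	"title":     "INFINI platform test alert message",
})
if err != nil {
	log.Error(err)
}
_ = msg // rendered bytes, passed on by PerformChannel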
@@ -29,12 +29,11 @@ package alerting

const (
KVLastNotificationTime = "alert_last_notification_time"
KVLastTermStartTime = "alert_last_term_start_time"
KVLastEscalationTime = "alert_last_escalation_time"
KVLastMessageState = "alert_last_message_state"
KVLastTermStartTime = "alert_last_term_start_time"
KVLastEscalationTime = "alert_last_escalation_time"
KVLastMessageState = "alert_last_message_state"
)

const (
ParamRuleID = "rule_id" // rule UUID
ParamResourceID = "resource_id" // resource UUID
@@ -50,6 +49,7 @@ const (
ParamGroupValues = "group_values"
ParamIssueTimestamp = "issue_timestamp"
ParamRelationValues = "relation_values"
//rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
//check_status ,timestamp,

// rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
// check_status ,timestamp,
)
@ -34,10 +34,10 @@ import (
|
|||
log "github.com/cihub/seelog"
|
||||
"infini.sh/console/model"
|
||||
"infini.sh/console/model/alerting"
|
||||
"infini.sh/console/model/insight"
|
||||
alerting2 "infini.sh/console/service/alerting"
|
||||
"infini.sh/console/service/alerting/common"
|
||||
"infini.sh/framework/core/elastic"
|
||||
"infini.sh/console/model/insight"
|
||||
"infini.sh/framework/core/kv"
|
||||
"infini.sh/framework/core/orm"
|
||||
"infini.sh/framework/core/util"
|
||||
|
@ -50,14 +50,14 @@ import (
|
|||
)
|
||||
|
||||
type Engine struct {
|
||||
|
||||
}
|
||||
//GenerateQuery generate a final elasticsearch query dsl object
|
||||
//when RawFilter of rule is not empty, priority use it, otherwise to covert from Filter of rule (todo)
|
||||
//auto generate time filter query and then attach to final query
|
||||
//auto generate elasticsearch aggregations by metrics of rule
|
||||
//group of metric item converted to terms aggregation and TimeField of rule converted to date_histogram aggregation
|
||||
//convert statistic of metric item to elasticsearch aggregation
|
||||
|
||||
// GenerateQuery generate a final elasticsearch query dsl object
|
||||
// when RawFilter of rule is not empty, priority use it, otherwise to covert from Filter of rule (todo)
|
||||
// auto generate time filter query and then attach to final query
|
||||
// auto generate elasticsearch aggregations by metrics of rule
|
||||
// group of metric item converted to terms aggregation and TimeField of rule converted to date_histogram aggregation
|
||||
// convert statistic of metric item to elasticsearch aggregation
|
||||
func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error) {
|
||||
filter, err := engine.GenerateRawFilter(rule, filterParam)
|
||||
if err != nil {
|
||||
|
@ -72,7 +72,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
|
|||
}
|
||||
basicAggs := util.MapStr{}
|
||||
//todo bucket sort (es 6.1) bucket script (es 2.0)
|
||||
for _, metricItem := range rule.Metrics.Items {
|
||||
for _, metricItem := range rule.Metrics.Items {
|
||||
metricAggs := engine.generateAgg(&metricItem)
|
||||
if err = util.MergeFields(basicAggs, metricAggs, true); err != nil {
|
||||
return nil, err
|
||||
|
@ -81,21 +81,21 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
|
|||
verInfo := elastic.GetClient(rule.Resource.ID).GetVersion()
|
||||
var periodInterval = rule.Metrics.BucketSize
|
||||
if filterParam != nil && filterParam.BucketSize != "" {
|
||||
periodInterval = filterParam.BucketSize
|
||||
periodInterval = filterParam.BucketSize
|
||||
}
|
||||
|
||||
if verInfo.Number==""{
|
||||
if verInfo.Number == "" {
|
||||
panic("invalid version")
|
||||
}
|
||||
|
||||
intervalField, err := elastic.GetDateHistogramIntervalField(verInfo.Distribution,verInfo.Number, periodInterval )
|
||||
intervalField, err := elastic.GetDateHistogramIntervalField(verInfo.Distribution, verInfo.Number, periodInterval)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get interval field error: %w", err)
|
||||
}
|
||||
timeAggs := util.MapStr{
|
||||
"time_buckets": util.MapStr{
|
||||
"date_histogram": util.MapStr{
|
||||
"field": rule.Resource.TimeField,
|
||||
"field": rule.Resource.TimeField,
|
||||
intervalField: periodInterval,
|
||||
},
|
||||
"aggs": basicAggs,
|
||||
|
@ -107,7 +107,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
|
|||
if grpLength := len(groups); grpLength > 0 {
|
||||
var lastGroupAgg util.MapStr
|
||||
|
||||
for i := grpLength-1; i>=0; i-- {
|
||||
for i := grpLength - 1; i >= 0; i-- {
|
||||
limit := groups[i].Limit
|
||||
//top group 10
|
||||
if limit <= 0 {
|
||||
|
@ -116,7 +116,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
|
|||
groupAgg := util.MapStr{
|
||||
"terms": util.MapStr{
|
||||
"field": groups[i].Field,
|
||||
"size": limit,
|
||||
"size": limit,
|
||||
},
|
||||
}
|
||||
groupID := util.GetUUID()
|
||||
|
@ -124,7 +124,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
|
|||
groupAgg["aggs"] = util.MapStr{
|
||||
groupID: lastGroupAgg,
|
||||
}
|
||||
}else{
|
||||
} else {
|
||||
groupAgg["aggs"] = timeAggs
|
||||
}
|
||||
lastGroupAgg = groupAgg
|
||||
|
@ -132,29 +132,30 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
|
|||
rootAggs = util.MapStr{
|
||||
util.GetUUID(): lastGroupAgg,
|
||||
}
|
||||
}else{
|
||||
} else {
|
||||
rootAggs = timeAggs
|
||||
}
|
||||
if len(filter) > 0 {
|
||||
rootAggs = util.MapStr{
|
||||
"filter_agg": util.MapStr{
|
||||
"filter": filter,
|
||||
"aggs": rootAggs,
|
||||
"aggs": rootAggs,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return util.MapStr{
|
||||
"size": 0,
|
||||
"size": 0,
|
||||
"query": timeFilter,
|
||||
"aggs": rootAggs,
|
||||
"aggs": rootAggs,
|
||||
}, nil
|
||||
}
|
||||
//generateAgg convert statistic of metric item to elasticsearch aggregation
|
||||
func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]interface{}{
|
||||
|
||||
// generateAgg convert statistic of metric item to elasticsearch aggregation
|
||||
func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]interface{} {
|
||||
var (
|
||||
aggType = "value_count"
|
||||
field = metricItem.Field
|
||||
field = metricItem.Field
|
||||
)
|
||||
if field == "" || field == "*" {
|
||||
field = "_id"
|
||||
|
@ -171,7 +172,7 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
|
|||
isPipeline = true
|
||||
case "medium": // from es version 6.6
|
||||
aggType = "median_absolute_deviation"
|
||||
case "p99", "p95","p90","p80","p50":
|
||||
case "p99", "p95", "p90", "p80", "p50":
|
||||
aggType = "percentiles"
|
||||
percentStr := strings.TrimPrefix(metricItem.Statistic, "p")
|
||||
percent, _ = strconv.ParseFloat(percentStr, 32)
|
||||
|
@ -187,7 +188,7 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
|
|||
aggType: aggValue,
|
||||
},
|
||||
}
|
||||
if !isPipeline{
|
||||
if !isPipeline {
|
||||
return aggs
|
||||
}
|
||||
pipelineAggID := util.GetUUID()
|
||||
|
@ -200,8 +201,8 @@ func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]int
|
|||
return aggs
|
||||
}
|
||||
|
||||
func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[string]interface{}, error){
|
||||
if !fq.IsComplex(){
|
||||
func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[string]interface{}, error) {
|
||||
if !fq.IsComplex() {
|
||||
q := map[string]interface{}{}
|
||||
if len(fq.Values) == 0 {
|
||||
return nil, fmt.Errorf("values should not be empty")
|
||||
|
@ -267,14 +268,14 @@ func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[str
|
|||
filterQueries []alerting.FilterQuery
|
||||
)
|
||||
|
||||
if len(fq.Not) >0 {
|
||||
if len(fq.Not) > 0 {
|
||||
boolOperator = "must_not"
|
||||
filterQueries = fq.Not
|
||||
|
||||
}else if len(fq.Or) > 0 {
|
||||
} else if len(fq.Or) > 0 {
|
||||
boolOperator = "should"
|
||||
filterQueries = fq.Or
|
||||
}else {
|
||||
} else {
|
||||
boolOperator = "must"
|
||||
filterQueries = fq.And
|
||||
}
|
||||
|
@@ -299,15 +300,15 @@ func (engine *Engine) ConvertFilterQueryToDsl(fq *alerting.FilterQuery) (map[string]interface{}, error){
	return resultQuery, nil
}

-func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}){
+func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}) {
	var (
		timeStart interface{}
-		timeEnd interface{}
+		timeEnd   interface{}
	)
	if filterParam != nil {
		timeStart = filterParam.Start
		timeEnd = filterParam.End
-	}else{
+	} else {
		var (
			units string
			value int

@@ -316,23 +317,23 @@ func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}){
		if err != nil {
			return nil, fmt.Errorf("parse bucket size of rule [%s] error: %v", rule.Name, err)
		}
-		if intervalDuration / time.Hour >= 1 {
+		if intervalDuration/time.Hour >= 1 {
			units = "h"
			value = int(intervalDuration / time.Hour)
-		}else if intervalDuration / time.Minute >= 1{
+		} else if intervalDuration/time.Minute >= 1 {
			units = "m"
			value = int(intervalDuration / time.Minute)
-		}else if intervalDuration / time.Second >= 1 {
+		} else if intervalDuration/time.Second >= 1 {
			units = "s"
			value = int(intervalDuration / time.Second)
-		}else{
+		} else {
			return nil, fmt.Errorf("period interval: %s is too small", rule.Metrics.BucketSize)
		}
		bucketCount := rule.Conditions.GetMinimumPeriodMatch() + 1
		if bucketCount <= 0 {
			bucketCount = 1
		}
-		duration, err := time.ParseDuration(fmt.Sprintf("%d%s", value * bucketCount, units))
+		duration, err := time.ParseDuration(fmt.Sprintf("%d%s", value*bucketCount, units))
		if err != nil {
			return nil, err
		}

@@ -342,7 +343,7 @@ func getQueryTimeRange(rule *alerting.Rule, filterParam *alerting.FilterParam) (start, end interface{}){
	return timeStart, timeEnd
}

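getQueryTimeRange derives the query window from the rule's bucket size: the interval is normalized to h/m/s units, then multiplied by the number of buckets the conditions need. A sketch of that arithmetic in isolation, with the bucket count hard-coded where the real code calls rule.Conditions.GetMinimumPeriodMatch() + 1:

package main

import (
	"fmt"
	"time"
)

func main() {
	// stand-in for rule.Metrics.BucketSize
	intervalDuration, _ := time.ParseDuration("1m")
	var (
		units string
		value int
	)
	if intervalDuration/time.Hour >= 1 {
		units = "h"
		value = int(intervalDuration / time.Hour)
	} else if intervalDuration/time.Minute >= 1 {
		units = "m"
		value = int(intervalDuration / time.Minute)
	} else {
		units = "s"
		value = int(intervalDuration / time.Second)
	}
	bucketCount := 3 // assumed GetMinimumPeriodMatch() + 1
	duration, _ := time.ParseDuration(fmt.Sprintf("%d%s", value*bucketCount, units))
	// the query then runs from now-duration to now
	fmt.Printf("start: now-%s, end: now\n", duration) // start: now-3m0s, end: now
}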
-func (engine *Engine) generateTimeFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error){
+func (engine *Engine) generateTimeFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error) {
	timeStart, timeEnd := getQueryTimeRange(rule, filterParam)
	timeQuery := util.MapStr{
		"range": util.MapStr{

@@ -360,8 +361,8 @@ func (engine *Engine) GenerateRawFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error){
	var err error
	if rule.Resource.RawFilter != nil {
		query = util.DeepCopy(rule.Resource.RawFilter).(map[string]interface{})
-	}else{
-		if !rule.Resource.Filter.IsEmpty(){
+	} else {
+		if !rule.Resource.Filter.IsEmpty() {
			query, err = engine.ConvertFilterQueryToDsl(&rule.Resource.Filter)
			if err != nil {
				return nil, err

@@ -405,7 +406,7 @@ func (engine *Engine) GenerateRawFilter(rule *alerting.Rule, filterParam *alerting.FilterParam) (map[string]interface{}, error){
	return query, nil
}

-func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam)(*alerting.QueryResult, error){
+func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.QueryResult, error) {
	esClient := elastic.GetClient(rule.Resource.ID)
	queryResult := &alerting.QueryResult{}
	indexName := strings.Join(rule.Resource.Objects, ",")

@@ -470,7 +471,7 @@ func (engine *Engine) ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam)(*alerting.QueryResult, error){
	queryResult.MetricData = metricData
	return queryResult, nil
}
-func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error){
+func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam) ([]alerting.MetricData, *alerting.QueryResult, error) {
	queryResult, err := engine.ExecuteQuery(rule, filterParam)
	if err != nil {
		return nil, queryResult, err

@@ -525,7 +526,7 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error){
				return nil, queryResult, err
			}
			if r, ok := result.(float64); ok {
-				if math.IsNaN(r) || math.IsInf(r, 0 ){
+				if math.IsNaN(r) || math.IsInf(r, 0) {
					if !isFilterNaN {
						targetData.Data["result"] = append(targetData.Data["result"], []interface{}{timestamp, math.NaN()})
					}

@@ -540,10 +541,11 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error){
	}
	return targetMetricData, queryResult, nil
}
-//CheckCondition check whether rule conditions triggered or not
-//if triggered returns an ConditionResult
-//sort conditions by priority desc before check , and then if condition is true, then continue check another group
-func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error){
+
+// CheckCondition check whether rule conditions triggered or not
+// if triggered returns an ConditionResult
+// sort conditions by priority desc before check , and then if condition is true, then continue check another group
+func (engine *Engine) CheckCondition(rule *alerting.Rule) (*alerting.ConditionResult, error) {
	var resultItems []alerting.ConditionResultItem
	targetMetricData, queryResult, err := engine.GetTargetMetricData(rule, true, nil)
	conditionResult := &alerting.ConditionResult{
@@ -558,7 +560,7 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error){
			return alerting.PriorityWeights[rule.Conditions.Items[i].Priority] > alerting.PriorityWeights[rule.Conditions.Items[j].Priority]
		})
	}
-	LoopCondition:
+LoopCondition:
	for _, cond := range rule.Conditions.Items {
		conditionExpression, err := cond.GenerateConditionExpression()
		if err != nil {

@@ -580,8 +582,8 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error){
				if targetData.Data[dataKey][i][1] == nil {
					continue
				}
-				if r, ok := targetData.Data[dataKey][i][1].(float64); ok {
-					if math.IsNaN(r){
+				if r, ok := targetData.Data[dataKey][i][1].(float64); ok {
+					if math.IsNaN(r) {
						continue
					}
				}

@@ -593,19 +595,19 @@ func (engine *Engine) CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error){
				}
				if evaluateResult == true {
					triggerCount += 1
-				}else {
+				} else {
					triggerCount = 0
				}
				if triggerCount >= cond.MinimumPeriodMatch {
					log.Debugf("triggered condition %v, groups: %v\n", cond, targetData.GroupValues)
					resultItem := alerting.ConditionResultItem{
-						GroupValues: targetData.GroupValues,
-						ConditionItem: &cond,
-						ResultValue: targetData.Data[dataKey][i][1],
+						GroupValues:    targetData.GroupValues,
+						ConditionItem:  &cond,
+						ResultValue:    targetData.Data[dataKey][i][1],
						IssueTimestamp: targetData.Data[dataKey][i][0],
						RelationValues: map[string]interface{}{},
					}
-					for _, metric := range rule.Metrics.Items{
+					for _, metric := range rule.Metrics.Items {
						resultItem.RelationValues[metric.Name] = queryResult.MetricData[idx].Data[metric.Name][i][1]
					}
					resultItems = append(resultItems, resultItem)
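The loop above counts consecutive matching buckets and resets on any miss, so a condition with MinimumPeriodMatch = 3 only fires after three true evaluations in a row. The counting logic in isolation:

package main

import "fmt"

// firesAt returns the index of the bucket at which the condition triggers,
// or -1 if the run of true evaluations never reaches minimumPeriodMatch.
func firesAt(results []bool, minimumPeriodMatch int) int {
	triggerCount := 0
	for i, evaluateResult := range results {
		if evaluateResult {
			triggerCount++
		} else {
			triggerCount = 0 // any false bucket resets the streak
		}
		if triggerCount >= minimumPeriodMatch {
			return i
		}
	}
	return -1
}

func main() {
	buckets := []bool{true, false, true, true, true}
	fmt.Println(firesAt(buckets, 3)) // 4: the third consecutive true bucket
}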
@@ -623,30 +625,30 @@ func (engine *Engine) Do(rule *alerting.Rule) error {

	var (
		alertItem *alerting.Alert
-		err error
+		err       error
	)
	defer func() {
		if err != nil && alertItem == nil {
			alertItem = &alerting.Alert{
-				ID: util.GetUUID(),
-				Created: time.Now(),
-				Updated: time.Now(),
-				RuleID: rule.ID,
-				RuleName: rule.Name,
-				ResourceID: rule.Resource.ID,
+				ID:           util.GetUUID(),
+				Created:      time.Now(),
+				Updated:      time.Now(),
+				RuleID:       rule.ID,
+				RuleName:     rule.Name,
+				ResourceID:   rule.Resource.ID,
				ResourceName: rule.Resource.Name,
-				Expression: rule.Metrics.Expression,
-				Objects: rule.Resource.Objects,
-				State: alerting.AlertStateError,
+				Expression:   rule.Metrics.Expression,
+				Objects:      rule.Resource.Objects,
+				State:        alerting.AlertStateError,
				//Priority: "undefine",
				Error: err.Error(),
			}
		}
		if alertItem != nil {
-			if err != nil{
+			if err != nil {
				alertItem.State = alerting.AlertStateError
				alertItem.Error = err.Error()
-			}else {
+			} else {
				for _, actionResult := range alertItem.ActionExecutionResults {
					if actionResult.Error != "" {
						alertItem.Error = actionResult.Error

@@ -670,24 +672,24 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		rule.Conditions.Items[i].Expression = strings.ReplaceAll(expression, "result", metricExpression)
	}
	alertItem = &alerting.Alert{
-		ID: util.GetUUID(),
-		Created: time.Now(),
-		Updated: time.Now(),
-		RuleID: rule.ID,
-		RuleName: rule.Name,
-		ResourceID: rule.Resource.ID,
+		ID:           util.GetUUID(),
+		Created:      time.Now(),
+		Updated:      time.Now(),
+		RuleID:       rule.ID,
+		RuleName:     rule.Name,
+		ResourceID:   rule.Resource.ID,
		ResourceName: rule.Resource.Name,
-		Expression: rule.Metrics.Expression,
-		Objects: rule.Resource.Objects,
-		Conditions: rule.Conditions,
-		State: alerting.AlertStateOK,
+		Expression:   rule.Metrics.Expression,
+		Objects:      rule.Resource.Objects,
+		Conditions:   rule.Conditions,
+		State:        alerting.AlertStateOK,
	}
	checkResults, err := engine.CheckCondition(rule)
	alertItem.ConditionResult = checkResults
	if err != nil {
		return err
	}
-	alertMessage, err := getLastAlertMessage(rule.ID, 2 * time.Minute)
+	alertMessage, err := getLastAlertMessage(rule.ID, 2*time.Minute)
	if err != nil {
		return fmt.Errorf("get alert message error: %w", err)
	}

@@ -696,12 +698,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
	if len(conditionResults) == 0 {
		alertItem.Priority = ""
		if checkResults.QueryResult.Nodata {
-			alertItem.State = alerting.AlertStateNodata
+			alertItem.State = alerting.AlertStateNodata
		}

-		if alertMessage != nil && alertMessage.Status != alerting.MessageStateRecovered && !checkResults.QueryResult.Nodata {
+		if alertMessage != nil && alertMessage.Status != alerting.MessageStateRecovered && !checkResults.QueryResult.Nodata {
			alertMessage.Status = alerting.MessageStateRecovered
-			alertMessage.ResourceID = rule.Resource.ID
+			alertMessage.ResourceID = rule.Resource.ID
			alertMessage.ResourceName = rule.Resource.Name
			err = saveAlertMessage(alertMessage)
			if err != nil {

@@ -710,12 +712,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
			// todo add recover notification to inner system message
			// send recover message to channel
			recoverCfg := rule.RecoveryNotificationConfig
-			if recoverCfg != nil && recoverCfg.EventEnabled && recoverCfg.Enabled {
+			if recoverCfg != nil && recoverCfg.EventEnabled && recoverCfg.Enabled {
				paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
-					alerting2.ParamEventID: alertMessage.ID,
-					alerting2.ParamTimestamp: alertItem.Created.Unix(),
-					"duration": alertItem.Created.Sub(alertMessage.Created).String(),
-					"trigger_at": alertMessage.Created.Unix(),
+					alerting2.ParamEventID:   alertMessage.ID,
+					alerting2.ParamTimestamp: alertItem.Created.Unix(),
+					"duration":               alertItem.Created.Sub(alertMessage.Created).String(),
+					"trigger_at":             alertMessage.Created.Unix(),
				})
				err = attachTitleMessageToCtx(recoverCfg.Title, recoverCfg.Message, paramsCtx)
				if err != nil {

@@ -747,9 +749,9 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		triggerAt = alertMessage.Created
	}
	paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
-		alerting2.ParamTimestamp: alertItem.Created.Unix(),
-		"duration": alertItem.Created.Sub(triggerAt).String(),
-		"trigger_at": triggerAt.Unix(),
+		alerting2.ParamTimestamp: alertItem.Created.Unix(),
+		"duration":               alertItem.Created.Sub(triggerAt).String(),
+		"trigger_at":             triggerAt.Unix(),
	})

	alertItem.Priority = priority

@@ -772,8 +774,8 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
			Priority: priority,
			Title:    alertItem.Title,
			Message:  alertItem.Message,
-			Tags: rule.Tags,
-			Category: rule.Category,
+			Tags:     rule.Tags,
+			Category: rule.Category,
		}
		alertMessage = msg
		err = saveAlertMessage(msg)

@@ -797,18 +799,18 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		if err != nil {
			return fmt.Errorf("failed to create notification, err: %w", err)
		}
-	}else{
+	} else {
		alertMessage.Title = alertItem.Title
		alertMessage.Message = alertItem.Message
-		alertMessage.ResourceID = rule.Resource.ID
-		alertMessage.ResourceName= rule.Resource.Name
+		alertMessage.ResourceID = rule.Resource.ID
+		alertMessage.ResourceName = rule.Resource.Name
		alertMessage.Priority = priority
		err = saveAlertMessage(alertMessage)
		if err != nil {
			return fmt.Errorf("save alert message error: %w", err)
		}
	}
-	log.Debugf("check condition result of rule %s is %v", conditionResults, rule.ID )
+	log.Debugf("check condition result of rule %s is %v", conditionResults, rule.ID)

	// if alert message status equals ignored , then skip sending message to channel
	if alertMessage != nil && alertMessage.Status == alerting.MessageStateIgnored {

@@ -834,7 +836,7 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		if err != nil {
			return fmt.Errorf("get last notification time from kv error: %w", err)
		}
-		if !tm.IsZero(){
+		if !tm.IsZero() {
			rule.LastNotificationTime = tm
		}
	}

@@ -845,8 +847,8 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		paramsCtx = newParameterCtx(rule, checkResults, util.MapStr{
			alerting2.ParamTimestamp: alertItem.Created.Unix(),
			"priority":               priority,
-			"duration": alertItem.Created.Sub(alertMessage.Created).String(),
-			"trigger_at": alertMessage.Created.Unix(),
+			"duration":               alertItem.Created.Sub(alertMessage.Created).String(),
+			"trigger_at":             alertMessage.Created.Unix(),
		})
		if alertMessage != nil {
			paramsCtx[alerting2.ParamEventID] = alertMessage.ID

@@ -874,12 +876,12 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
		rule.LastTermStartTime = alertMessage.Created
	}
	if time.Now().Sub(rule.LastTermStartTime.Local()) > throttlePeriod {
-		if rule.LastEscalationTime.IsZero(){
+		if rule.LastEscalationTime.IsZero() {
			tm, err := readTimeFromKV(alerting2.KVLastEscalationTime, []byte(rule.ID))
			if err != nil {
				return fmt.Errorf("get last escalation time from kv error: %w", err)
			}
-			if !tm.IsZero(){
+			if !tm.IsZero() {
				rule.LastEscalationTime = tm
			}
		}
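The escalation branch above only proceeds once the current alerting term has outlasted the configured throttle period. A sketch of that gate with made-up durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	// assumed values; the real code reads these from the rule and the kv store
	throttlePeriod, _ := time.ParseDuration("30m")
	lastTermStartTime := time.Now().Add(-45 * time.Minute)

	// equivalent to the diff's time.Now().Sub(...) > throttlePeriod check
	if time.Since(lastTermStartTime) > throttlePeriod {
		fmt.Println("escalation window reached, send notification")
	} else {
		fmt.Println("still inside throttle period, skip")
	}
}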
@@ -899,10 +901,10 @@ func (engine *Engine) Do(rule *alerting.Rule) error {
	return nil
}

-func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interface{}) error{
+func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interface{}) error {
	var (
		tplBytes []byte
-		err error
+		err      error
	)
	tplBytes, err = common.ResolveMessage(message, paramsCtx)
	if err != nil {

@@ -917,23 +919,23 @@ func attachTitleMessageToCtx(title, message string, paramsCtx map[string]interface{}) error{
	return nil
}

-func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{} ) map[string]interface{}{
+func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{}) map[string]interface{} {
	var (
		conditionParams []util.MapStr
		firstGroupValue string
-		firstThreshold string
-		priority string
+		firstThreshold  string
+		priority        string
	)
	if len(checkResults.ResultItems) > 0 {
		priority = checkResults.ResultItems[0].ConditionItem.Priority
		sort.Slice(checkResults.ResultItems, func(i, j int) bool {
-			if alerting.PriorityWeights[checkResults.ResultItems[i].ConditionItem.Priority] > alerting.PriorityWeights[checkResults.ResultItems[j].ConditionItem.Priority] {
+			if alerting.PriorityWeights[checkResults.ResultItems[i].ConditionItem.Priority] > alerting.PriorityWeights[checkResults.ResultItems[j].ConditionItem.Priority] {
				return true
			}
			return false
		})
		sort.Slice(checkResults.ResultItems, func(i, j int) bool {
-			if vi, ok := checkResults.ResultItems[i].ResultValue.(float64); ok {
+			if vi, ok := checkResults.ResultItems[i].ResultValue.(float64); ok {
				if vj, ok := checkResults.ResultItems[j].ResultValue.(float64); ok {
					return vi > vj
				}

@@ -972,10 +974,10 @@ func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{} ) map[string]interface{}{
		max = checkResults.QueryResult.Max
		if v, ok := min.(int64); ok {
			//expand 60s
-			min = time.UnixMilli(v).Add(-time.Second*60).UTC().Format("2006-01-02T15:04:05.999Z")
+			min = time.UnixMilli(v).Add(-time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z")
		}
		if v, ok := max.(int64); ok {
-			max = time.UnixMilli(v).Add(time.Second*60).UTC().Format("2006-01-02T15:04:05.999Z")
+			max = time.UnixMilli(v).Add(time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z")
		}
	}
	paramsCtx := util.MapStr{
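newParameterCtx widens the min/max bounds of the query result by 60 seconds on each side before formatting them for Elasticsearch. The round-trip in isolation (the epoch-millisecond inputs are invented):

package main

import (
	"fmt"
	"time"
)

func main() {
	// stand-ins for checkResults.QueryResult.Min / .Max
	var min, max interface{} = int64(1_700_000_000_000), int64(1_700_000_600_000)

	if v, ok := min.(int64); ok {
		// expand 60s, matching the diff's formatting pattern
		min = time.UnixMilli(v).Add(-time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z")
	}
	if v, ok := max.(int64); ok {
		max = time.UnixMilli(v).Add(time.Second * 60).UTC().Format("2006-01-02T15:04:05.999Z")
	}
	fmt.Println(min, max)
}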
@@ -983,14 +985,14 @@ func newParameterCtx(rule *alerting.Rule, checkResults *alerting.ConditionResult, extraParams map[string]interface{} ) map[string]interface{}{
		alerting2.ParamResourceID:   rule.Resource.ID,
		alerting2.ParamResourceName: rule.Resource.Name,
		alerting2.ParamResults:      conditionParams,
-		"objects": rule.Resource.Objects,
+		"objects":                   rule.Resource.Objects,
		"first_group_value":         firstGroupValue,
		"first_threshold":           firstThreshold,
		"rule_name":                 rule.Name,
		"priority":                  priority,
-		"min": min,
-		"max": max,
-		"env": envVariables,
+		"min":                       min,
+		"max":                       max,
+		"env":                       envVariables,
	}
	err = util.MergeFields(paramsCtx, extraParams, true)
	if err != nil {

@@ -1004,30 +1006,30 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.ActionExecutionResult, error) {
	if err != nil {
		return nil, fmt.Errorf("check condition error:%w", err)
	}
-	alertMessage, err := getLastAlertMessage(rule.ID, 2 * time.Minute)
+	alertMessage, err := getLastAlertMessage(rule.ID, 2*time.Minute)
	if err != nil {
		return nil, fmt.Errorf("get alert message error: %w", err)
	}
	var actionResults []alerting.ActionExecutionResult

-	now := time.Now()
+	now := time.Now()
	triggerAt := now
	if alertMessage != nil {
		triggerAt = alertMessage.Created
	}
-	paramsCtx := newParameterCtx(rule, checkResults,util.MapStr{
-		alerting2.ParamEventID: util.GetUUID(),
-		alerting2.ParamTimestamp: now.Unix(),
-		"duration": now.Sub(triggerAt).String(),
-		"trigger_at": triggerAt.Unix(),
-	} )
+	paramsCtx := newParameterCtx(rule, checkResults, util.MapStr{
+		alerting2.ParamEventID:   util.GetUUID(),
+		alerting2.ParamTimestamp: now.Unix(),
+		"duration":               now.Sub(triggerAt).String(),
+		"trigger_at":             triggerAt.Unix(),
+	})
	if msgType == "escalation" || msgType == "notification" {
		title, message := rule.GetNotificationTitleAndMessage()
		err = attachTitleMessageToCtx(title, message, paramsCtx)
		if err != nil {
			return nil, err
		}
-	}else if msgType == "recover_notification" {
+	} else if msgType == "recover_notification" {
		if rule.RecoveryNotificationConfig == nil {
			return nil, fmt.Errorf("recovery notification must not be empty")
		}

@@ -1035,7 +1037,7 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.ActionExecutionResult, error) {
		if err != nil {
			return nil, err
		}
-	}else{
+	} else {
		return nil, fmt.Errorf("unkonwn parameter msg type")
	}

@@ -1060,7 +1062,7 @@ func (engine *Engine) Test(rule *alerting.Rule, msgType string) ([]alerting.ActionExecutionResult, error) {
	}
	if len(channels) > 0 {
		actionResults, _ = performChannels(channels, paramsCtx, true)
-	}else{
+	} else {
		return nil, fmt.Errorf("no useable channel")
	}
	return actionResults, nil

@@ -1071,8 +1073,8 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, raiseChannelEnabledErr bool) ([]alerting.ActionExecutionResult, int) {
	var actionResults []alerting.ActionExecutionResult
	for _, channel := range channels {
		var (
-			errStr string
-			resBytes []byte
+			errStr       string
+			resBytes     []byte
			messageBytes []byte
		)
		_, err := common.RetrieveChannel(&channel, raiseChannelEnabledErr)

@@ -1080,7 +1082,7 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, raiseChannelEnabledErr bool) ([]alerting.ActionExecutionResult, int) {
			log.Error(err)
			errCount++
			errStr = err.Error()
-		}else{
+		} else {
			if !channel.Enabled {
				continue
			}

@@ -1094,17 +1096,15 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, raiseChannelEnabledErr bool) ([]alerting.ActionExecutionResult, int) {
			Result:        string(resBytes),
			Error:         errStr,
			Message:       string(messageBytes),
-			ExecutionTime: int(time.Now().UnixNano()/1e6),
+			ExecutionTime: int(time.Now().UnixNano() / 1e6),
			ChannelType:   channel.SubType,
			ChannelName:   channel.Name,
-			ChannelID: channel.ID,
+			ChannelID:     channel.ID,
		})
	}
	return actionResults, errCount
}
-
-

func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context) {
	return func(ctx context.Context) {
		defer func() {
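performChannels walks the channel list, skips disabled entries, and records one ActionExecutionResult per executed channel while counting errors. A sketch with trimmed, hypothetical stand-ins for the alerting types (the real code resolves and renders each channel via common.RetrieveChannel and a template pass):

package main

import (
	"fmt"
	"time"
)

// Channel and ActionExecutionResult are trimmed stand-ins, not the real types.
type Channel struct {
	ID, Name, SubType string
	Enabled           bool
}

type ActionExecutionResult struct {
	Error         string
	ExecutionTime int
	ChannelType   string
	ChannelName   string
	ChannelID     string
}

func performChannels(channels []Channel) ([]ActionExecutionResult, int) {
	var (
		results  []ActionExecutionResult
		errCount int
	)
	for _, channel := range channels {
		if !channel.Enabled {
			continue // disabled channels are skipped entirely
		}
		errStr := ""
		if channel.SubType == "" { // stand-in for a failed retrieve/send
			errCount++
			errStr = "unknown channel sub type"
		}
		results = append(results, ActionExecutionResult{
			Error:         errStr,
			ExecutionTime: int(time.Now().UnixNano() / 1e6), // epoch millis
			ChannelType:   channel.SubType,
			ChannelName:   channel.Name,
			ChannelID:     channel.ID,
		})
	}
	return results, errCount
}

func main() {
	res, errs := performChannels([]Channel{
		{ID: "c1", Name: "ops-webhook", SubType: "webhook", Enabled: true},
		{ID: "c2", Name: "disabled", Enabled: false},
	})
	fmt.Println(len(res), "results,", errs, "errors")
}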
@@ -1120,29 +1120,29 @@ func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context) {
	}
}

-func CollectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){
+func CollectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData) {
	if aggM, ok := agg.(map[string]interface{}); ok {
		if targetAgg, ok := aggM["filter_agg"]; ok {
			collectMetricData(targetAgg, groupValues, metricData)
-		}else{
+		} else {
			collectMetricData(aggM, groupValues, metricData)
		}
	}
}

-func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){
+func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData) {
	if aggM, ok := agg.(map[string]interface{}); ok {
		if timeBks, ok := aggM["time_buckets"].(map[string]interface{}); ok {
			if bks, ok := timeBks["buckets"].([]interface{}); ok {
				md := alerting.MetricData{
-					Data: map[string][]alerting.TimeMetricData{},
+					Data:        map[string][]alerting.TimeMetricData{},
					GroupValues: strings.Split(groupValues, "*"),
				}
				for _, bk := range bks {
-					if bkM, ok := bk.(map[string]interface{}); ok{
+					if bkM, ok := bk.(map[string]interface{}); ok {

						for k, v := range bkM {
-							if k == "key" || k == "key_as_string" || k== "doc_count"{
+							if k == "key" || k == "key_as_string" || k == "doc_count" {
								continue
							}
							if len(k) > 5 { //just store a,b,c

@@ -1151,9 +1151,9 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){
							if vm, ok := v.(map[string]interface{}); ok {
								if metricVal, ok := vm["value"]; ok {
									md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], metricVal})
-								}else{
+								} else {
									//percentiles agg type
-									switch vm["values"].(type) {
+									switch vm["values"].(type) {
									case []interface{}:
										for _, val := range vm["values"].([]interface{}) {
											if valM, ok := val.(map[string]interface{}); ok {

@@ -1163,7 +1163,7 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){
										}
									case map[string]interface{}:
										for _, val := range vm["values"].(map[string]interface{}) {
-											md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], val})
+											md.Data[k] = append(md.Data[k], alerting.TimeMetricData{bkM["key"], val})
											break
										}
									}

@@ -1176,18 +1176,18 @@ func collectMetricData(agg interface{}, groupValues string, metricData *[]alerting.MetricData){
					}

				}
-				*metricData = append(*metricData,md)
+				*metricData = append(*metricData, md)
			}

-		}else{
+		} else {
			for k, v := range aggM {
-				if k == "key" || k== "doc_count"{
+				if k == "key" || k == "doc_count" {
					continue
				}
				if vm, ok := v.(map[string]interface{}); ok {
					if bks, ok := vm["buckets"].([]interface{}); ok {
						for _, bk := range bks {
-							if bkVal, ok := bk.(map[string]interface{}); ok {
+							if bkVal, ok := bk.(map[string]interface{}); ok {
								currentGroup := bkVal["key"].(string)
								newGroupValues := currentGroup
								if groupValues != "" {
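The extraction above handles two response shapes: ordinary metric aggs expose a single "value", while percentiles expose a "values" container that may decode as a slice or a map. That branch in isolation:

package main

import "fmt"

// extractMetric pulls one metric value out of a decoded aggregation bucket,
// mirroring the value/values handling in collectMetricData.
func extractMetric(vm map[string]interface{}) interface{} {
	if metricVal, ok := vm["value"]; ok {
		return metricVal
	}
	// percentiles agg type: "values" decodes as a slice or a map
	switch values := vm["values"].(type) {
	case []interface{}:
		for _, val := range values {
			if valM, ok := val.(map[string]interface{}); ok {
				return valM["value"]
			}
		}
	case map[string]interface{}:
		for _, val := range values {
			return val // single configured percentile, first entry wins
		}
	}
	return nil
}

func main() {
	fmt.Println(extractMetric(map[string]interface{}{"value": 0.42}))
	fmt.Println(extractMetric(map[string]interface{}{
		"values": map[string]interface{}{"99.0": 123.4},
	}))
}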
@@ -1227,20 +1227,20 @@ func getLastAlertMessageFromES(ruleID string) (*alerting.AlertMessage, error) {
	q := orm.Query{
		RawQuery: util.MustToJSONBytes(queryDsl),
	}
-	err, searchResult := orm.Search(alerting.AlertMessage{}, &q )
+	err, searchResult := orm.Search(alerting.AlertMessage{}, &q)
	if err != nil {
-		return nil, err
+		return nil, err
	}
	if len(searchResult.Result) == 0 {
-		return nil, nil
+		return nil, nil
	}
	messageBytes := util.MustToJSONBytes(searchResult.Result[0])
	message := &alerting.AlertMessage{}
-	err = util.FromJSONBytes(messageBytes, message)
+	err = util.FromJSONBytes(messageBytes, message)
	return message, err
}

-func getLastAlertMessage(ruleID string, duration time.Duration) (*alerting.AlertMessage, error ){
+func getLastAlertMessage(ruleID string, duration time.Duration) (*alerting.AlertMessage, error) {
	messageBytes, err := kv.GetValue(alerting2.KVLastMessageState, []byte(ruleID))
	if err != nil {
		return nil, err

@@ -1280,15 +1280,14 @@ func saveAlertMessage(message *alerting.AlertMessage) error {
	return err
}

-
-func readTimeFromKV(bucketKey string, key []byte)(time.Time, error){
+func readTimeFromKV(bucketKey string, key []byte) (time.Time, error) {
	timeBytes, err := kv.GetValue(bucketKey, key)
	zeroTime := time.Time{}
	if err != nil {
		return zeroTime, err
	}
-	timeStr := string(timeBytes)
-	if timeStr != ""{
+	timeStr := string(timeBytes)
+	if timeStr != "" {
		return time.ParseInLocation(time.RFC3339, string(timeBytes), time.UTC)
	}
	return zeroTime, nil
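readTimeFromKV persists timestamps as RFC3339 strings and treats an empty value as the zero time rather than an error. A sketch of the round-trip, with an in-memory map standing in for the kv store:

package main

import (
	"fmt"
	"time"
)

// store is a stand-in for the kv bucket used by the real code.
var store = map[string][]byte{}

func readTimeFromKV(key string) (time.Time, error) {
	timeBytes := store[key]
	zeroTime := time.Time{}
	timeStr := string(timeBytes)
	if timeStr != "" {
		return time.ParseInLocation(time.RFC3339, timeStr, time.UTC)
	}
	return zeroTime, nil // missing value decodes to the zero time, not an error
}

func main() {
	store["last_notification"] = []byte(time.Now().UTC().Format(time.RFC3339))
	tm, err := readTimeFromKV("last_notification")
	fmt.Println(tm, err)
	tm, _ = readTimeFromKV("missing")
	fmt.Println(tm.IsZero()) // true
}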
@@ -40,16 +40,16 @@ import (
	"time"
)

-func TestEngine( t *testing.T) {
+func TestEngine(t *testing.T) {
	rule := alerting.Rule{
-		ID: util.GetUUID(),
+		ID:      util.GetUUID(),
		Created: time.Now(),
		Updated: time.Now(),
		Enabled: true,
		Resource: alerting.Resource{
-			ID: "c8i18llath2blrusdjng",
-			Type: "elasticsearch",
-			Objects: []string{".infini_metrics*"},
+			ID:        "c8i18llath2blrusdjng",
+			Type:      "elasticsearch",
+			Objects:   []string{".infini_metrics*"},
			TimeField: "timestamp",
			Filter: alerting.FilterQuery{
				And: []alerting.FilterQuery{

@@ -123,9 +123,9 @@ func TestEngine( t *testing.T) {
		ThrottlePeriod: "1h",
		AcceptTimeRange: alerting.TimeRange{
			Start: "8:00",
-			End: "21:00",
+			End:   "21:00",
		},
-		EscalationEnabled: true,
+		EscalationEnabled:        true,
		EscalationThrottlePeriod: "30m",
	},
}

@@ -143,11 +143,11 @@ func TestEngine( t *testing.T) {
	//fmt.Println(util.MustToJSON(filter))
}

-func TestGenerateAgg(t *testing.T) {
+func TestGenerateAgg(t *testing.T) {
	eng := &Engine{}
	agg := eng.generateAgg(&insight.MetricItem{
-		Name: "a",
-		Field: "cpu.percent",
+		Name:      "a",
+		Field:     "cpu.percent",
		Statistic: "p99",
	})
	fmt.Println(util.MustToJSON(agg))

@@ -210,20 +210,20 @@ func TestGeneratePercentilesAggQuery(t *testing.T) {
	esClient := elasticsearch.ESAPIV7{}
	esClient.Elasticsearch = cfg.ID
	esClient.Version = elastic.Version{
-		Number: "7.10.2",
-		Major: 7,
+		Number:       "7.10.2",
+		Major:        7,
		Distribution: elastic.Elasticsearch,
	}
	elastic.UpdateClient(cfg, &esClient)
	rule := alerting.Rule{
-		ID: util.GetUUID(),
+		ID:      util.GetUUID(),
		Created: time.Now(),
		Updated: time.Now(),
		Enabled: true,
		Resource: alerting.Resource{
-			ID: cfg.ID,
-			Type: "elasticsearch",
-			Objects: []string{".infini_metrics*"},
+			ID:        cfg.ID,
+			Type:      "elasticsearch",
+			Objects:   []string{".infini_metrics*"},
			TimeField: "timestamp",
			RawFilter: map[string]interface{}{
				"bool": map[string]interface{}{

@@ -271,9 +271,9 @@ func TestGeneratePercentilesAggQuery(t *testing.T) {
		ThrottlePeriod: "1h",
		AcceptTimeRange: alerting.TimeRange{
			Start: "08:00",
-			End: "21:00",
+			End:   "21:00",
		},
-		EscalationEnabled: true,
+		EscalationEnabled:        true,
		EscalationThrottlePeriod: "30m",
	},
}

@@ -289,21 +289,21 @@ func TestConvertFilterQuery(t *testing.T) {
	fq := alerting.FilterQuery{
		And: []alerting.FilterQuery{
			{
-				Field: "metadata.category",
-				Values: []string{"elasticsearch"},
+				Field:    "metadata.category",
+				Values:   []string{"elasticsearch"},
				Operator: "equals",
			},
			{
-				Field: "metadata.name",
-				Values: []string{"index_stats", "node_stats"},
+				Field:    "metadata.name",
+				Values:   []string{"index_stats", "node_stats"},
				Operator: "in",
			},
			{
				Not: []alerting.FilterQuery{
					{
-						Field: "timestamp",
+						Field:    "timestamp",
						Operator: "gt",
-						Values: []string{"2022-04-16T16:16:39.168605+08:00"},
+						Values:   []string{"2022-04-16T16:16:39.168605+08:00"},
					},
				},
			},
@@ -29,7 +29,7 @@ package elasticsearch

import "infini.sh/console/service/alerting"

-func init(){
+func init() {
	eng := Engine{}
	alerting.RegistEngine("elasticsearch", &eng)
}

@@ -36,17 +36,18 @@ import (

type Engine interface {
	GenerateQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (interface{}, error)
-	ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam)(*alerting.QueryResult, error)
-	CheckCondition(rule *alerting.Rule)(*alerting.ConditionResult, error)
+	ExecuteQuery(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.QueryResult, error)
+	CheckCondition(rule *alerting.Rule) (*alerting.ConditionResult, error)
	GenerateTask(rule alerting.Rule) func(ctx context.Context)
	Test(rule *alerting.Rule, msgType string) ([]alerting.ActionExecutionResult, error)
-	GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam)([]alerting.MetricData, *alerting.QueryResult, error)
+	GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool, filterParam *alerting.FilterParam) ([]alerting.MetricData, *alerting.QueryResult, error)
}

var (
-	alertEngines = map[string] Engine{}
+	alertEngines      = map[string]Engine{}
	alertEnginesMutex = sync.RWMutex{}
)

func RegistEngine(typ string, engine Engine) {
	alertEnginesMutex.Lock()
	defer alertEnginesMutex.Unlock()
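The interface file registers engines in a package-level map guarded by a RWMutex. A sketch of the pattern; the GetEngine lookup shown here is an assumed counterpart to RegistEngine, not code from this diff:

package main

import (
	"fmt"
	"sync"
)

// Engine is trimmed to a single method for the sketch.
type Engine interface {
	Name() string
}

var (
	alertEngines      = map[string]Engine{}
	alertEnginesMutex = sync.RWMutex{}
)

// RegistEngine takes the write lock, as in the diff.
func RegistEngine(typ string, engine Engine) {
	alertEnginesMutex.Lock()
	defer alertEnginesMutex.Unlock()
	alertEngines[typ] = engine
}

// GetEngine is a hypothetical read-side helper using the read lock.
func GetEngine(typ string) (Engine, bool) {
	alertEnginesMutex.RLock()
	defer alertEnginesMutex.RUnlock()
	eng, ok := alertEngines[typ]
	return eng, ok
}

type esEngine struct{}

func (esEngine) Name() string { return "elasticsearch" }

func main() {
	RegistEngine("elasticsearch", esEngine{})
	eng, ok := GetEngine("elasticsearch")
	fmt.Println(ok, eng.Name())
}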
@@ -37,7 +37,7 @@ import (
	log "src/github.com/cihub/seelog"
)

-func GetEnvVariables() (map[string]interface{}, error){
+func GetEnvVariables() (map[string]interface{}, error) {
	configFile := global.Env().GetConfigFile()
	envVariables, err := config.LoadEnvVariables(configFile)
	if err != nil {

@@ -64,7 +64,7 @@ func GetEnvVariables() (map[string]interface{}, error){
	return envVariables, nil
}

-func GetInnerConsoleEndpoint() (string, error){
+func GetInnerConsoleEndpoint() (string, error) {
	appConfig := &config2.AppConfig{
		UI: config2.UIConfig{},
	}

@@ -32,10 +32,10 @@ import (
	"time"
)

-func datetimeInZone(zone string, date interface{}) string{
+func datetimeInZone(zone string, date interface{}) string {
	return _dateInZone("2006-01-02 15:04:05", date, zone)
}
-func datetime(date interface{}) string{
+func datetime(date interface{}) string {
	return _dateInZone("2006-01-02 15:04:05", date, "Local")
}

@@ -58,7 +58,7 @@ func _dateInZone(fmt string, date interface{}, zone string) string {
		t = *date
	case int64:
		if date > 1e12 {
-			date = date/1000
+			date = date / 1000
		}
		t = time.Unix(date, 0)
	case int:
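_dateInZone treats int64 epoch values above 1e12 as milliseconds and scales them down before calling time.Unix. The heuristic in isolation:

package main

import (
	"fmt"
	"time"
)

// toTime mirrors the int64 case of _dateInZone's type switch.
func toTime(date int64) time.Time {
	if date > 1e12 {
		date = date / 1000 // epoch milliseconds -> seconds
	}
	return time.Unix(date, 0)
}

func main() {
	fmt.Println(toTime(1700000000))    // seconds input
	fmt.Println(toTime(1700000000000)) // millisecond input, same instant
}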
@@ -35,7 +35,7 @@ import (
	"strings"
)

-func lookup(directory string, id string) interface{}{
+func lookup(directory string, id string) interface{} {
	directory = strings.TrimSpace(directory)
	if directory == "" {
		return "empty_directory"

@@ -46,8 +46,8 @@ func lookup(directory string, id string) interface{}{
		kv := strings.Split(part, "=")
		if len(kv) == 2 {
			k := strings.TrimSpace(kv[0])
-			kvs[k]= strings.TrimSpace(kv[1])
-		}else{
+			kvs[k] = strings.TrimSpace(kv[1])
+		} else {
			log.Debugf("got unexpected directory part: %s", part)
		}
	}

@@ -59,10 +59,10 @@ func lookup(directory string, id string) interface{}{
	return kvs["default"]
}

-func lookupMetadata(object string, property string, defaultValue string, id string) interface{}{
+func lookupMetadata(object string, property string, defaultValue string, id string) interface{} {
	var (
		cfgM = util.MapStr{}
-		buf []byte
+		buf  []byte
	)
	switch object {
	case "cluster":
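lookup parses a directory spec of k=v parts into a map and falls back to the "default" key. A sketch; the comma separator and the direct kvs[id] lookup are assumptions, since the hunks only show the per-part split and the default return:

package main

import (
	"fmt"
	"strings"
)

func lookup(directory string, id string) interface{} {
	kvs := map[string]string{}
	// assumed: parts are comma-separated (the diff only shows the "=" split)
	for _, part := range strings.Split(directory, ",") {
		kv := strings.Split(part, "=")
		if len(kv) == 2 {
			k := strings.TrimSpace(kv[0])
			kvs[k] = strings.TrimSpace(kv[1])
		}
	}
	if v, ok := kvs[id]; ok {
		return v
	}
	return kvs["default"]
}

func main() {
	fmt.Println(lookup("c1=Ops Cluster,default=Unknown", "c1")) // Ops Cluster
	fmt.Println(lookup("c1=Ops Cluster,default=Unknown", "c2")) // Unknown
}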
@@ -41,21 +41,21 @@ func GenericFuncMap() template.FuncMap {
}

var genericMap = map[string]interface{}{
-	"hello": func() string { return "Hello!" },
-	"format_bytes": formatBytes,
-	"to_fixed": toFixed,
-	"date": date,
-	"date_in_zone": dateInZone,
-	"datetime": datetime,
+	"hello":            func() string { return "Hello!" },
+	"format_bytes":     formatBytes,
+	"to_fixed":         toFixed,
+	"date":             date,
+	"date_in_zone":     dateInZone,
+	"datetime":         datetime,
	"datetime_in_zone": datetimeInZone,
-	"to_upper": strings.ToUpper,
-	"to_lower": strings.ToLower,
-	"add": add,
-	"sub": sub,
-	"div": div,
-	"mul": mul,
-	"lookup": lookup,
-	"str_replace": replace,
-	"md_to_html": mdToHTML,
+	"to_upper":         strings.ToUpper,
+	"to_lower":         strings.ToLower,
+	"add":              add,
+	"sub":              sub,
+	"div":              div,
+	"mul":              mul,
+	"lookup":           lookup,
+	"str_replace":      replace,
+	"md_to_html":       mdToHTML,
	//"get_keystore_secret": getKeystoreSecret,
}
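The genericMap above is ultimately exposed through template.FuncMap. A small usage sketch with stand-in implementations for a couple of the helpers (the real add takes interface{} operands via ToFloat64):

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// stand-in FuncMap covering a few names from the diff
	funcs := template.FuncMap{
		"to_upper": strings.ToUpper,
		"to_lower": strings.ToLower,
		"add":      func(a, b float64) float64 { return a + b },
	}
	tpl := template.Must(template.New("msg").Funcs(funcs).
		Parse("{{to_upper .rule_name}}: cpu at {{add .value 0.5}}%\n"))
	_ = tpl.Execute(os.Stdout, map[string]interface{}{
		"rule_name": "cpu check",
		"value":     98.5,
	})
	// CPU CHECK: cpu at 99%
}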
@@ -35,10 +35,10 @@ import (
	"strconv"
)

-func toFixed(precision int, num float64) float64{
+func toFixed(precision int, num float64) float64 {
	return util.ToFixed(num, precision)
}
-func add(a, b interface{}) float64{
+func add(a, b interface{}) float64 {
	av := ToFloat64(a)
	bv := ToFloat64(b)
	return av + bv

@@ -37,7 +37,7 @@ import (
func substring(start, end int, s string) string {
	runes := []rune(s)
	length := len(runes)
-	if start < 0 || start > length || end < 0 || end > length{
+	if start < 0 || start > length || end < 0 || end > length {
		return s
	}
	return string(runes[start:end])

@@ -28,11 +28,11 @@
package alerting

type ParameterMeta struct {
-	Name string `json:"name"`
-	Type string `json:"type"` //int, float, string, date, array, object
-	Description string `json:"description"`
-	Eg string `json:"eg,omitempty"`
-	Properties []ParameterMeta `json:"properties,omitempty"`
+	Name        string          `json:"name"`
+	Type        string          `json:"type"` //int, float, string, date, array, object
+	Description string          `json:"description"`
+	Eg          string          `json:"eg,omitempty"`
+	Properties  []ParameterMeta `json:"properties,omitempty"`
}

func GetTemplateParameters() []ParameterMeta {