chore: code format (#72)

* chore: code format

* chore: remove fetch-depth

* chore: add format and lint

* chore: add pr_check

* fix: lint with config

* chore: this pr only unit test

* fix: code format error
Author: Hardy (committed by GitHub)
Date: 2025-01-14 14:29:31 +08:00
Parent: fb4dafecb3
Commit: 8da176bea8
Signature: no known key found for this signature in database (GPG Key ID: B5690EEEBB952194)
88 changed files with 3497 additions and 3365 deletions

.github/workflows/pr_check.yml (new file, 307 lines added)
View File

@ -0,0 +1,307 @@
name: Unit Test
on:
pull_request:
branches: [ "main" ]
defaults:
run:
shell: bash
env:
GO_VERSION: 1.23.4
NODEJS_VERSION: 16.20.2
PNAME: console
jobs:
format_check:
runs-on: ubuntu-latest
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
path: ${{ env.PNAME }}
- name: Checkout framework repository
uses: actions/checkout@v4
with:
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Code format
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/console
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for code format
cd $WORK
echo Formatting code at $PWD ...
make format
if [ $? -ne 0 ]; then
echo "make format failed, please check make output"
exit 1
fi
- name: Check for changes after format
id: check-changes
shell: bash
run: |
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/$PNAME
# for format check
cd $WORK
if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then
echo "go format detected formatting changes"
echo "changes=true" >> $GITHUB_OUTPUT
else
echo "go format no changes found"
echo "changes=false" >> $GITHUB_OUTPUT
fi
- name: Fail workflow if changes after format
if: steps.check-changes.outputs.changes == 'true'
run: exit 1
unit_test:
runs-on: ubuntu-latest
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
path: ${{ env.PNAME }}
- name: Checkout framework repository
uses: actions/checkout@v4
with:
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Unit test
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/$PNAME
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for unit test
cd $WORK
echo Testing code at $PWD ...
make test
code_lint:
runs-on: ubuntu-latest
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
path: ${{ env.PNAME }}
- name: Checkout framework repository
uses: actions/checkout@v4
with:
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Code lint
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/$PNAME
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for code lint
cd $WORK
echo Linting code at $PWD ...
# make lint

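The three jobs above share the same checkout, cache, and toolchain steps; only the final script differs. The format gate can be approximated locally before pushing a PR. A minimal sketch, assuming the Makefile's format target is the same one the workflow calls and that the checkout is symlinked into the usual infini.sh workspace (the paths and symlink setup here are assumptions, not part of the workflow):

#!/usr/bin/env bash
# Sketch: run the CI format gate locally (workspace layout is an assumption).
export GOFLAGS=-tags=ci
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/console
mkdir -p "$WORKBASE"
[ -e "$WORK" ] || ln -s "$PWD" "$WORK"
cd "$WORK"
make format
# Mirror the workflow's check: fail if the formatter modified any tracked .go file.
if [[ $(git status --porcelain | grep -c " M .*\.go$") -gt 0 ]]; then
    echo "formatting changes detected; commit the formatter output before pushing"
    exit 1
fi
echo "no formatting changes"

If the format_check job fails in CI, running make format locally and committing the result is the intended fix.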
View File

@ -1,105 +0,0 @@
name: Unit Test
on:
pull_request:
branches: [ "main" ]
defaults:
run:
shell: bash
jobs:
build:
runs-on: ubuntu-latest
env:
GO_VERSION: 1.23.4
NODEJS_VERSION: 16.20.2
steps:
- name: Checkout current repository
uses: actions/checkout@v4
with:
fetch-depth: 0
path: console
- name: Checkout framework repository
uses: actions/checkout@v4
with:
fetch-depth: 0
repository: infinilabs/framework
path: framework
- name: Checkout framework-vendor
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0
repository: infinilabs/framework-vendor
path: vendor
- name: Set up nodejs toolchain
uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Cache dependencies
uses: actions/cache@v4
with:
path: |
node_modules
key: ${{ runner.os }}-cnpm-${{ hashFiles('**/package.json') }}
restore-keys: |
${{ runner.os }}-cnpm-
- name: Check nodejs toolchain
run: |
if ! command -v cnpm >/dev/null 2>&1; then
npm install -g rimraf --quiet --no-progress
npm install -g cnpm@9.2.0 --quiet --no-progress
fi
node -v && npm -v && cnpm -v
- name: Set up go toolchain
uses: actions/setup-go@v5
with:
go-version: ${{ env.GO_VERSION }}
check-latest: false
cache: true
- name: Check go toolchain
run: go version
- name: Cache Build Output
uses: actions/cache@v4
with:
path: |
.public
key: ${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-${{ github.sha }}
restore-keys: |
${{ runner.os }}-build-${{ hashFiles('**/package.json') }}-
${{ runner.os }}-build-
- name: Unit test
env:
GOFLAGS: -tags=ci
run: |
echo Home path is $HOME
export WORKBASE=$HOME/go/src/infini.sh
export WORK=$WORKBASE/console
# for test workspace
mkdir -p $HOME/go/src/
ln -s $GITHUB_WORKSPACE $WORKBASE
# for web build
cd $WORK/web
cnpm install --quiet --no-progress
cnpm run build --quiet
# check work folder
ls -lrt $WORKBASE/
ls -alrt $WORK
# for unit test
cd $WORK
echo Testing code at $PWD ...
make test

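The removed workflow above ran checkout, web build, and make test in a single job with fetch-depth: 0 on every checkout; the new pr_check.yml splits the same script across format_check, unit_test, and code_lint jobs and drops the full-history fetch. The unit-test step itself is unchanged, so it can be reproduced locally with roughly the following sketch, reusing the $WORK symlink layout assumed in the format sketch above:

#!/usr/bin/env bash
# Sketch: run the CI unit-test step locally (reuses the assumed $WORK layout).
export GOFLAGS=-tags=ci
export WORK=$HOME/go/src/infini.sh/console
cd "$WORK/web"
cnpm install --quiet --no-progress   # CI builds the web assets before running tests
cnpm run build --quiet
cd "$WORK"
make test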
.gitignore (2 changed lines)
View File

@ -32,5 +32,7 @@ appveyor.yml
log/
.env
generated_*.go
config/generated.go
config/generat*.go
config/initialization.dsl
config/system_config.yml

View File

@ -1,10 +0,0 @@
package config
const LastCommitLog = "N/A"
const BuildDate = "N/A"
const EOLDate = "N/A"
const Version = "0.0.1-SNAPSHOT"
const BuildNumber = "001"

View File

@ -33,6 +33,7 @@ type Condition struct {
Operator string `json:"operator"`
Items []ConditionItem `json:"items"`
}
func (cond *Condition) GetMinimumPeriodMatch() int {
var minPeriodMatch = 0
for _, citem := range cond.Items {

View File

@ -43,7 +43,6 @@ type Channel struct {
Enabled bool `json:"enabled" elastic_mapping:"enabled:{type:boolean}"`
}
const (
ChannelEmail = "email"
ChannelWebhook = "webhook"

View File

@ -41,7 +41,6 @@ type Metric struct {
Expression string `json:"expression,omitempty" elastic_mapping:"expression:{type:keyword,copy_to:search_text}"` // alert expression, auto-generated, e.g. avg(cpu) > 80
}
func (m *Metric) GenerateExpression() (string, error) {
if len(m.Items) == 1 {
return fmt.Sprintf("%s(%s)", m.Items[0].Statistic, m.Items[0].Field), nil

View File

@ -48,4 +48,3 @@ func (r Resource) Validate() error{
}
return nil
}

View File

@ -81,6 +81,7 @@ func (rule *Rule) GetOrInitExpression() (string, error){
rule.Expression = strings.ReplaceAll(sb.String(), "result", metricExp)
return rule.Expression, nil
}
// GetNotificationConfig for adapter old version config
func (rule *Rule) GetNotificationConfig() *NotificationConfig {
if rule.NotificationConfig != nil {
@ -139,6 +140,7 @@ type FilterParam struct {
End interface{} `json:"end"`
BucketSize string `json:"bucket_size"`
}
//ctx
//rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
//check_status ,timestamp,

View File

@ -145,9 +145,6 @@ func TestCreateRule( t *testing.T) {
fmt.Println(exp)
}
func TestTimeRange_Include(t *testing.T) {
tr := TimeRange{
Start: "08:00",

View File

@ -36,6 +36,3 @@ type Cron struct {
Expression string `json:"expression" elastic_mapping:"expression:{type:text}"`
Timezone string `json:"timezone" elastic_mapping:"timezone:{type:keyword}"`
}

View File

@ -27,7 +27,6 @@
package insight
type SeriesItem struct {
Type string `json:"type"`
Options map[string]interface{} `json:"options"`

View File

@ -29,9 +29,10 @@ package insight
import (
"fmt"
"regexp"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"regexp"
)
type Metric struct {

View File

@ -45,6 +45,7 @@ type Layout struct {
}
type LayoutType string
const (
LayoutTypeWorkspace LayoutType = "workspace"
)

View File

@ -31,13 +31,13 @@ import (
"bytes"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/kv"
"infini.sh/framework/core/model"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/configs/common"
common2 "infini.sh/framework/modules/elastic/common"
metadata2 "infini.sh/framework/modules/elastic/metadata"
"time"

View File

@ -30,8 +30,8 @@ package common
import (
log "github.com/cihub/seelog"
"infini.sh/console/modules/agent/model"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/core/env"
"infini.sh/framework/modules/configs/common"
)
func GetAgentConfig() *model.AgentConfig {

View File

@ -86,9 +86,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
clusterFilter, hasAllPrivilege := h.GetClusterFilter(req, "metadata.labels.cluster_id")
if !hasAllPrivilege && clusterFilter == nil {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
if !hasAllPrivilege && clusterFilter != nil {
@ -97,9 +95,7 @@ func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http
hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req)
if !hasAllPrivilege && len(indexPrivilege) == 0 {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
if !hasAllPrivilege {

View File

@ -166,7 +166,6 @@ func (h *APIHandler) FetchClusterInfo(w http.ResponseWriter, req *http.Request,
Units: "query/s",
})
clusterID := global.MustLookupString(elastic.GlobalSystemElasticsearchID)
intervalField, err := getDateHistogramIntervalField(clusterID, bucketSizeStr)
if err != nil {
@ -633,7 +632,6 @@ func (h *APIHandler) GetClusterNodes(w http.ResponseWriter, req *http.Request, p
}
}
if v, ok := nodeID.(string); ok {
nodeInfos[v] = util.MapStr{
"timestamp": hitM["timestamp"],

View File

@ -207,10 +207,8 @@ func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Requ
h.Write(w, searchRes.RawResult.Body)
}
func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
@ -340,4 +338,3 @@ func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.
}
h.WriteJSON(w, indexNames, http.StatusOK)
}

View File

@ -211,8 +211,7 @@ func (h *APIHandler) getDiscoverHosts(w http.ResponseWriter, req *http.Request,
func getHostSummary(agentIDs []string, metricName string, summary map[string]util.MapStr) error {
if summary == nil {
summary = map[string]util.MapStr{
}
summary = map[string]util.MapStr{}
}
if len(agentIDs) == 0 {
@ -506,8 +505,7 @@ func (h *APIHandler) FetchHostInfo(w http.ResponseWriter, req *http.Request, ps
for key, item := range hostMetrics {
for _, line := range item.Lines {
if _, ok := networkMetrics[line.Metric.Label]; !ok {
networkMetrics[line.Metric.Label] = util.MapStr{
}
networkMetrics[line.Metric.Label] = util.MapStr{}
}
networkMetrics[line.Metric.Label][key] = line.Data
}
@ -694,8 +692,8 @@ const (
DiskWriteRateMetricKey = "disk_write_rate"
DiskPartitionUsageMetricKey = "disk_partition_usage"
NetworkInterfaceOutputRateMetricKey = "network_interface_output_rate"
)
func (h *APIHandler) GetSingleHostMetrics(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
hostID := ps.MustGetParameter("host_id")
hostInfo := &host.HostInfo{}

View File

@ -657,7 +657,6 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
})
}
aggs := map[string]interface{}{}
sumAggs := util.MapStr{}
@ -1050,6 +1049,7 @@ type TopTerm struct {
Value float64
}
type TopTermOrder []TopTerm
func (t TopTermOrder) Len() int {
return len(t)
}

View File

@ -149,15 +149,13 @@ func (h *APIHandler) SearchIndexMetadata(w http.ResponseWriter, req *http.Reques
}
}
must := []interface{}{
}
must := []interface{}{}
if indexFilter, hasIndexPri := h.getAllowedIndexFilter(req); hasIndexPri {
if indexFilter != nil {
must = append(must, indexFilter)
}
} else {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
boolQuery := util.MapStr{
@ -550,8 +548,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, req *http.Request, p
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{
}
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
@ -1261,8 +1258,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
if keyword != "" {
must = append(must, util.MapStr{
"wildcard": util.MapStr{
"metadata.index_name":
util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
"metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
},
})
}
@ -1288,7 +1284,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
},
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
indexName := orm.GetIndexName(elastic.IndexConfig{})
resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl))

View File

@ -27,6 +27,13 @@ import (
"context"
"encoding/json"
"fmt"
"math"
"net/http"
"strconv"
"strings"
"sync"
"time"
log "github.com/cihub/seelog"
"infini.sh/console/core"
v1 "infini.sh/console/modules/elastic/api/v1"
@ -39,12 +46,6 @@ import (
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"math"
"net/http"
"strconv"
"strings"
"sync"
"time"
)
type APIHandler struct {

View File

@ -56,7 +56,6 @@ func TestConvertBucketItemsToAggQueryParams(t *testing.T) {
bucketItem.Parameters["field"] = "metadata.labels.cluster_id"
bucketItem.Parameters["size"] = 2
nestBucket := common.BucketItem{}
nestBucket.Key = "key2"
nestBucket.Type = common.DateHistogramBucket
@ -88,7 +87,6 @@ func TestConvertBucketItemsToAggQueryParams(t *testing.T) {
bucketItem.Buckets = []*common.BucketItem{}
bucketItem.Buckets = append(bucketItem.Buckets, &nestBucket)
aggs := ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem}, nil)
fmt.Println(util.MustToJSON(aggs))

View File

@ -181,7 +181,6 @@ func (h *APIHandler) getNodeMetrics(ctx context.Context, clusterID string, bucke
},
},
},
})
}

View File

@ -143,19 +143,14 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
}
clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id")
if !hasPrivilege && clusterFilter == nil {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
must := []interface{}{
}
must := []interface{}{}
if !hasPrivilege && clusterFilter != nil {
must = append(must, clusterFilter)
}
query := util.MapStr{
"aggs": aggs,
"size": reqBody.Size,
@ -431,8 +426,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{
}
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
@ -1152,6 +1146,7 @@ type ShardsSummary struct {
PriStoreInBytes int64 `json:"pri_store_in_bytes"`
Timestamp interface{} `json:"timestamp"`
}
func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string, clusterID string, result *orm.Result) ([]interface{}, error) {
//filter indices
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
@ -1297,7 +1292,6 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
return indices, nil
}
func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
if GetMonitorState(clusterID) == elastic.ModeAgentless {

View File

@ -278,5 +278,3 @@ func rewriteTableNamesOfSqlRequest(req *http.Request, distribution string) (stri
}
return strings.Join(unescapedTableNames, ","), nil
}

View File

@ -39,8 +39,7 @@ import (
)
func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
@ -106,8 +105,7 @@ func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req
}
func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
@ -211,8 +209,7 @@ func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req
}
func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
@ -274,8 +271,7 @@ func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req
}
func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
var (
name = h.GetParameterOrDefault(req, "name", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
@ -325,8 +321,7 @@ func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *h
}
func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
var (
templateID = h.GetParameterOrDefault(req, "template_id", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
@ -357,8 +352,7 @@ func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWrit
}
func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
@ -395,8 +389,7 @@ func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http
}
func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {

View File

@ -36,8 +36,7 @@ import (
)
func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
@ -63,7 +62,6 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
}
if err != nil {
log.Error(err)
resBody["error"] = err
@ -75,8 +73,7 @@ func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Reques
}
func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))

View File

@ -28,12 +28,12 @@
package api
import (
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/event"
"infini.sh/framework/core/orm"
"infini.sh/framework/modules/elastic/adapter"
"net/http"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
)
func (h *APIHandler) GetShardInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

View File

@ -131,7 +131,6 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
},
},
},
})
}
should := []util.MapStr{
@ -589,7 +588,6 @@ func (h *APIHandler) getThreadPoolMetrics(ctx context.Context, clusterID string,
})
}
//Get Thread Pool queue
aggs := map[string]interface{}{}

View File

@ -38,8 +38,7 @@ import (
)
func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
@ -57,9 +56,7 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
return
}
var traceReq = &elastic.TraceTemplate{
}
var traceReq = &elastic.TraceTemplate{}
err = h.DecodeJSON(req, traceReq)
if err != nil {
@ -88,8 +85,7 @@ func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *
}
func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
resBody := map[string]interface{}{}
var (
name = h.GetParameterOrDefault(req, "name", "")
queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
@ -126,8 +122,7 @@ func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req
}
func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
reqParams := elastic.TraceTemplate{}
err := h.DecodeJSON(req, &reqParams)

View File

@ -77,7 +77,6 @@ const (
SegmentPointsMetricKey = "segment_points_memory"
VersionMapMetricKey = "segment_version_map"
FixedBitSetMetricKey = "segment_fixed_bit_set"
)
func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, metricKey string) (map[string]*common.MetricItem, error) {
@ -682,7 +681,6 @@ func (h *APIHandler) getIndexMetrics(ctx context.Context, req *http.Request, clu
})
}
aggs := map[string]interface{}{}
for _, metricItem := range indexMetricItems {
@ -954,6 +952,7 @@ type TopTerm struct {
Value float64
}
type TopTermOrder []TopTerm
func (t TopTermOrder) Len() int {
return len(t)
}

View File

@ -246,8 +246,7 @@ func (h *APIHandler) FetchIndexInfo(w http.ResponseWriter, ctx context.Context,
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{
}
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
@ -683,7 +682,6 @@ func (h *APIHandler) GetIndexHealthMetric(ctx context.Context, id, indexName str
return metricItem, nil
}
func (h *APIHandler) GetIndexStatusOfRecentDay(clusterID, indexName string) (map[string][]interface{}, error) {
q := orm.Query{
WildcardIndex: true,
@ -959,8 +957,7 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
if keyword != "" {
must = append(must, util.MapStr{
"wildcard": util.MapStr{
"metadata.index_name":
util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
"metadata.index_name": util.MapStr{"value": fmt.Sprintf("*%s*", keyword)},
},
})
}
@ -986,7 +983,6 @@ func (h APIHandler) ListIndex(w http.ResponseWriter, req *http.Request, ps httpr
},
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
indexName := orm.GetIndexName(elastic.IndexConfig{})
resp, err := esClient.SearchWithRawQueryDSL(indexName, util.MustToJSONBytes(dsl))

View File

@ -796,6 +796,7 @@ const (
ShardCountMetricKey = "shard_count"
CircuitBreakerMetricKey = "circuit_breaker"
)
func (h *APIHandler) GetClusterMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {
var (
@ -915,12 +916,14 @@ func (h *APIHandler) getClusterMetricsByKey(ctx context.Context, id string, buck
}
return h.getSingleMetrics(ctx, clusterMetricItems, query, bucketSize)
}
const (
IndexThroughputMetricKey = "index_throughput"
SearchThroughputMetricKey = "search_throughput"
IndexLatencyMetricKey = "index_latency"
SearchLatencyMetricKey = "search_latency"
)
func (h *APIHandler) GetClusterIndexMetrics(ctx context.Context, id string, bucketSize int, min, max int64, metricKey string) (map[string]*common.MetricItem, error) {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
metricItems := []*common.MetricItem{}

View File

@ -233,6 +233,7 @@ const (
MetricTypeNodeStats = "node_stats"
MetricTypeIndexStats = "index_stats"
)
// GetMetricMinBucketSize returns twice the metrics collection interval based on the cluster ID and metric type
func GetMetricMinBucketSize(clusterID, metricType string) (int, error) {
meta := elastic.GetMetadata(clusterID)

View File

@ -143,19 +143,14 @@ func (h *APIHandler) SearchNodeMetadata(w http.ResponseWriter, req *http.Request
}
clusterFilter, hasPrivilege := h.GetClusterFilter(req, "metadata.cluster_id")
if !hasPrivilege && clusterFilter == nil {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
must := []interface{}{
}
must := []interface{}{}
if !hasPrivilege && clusterFilter != nil {
must = append(must, clusterFilter)
}
query := util.MapStr{
"aggs": aggs,
"size": reqBody.Size,
@ -426,8 +421,7 @@ func (h *APIHandler) FetchNodeInfo(w http.ResponseWriter, req *http.Request, ps
for key, item := range metrics {
for _, line := range item.Lines {
if _, ok := indexMetrics[line.Metric.Label]; !ok {
indexMetrics[line.Metric.Label] = util.MapStr{
}
indexMetrics[line.Metric.Label] = util.MapStr{}
}
indexMetrics[line.Metric.Label][key] = line.Data
}
@ -1102,7 +1096,6 @@ func (h *APIHandler) getLatestIndices(req *http.Request, min string, max string,
return indices, nil
}
func (h *APIHandler) GetNodeShards(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
nodeID := ps.MustGetParameter("node_id")

View File

@ -28,9 +28,9 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/crewjam/saml"
"net/http"
"net/url"
"github.com/crewjam/saml"
"github.com/crewjam/saml/samlsp"
)

View File

@ -114,7 +114,6 @@ func (h *AlertAPI) ignoreAlertMessage(w http.ResponseWriter, req *http.Request,
_ = kv.DeleteKey(alerting2.KVLastMessageState, []byte(msg.RuleID))
}
h.WriteJSON(w, util.MapStr{
"ids": messageIDs,
"result": "updated",
@ -249,7 +248,6 @@ func (h *AlertAPI) getAlertMessageStats(w http.ResponseWriter, req *http.Request
}, http.StatusOK)
}
func (h *AlertAPI) searchAlertMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var (

View File

@ -38,8 +38,7 @@ import (
)
func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
reqParams := elastic.CommonCommand{}
err := h.DecodeJSON(req, &reqParams)
@ -85,8 +84,7 @@ func (h *APIHandler) HandleAddCommonCommandAction(w http.ResponseWriter, req *ht
}
func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
reqParams := elastic.CommonCommand{}
err := h.DecodeJSON(req, &reqParams)
@ -130,8 +128,7 @@ func (h *APIHandler) HandleSaveCommonCommandAction(w http.ResponseWriter, req *h
}
func (h *APIHandler) HandleQueryCommonCommandAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
resBody := map[string]interface{}{}
var (
keyword = h.GetParameterOrDefault(req, "keyword", "")

View File

@ -28,13 +28,13 @@
package insight
import (
"net/http"
"strconv"
log "github.com/cihub/seelog"
insight2 "infini.sh/console/model/insight"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"strconv"
)
func (h *InsightAPI) createDashboard(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {

View File

@ -34,8 +34,8 @@ import (
"strings"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/console/model/insight"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
)

View File

@ -29,13 +29,13 @@ package server
import (
log "github.com/cihub/seelog"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/modules/configs/config"
httprouter "infini.sh/framework/core/api/router"
config3 "infini.sh/framework/core/config"
"infini.sh/framework/core/global"
"infini.sh/framework/core/model"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/modules/configs/config"
"net/http"
"path"
"sync"

View File

@ -37,13 +37,13 @@ import (
log "github.com/cihub/seelog"
"infini.sh/console/core/security/enum"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
elastic2 "infini.sh/framework/core/elastic"
"infini.sh/framework/core/model"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/modules/elastic"
common2 "infini.sh/framework/modules/elastic/common"
)

View File

@ -32,11 +32,11 @@ import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/console/core"
"infini.sh/framework/modules/configs/common"
"infini.sh/framework/core/api"
"infini.sh/framework/core/errors"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/configs/common"
"net"
"net/http"
"net/url"

View File

@ -67,4 +67,3 @@ func (act *WebhookAction) Execute()([]byte, error){
defer res.Body.Close()
return ioutil.ReadAll(res.Body)
}

View File

@ -34,7 +34,6 @@ const (
KVLastMessageState = "alert_last_message_state"
)
const (
ParamRuleID = "rule_id" // rule UUID
ParamResourceID = "resource_id" // resource UUID
@ -50,6 +49,7 @@ const (
ParamGroupValues = "group_values"
ParamIssueTimestamp = "issue_timestamp"
ParamRelationValues = "relation_values"
// rule expression, rule_id, resource_id, resource_name, event_id, condition_name, preset_value,[group_tags, check_values],
// check_status ,timestamp,
)

View File

@ -34,10 +34,10 @@ import (
log "github.com/cihub/seelog"
"infini.sh/console/model"
"infini.sh/console/model/alerting"
"infini.sh/console/model/insight"
alerting2 "infini.sh/console/service/alerting"
"infini.sh/console/service/alerting/common"
"infini.sh/framework/core/elastic"
"infini.sh/console/model/insight"
"infini.sh/framework/core/kv"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
@ -50,8 +50,8 @@ import (
)
type Engine struct {
}
// GenerateQuery generate a final elasticsearch query dsl object
// when RawFilter of rule is not empty, priority use it, otherwise to covert from Filter of rule (todo)
// auto generate time filter query and then attach to final query
@ -150,6 +150,7 @@ func (engine *Engine) GenerateQuery(rule *alerting.Rule, filterParam *alerting.F
"aggs": rootAggs,
}, nil
}
// generateAgg convert statistic of metric item to elasticsearch aggregation
func (engine *Engine) generateAgg(metricItem *insight.MetricItem) map[string]interface{} {
var (
@ -540,6 +541,7 @@ func (engine *Engine) GetTargetMetricData(rule *alerting.Rule, isFilterNaN bool,
}
return targetMetricData, queryResult, nil
}
// CheckCondition check whether rule conditions triggered or not
// if triggered returns an ConditionResult
// sort conditions by priority desc before check , and then if condition is true, then continue check another group
@ -1103,8 +1105,6 @@ func performChannels(channels []alerting.Channel, ctx map[string]interface{}, ra
return actionResults, errCount
}
func (engine *Engine) GenerateTask(rule alerting.Rule) func(ctx context.Context) {
return func(ctx context.Context) {
defer func() {
@ -1280,7 +1280,6 @@ func saveAlertMessage(message *alerting.AlertMessage) error {
return err
}
func readTimeFromKV(bucketKey string, key []byte) (time.Time, error) {
timeBytes, err := kv.GetValue(bucketKey, key)
zeroTime := time.Time{}

View File

@ -47,6 +47,7 @@ var (
alertEngines = map[string]Engine{}
alertEnginesMutex = sync.RWMutex{}
)
func RegistEngine(typ string, engine Engine) {
alertEnginesMutex.Lock()
defer alertEnginesMutex.Unlock()