chore: change to framework main branch and refactor the dependency

Author: hardy, 2024-12-01 21:36:06 +08:00
Parent: ae3b792f7f
Commit: a936a8fa7c
150 changed files with 27834 additions and 7199 deletions


@ -1,6 +0,0 @@
.git/
node_modules
/web
docker/
db/
bin/

Jenkinsfile

@ -1,65 +0,0 @@
pipeline {
agent none
environment {
CI = 'true'
}
stages {
stage('Prepare Web Packages') {
agent {
label 'linux'
}
steps {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE'){
sh 'cd /home/jenkins/go/src/infini.sh/console && git stash && git pull origin master && make clean'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true|| rm -rif web'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true || git clone ssh://git@git.infini.ltd:64221/infini/console-ui.git web'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git pull origin master'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git stash'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm install'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm run build'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-linux'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-arm'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-darwin'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-win'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && GOROOT="/infini/go-pkgs/go-loongarch" GOPATH="/home/jenkins/go" make build-linux-loong64'
sh "cd /home/jenkins/go/src/infini.sh/console/docker && chmod a+x *.sh && perl -pi -e 's/\r\n/\n/g' *.sh && \
cd /home/jenkins/go/src/infini.sh/console/web/docker && chmod a+x *.sh && perl -pi -e 's/\r\n/\n/g' *.sh"
sh label: 'copy-license', script: 'cd /home/jenkins/go/src/infini.sh/console && cp ../framework/LICENSE bin && cat ../framework/NOTICE NOTICE > bin/NOTICE'
sh label: 'copy-configs', script: 'cd /home/jenkins/go/src/infini.sh/console && mkdir -p bin/config && cp config/*.json bin/config && cp config/*.tpl bin/config'
sh label: 'package-linux-amd64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-amd64.tar.gz console-linux-amd64 console.yml LICENSE NOTICE config'
sh label: 'package-linux-386', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-386.tar.gz console-linux-386 console.yml LICENSE NOTICE config'
sh label: 'package-linux-mips', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-mips.tar.gz console-linux-mips console.yml LICENSE NOTICE config'
sh label: 'package-linux-mipsle', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-mipsle.tar.gz console-linux-mipsle console.yml LICENSE NOTICE config'
sh label: 'package-linux-mips64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-mips64.tar.gz console-linux-mips64 console.yml LICENSE NOTICE config'
sh label: 'package-linux-mips64le', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-mips64le.tar.gz console-linux-mips64le console.yml LICENSE NOTICE config'
sh label: 'package-linux-loong64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-loong64.tar.gz console-linux-loong64 console.yml LICENSE NOTICE config'
sh label: 'package-linux-riscv64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-riscv64.tar.gz console-linux-riscv64 console.yml LICENSE NOTICE config'
sh label: 'package-linux-arm5', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-arm5.tar.gz console-linux-armv5 console.yml LICENSE NOTICE config'
sh label: 'package-linux-arm6', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-arm6.tar.gz console-linux-armv6 console.yml LICENSE NOTICE config'
sh label: 'package-linux-arm7', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-arm7.tar.gz console-linux-armv7 console.yml LICENSE NOTICE config'
sh label: 'package-linux-arm64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-arm64.tar.gz console-linux-arm64 console.yml LICENSE NOTICE config'
sh label: 'package-mac-amd64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && zip -r ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-mac-amd64.zip console-mac-amd64 console.yml LICENSE NOTICE config'
sh label: 'package-mac-arm64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && zip -r ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-mac-arm64.zip console-mac-arm64 console.yml LICENSE NOTICE config'
sh label: 'package-win-amd64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && zip -r ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-windows-amd64.zip console-windows-amd64.exe console.yml LICENSE NOTICE config'
sh label: 'package-win-386', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && zip -r ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-windows-386.zip console-windows-386.exe console.yml LICENSE NOTICE config'
archiveArtifacts artifacts: 'console-$VERSION-$BUILD_NUMBER-*.*', fingerprint: true, followSymlinks: true, onlyIfSuccessful: false
}
}
}
}
}


@ -1,41 +0,0 @@
pipeline {
agent none
environment {
CI = 'true'
}
stages {
stage('build') {
parallel {
stage('Build Docker Images') {
agent {
label 'linux'
}
steps {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE'){
sh 'cd /home/jenkins/go/src/infini.sh/console && git stash && git pull origin master && make clean'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true|| rm -rif web'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true || git clone ssh://git@git.infini.ltd:64221/infini/console-ui.git web'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git pull origin master'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git stash'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm install'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm run build'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build && chmod a+x bin/console'
sh label: 'copy-configs', script: 'cd /home/jenkins/go/src/infini.sh/console && mkdir -p bin/config && cp config/*.json bin/config && cp config/*.tpl bin/config'
sh label: 'docker-build', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && docker build -t infini-console -f ../docker/Dockerfile .'
sh label: 'docker-tagging', script: 'docker tag infini-console infinilabs/console:latest && docker tag infini-console infinilabs/console:$VERSION-$BUILD_NUMBER'
sh label: 'docker-push', script: 'docker push infinilabs/console:$VERSION-$BUILD_NUMBER && docker push infinilabs/console:latest'
}
}
}
} }
}
}


@ -1,49 +0,0 @@
pipeline {
agent none
environment {
CI = 'true'
}
stages {
stage('Prepare Web Packages') {
agent {
label 'linux'
}
steps {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE'){
sh 'cd /home/jenkins/go/src/infini.sh/console && git stash && git pull origin master && make clean'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true || rm -rif web'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true || git clone ssh://git@git.infini.ltd:64221/infini/console-ui.git web'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git pull origin master'
sh 'cd /home/jenkins/go/src/infini.sh/console/web/src && true || git clone ssh://git@git.infini.ltd:64221/infini/common-ui.git common'
sh 'cd /home/jenkins/go/src/infini.sh/console/web/src/common && git pull origin master'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git stash'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm install'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm run build'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-linux-amd64'
sh label: 'copy-license', script: 'cd /home/jenkins/go/src/infini.sh/console && cp ../framework/LICENSE bin && cat ../framework/NOTICE NOTICE > bin/NOTICE'
sh label: 'copy-configs', script: 'cd /home/jenkins/go/src/infini.sh/console && mkdir -p bin/config && cp config/*.json bin/config && cp config/*.tpl bin/config'
sh label: 'package-linux-amd64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-amd64.tar.gz console-linux-amd64 console.yml LICENSE NOTICE config'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-arm'
sh label: 'copy-license', script: 'cd /home/jenkins/go/src/infini.sh/console && cp ../framework/LICENSE bin && cat ../framework/NOTICE NOTICE > bin/NOTICE'
sh label: 'copy-configs', script: 'cd /home/jenkins/go/src/infini.sh/console && mkdir -p bin/config && cp config/*.json bin/config && cp config/*.tpl bin/config'
sh label: 'package-linux-arm64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-arm64.tar.gz console-linux-arm64 console.yml LICENSE NOTICE config'
archiveArtifacts artifacts: 'console-$VERSION-$BUILD_NUMBER-*.*', fingerprint: true, followSymlinks: true, onlyIfSuccessful: false
}
}
}
}
}


@ -1,43 +0,0 @@
pipeline {
agent none
environment {
CI = 'true'
}
stages {
stage('Prepare Web Packages') {
agent {
label 'linux'
}
steps {
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE'){
sh 'cd /home/jenkins/go/src/infini.sh/console && git stash && git pull origin master && make clean'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true || rm -rif web'
sh 'cd /home/jenkins/go/src/infini.sh/console/ && true || git clone ssh://git@git.infini.ltd:64221/infini/console-ui.git web'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git pull origin master'
sh 'cd /home/jenkins/go/src/infini.sh/console/web/src && true || git clone ssh://git@git.infini.ltd:64221/infini/common-ui.git common'
sh 'cd /home/jenkins/go/src/infini.sh/console/web/src/common && git pull origin master'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && git stash'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm install'
sh 'cd /home/jenkins/go/src/infini.sh/console/web && cnpm run build'
sh 'cd /home/jenkins/go/src/infini.sh/console && git pull origin master && make config build-linux-amd64'
sh label: 'copy-license', script: 'cd /home/jenkins/go/src/infini.sh/console && cp ../framework/LICENSE bin && cat ../framework/NOTICE NOTICE > bin/NOTICE'
sh label: 'copy-configs', script: 'cd /home/jenkins/go/src/infini.sh/console && mkdir -p bin/config && cp -rf config/*.json bin/config && cp -rf config/*.tpl bin/config && cp -rf config/setup bin/config'
sh label: 'package-linux-amd64', script: 'cd /home/jenkins/go/src/infini.sh/console/bin && tar cfz ${WORKSPACE}/console-$VERSION-$BUILD_NUMBER-linux-amd64.tar.gz console-linux-amd64 console.yml LICENSE NOTICE config'
archiveArtifacts artifacts: 'console-$VERSION-$BUILD_NUMBER-*.*', fingerprint: true, followSymlinks: true, onlyIfSuccessful: false
}
}
}
}
}


@ -9,7 +9,6 @@ APP_STATIC_FOLDER := .public
APP_STATIC_PACKAGE := public
APP_UI_FOLDER := ui
APP_PLUGIN_FOLDER := plugin
FRAMEWORK_BRANCH := console
# GO15VENDOREXPERIMENT="1" GO111MODULE=off easyjson -all domain.go
include ../framework/Makefile


@ -1,46 +1,11 @@
#INFINI Cloud
#INFINI Console
INFINI Cloud for Elasticsearch
INFINI Console for Elasticsearch/OpenSearch/Easysearch
## Front-end Development Notes
The front end is built with React. The final output is pure static assets under the `.public` directory, which can be deployed standalone without a Node environment.
### Preparing the Docker Development Environment
#### Install Docker
#### Configure Docker Registry Mirrors (mainland China)
Modify the Docker engine settings: on Windows, in Docker Desktop's Settings; on Linux, in /etc/docker/daemon.json
```
{
"registry-mirrors": [
"https://registry.docker-cn.com",
"https://docker.mirrors.ustc.edu.cn/"
],
"insecure-registries": [],
"debug": true,
"experimental": false
}
```
#### Start the Development Environment
```
cnpm run docker:dev
```
Once it is up, wait a moment, then open http://localhost:8000/ and refresh manually to see the latest changes.
#### Manually Update the Development Image
```
docker login -u infini -p ltd docker.infini.ltd:64443
docker pull docker.infini.ltd:64443/nodejs-dev:latest
```
### Preparing the Local Development Environment
Make sure a `nodejs` environment (version 8.5.0 or later) is installed:
@ -51,7 +16,7 @@ npm -v
In mainland China, you can install `cnpm` for a faster and more reliable package-management experience. Install it with:
```sh
npm install -g cnpm --registry=https://registry.npm.taobao.org
npm install -g cnpm@9.2.0 --registry=https://registry.npm.taobao.org
```
### Download Project Dependencies
@ -75,10 +40,6 @@ cnpm run build
Running this command generates the final HTML, CSS, and JS under the `/.public` directory. These files can be understood and run directly by the browser, so you can deploy them to whatever server you like.
Alternatively, build with Docker:
```
cnpm run docker:build
```
### Adding Project Dependencies
```


@ -1,32 +0,0 @@
#!/bin/bash
WORKBASE=/home/jenkins/go/src/infini.sh/console
if [ -d $WORKBASE/.public ]; then
echo "clean exists .pulbic folder."
rm -rf $WORKBASE/.public
fi
if [ ! -d $WORKBASE/web ]; then
git clone ssh://git@git.infini.ltd:64221/infini/console-ui.git web
fi
if [ ! -d $WORKBASE/web/src/common ]; then
cd $WORKBASE/web/src
git clone ssh://git@git.infini.ltd:64221/infini/common-ui.git common
fi
cd $WORKBASE/web
git pull origin master
cd $WORKBASE/web/src/common
git pull origin master
git log --pretty=oneline -5
cd $WORKBASE/web
#--quiet
cnpm install --quiet --no-progress
cnpm run clean
cnpm run build --quiet &>/dev/null

build.sh

@ -1,104 +0,0 @@
#!/bin/bash
#set -eo pipefail
#init
WORKBASE=/home/jenkins/go/src/infini.sh
WORKDIR=$WORKBASE/$PNAME
DEST=/infini/Sync/Release/$PNAME/stable
if [[ $VERSION =~ NIGHTLY ]]; then
BUILD_NUMBER=$BUILD_DAY
DEST=/infini/Sync/Release/$PNAME/snapshot
fi
export DOCKER_CLI_EXPERIMENTAL=enabled
#clean all
cd $WORKSPACE && git clean -fxd
#pull code
cd $WORKDIR && git clean -fxd -e ".public"
git stash && git pull origin master
#build
make clean config build-linux
make config build-arm
make config build-darwin
make config build-win
GOROOT="/infini/go-pkgs/go-loongarch" PATH=$GOROOT/bin:$PATH make build-linux-loong64
#GOROOT="/infini/go-pkgs/go-swarch" PATH=$GOROOT/bin:$PATH make build-linux-sw64
#copy-configs
cp -rf $WORKBASE/framework/LICENSE $WORKDIR/bin && cat $WORKBASE/framework/NOTICE $WORKDIR/NOTICE > $WORKDIR/bin/NOTICE
mkdir -p $WORKDIR/bin/config
cp $WORKDIR/config/*.json $WORKDIR/bin/config
cp -rf $WORKDIR/config/*.tpl $WORKDIR/bin/config
[ -d $WORKDIR/config/setup ] && cp -rf $WORKDIR/config/setup $WORKDIR/bin/config
cd $WORKDIR/bin
# if the build failed, use the existence of the binary to decide whether to proceed to the next steps
[ -f "$WORKDIR/bin/${PNAME}-linux-amd64" ] || exit
for t in 386 amd64 arm64 armv5 armv6 armv7 loong64 mips mips64 mips64le mipsle riscv64 ; do
tar zcf ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-linux-$t.tar.gz "${PNAME}-linux-$t" $PNAME.yml LICENSE NOTICE config
done
for t in mac-amd64 mac-arm64 windows-amd64 windows-386 ; do
zip -qr ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-$t.zip $PNAME-$t $PNAME.yml LICENSE NOTICE config
done
for t in windows-amd64 windows-386 ; do
zip -qr ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-$t.zip $PNAME-$t.exe $PNAME.yml LICENSE NOTICE config
done
#build image & push
for t in amd64 arm64 ; do
cat <<EOF>Dockerfile
FROM --platform=linux/$t alpine:3.16.5
MAINTAINER "hardy <luohf@infinilabs.com>"
ARG APP_NAME=$PNAME
ARG APP_HOME=/
ENV APP=\${APP_NAME}
WORKDIR /
COPY ["$PNAME-linux-$t", "$PNAME.yml", "\${APP_HOME}/"]
COPY ["config", "\${APP_HOME}/config"]
CMD ["/${PNAME}-linux-$t"]
EOF
docker buildx build -t infinilabs/$PNAME-$t:latest --platform=linux/$t -o type=docker .
docker push infinilabs/$PNAME-$t:latest
docker tag infinilabs/$PNAME-$t:latest infinilabs/$PNAME-$t:$VERSION-$BUILD_NUMBER
docker push infinilabs/$PNAME-$t:$VERSION-$BUILD_NUMBER
done
#composite tag
docker buildx imagetools create -t infinilabs/$PNAME:latest \
infinilabs/$PNAME-arm64:latest \
infinilabs/$PNAME-amd64:latest
docker buildx imagetools create -t infinilabs/$PNAME:$VERSION-$BUILD_NUMBER \
infinilabs/$PNAME-arm64:$VERSION-$BUILD_NUMBER \
infinilabs/$PNAME-amd64:$VERSION-$BUILD_NUMBER
#publish
for t in 386 amd64 arm64 armv5 armv6 armv7 loong64 mips mips64 mips64le mipsle riscv64 ; do
[ -f ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-linux-$t.tar.gz ] && ossuploader upload -p $PNAME -f ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-linux-$t.tar.gz
#cp -rf ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-linux-$t.tar.gz $DEST
done
for t in mac-amd64 mac-arm64 windows-amd64 windows-386 ; do
[ -f ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-$t.zip ] && ossuploader upload -p $PNAME -f ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-$t.zip
#cp -rf ${WORKSPACE}/$PNAME-$VERSION-$BUILD_NUMBER-$t.zip $DEST
done
#git reset
cd $WORKSPACE && git reset --hard
cd $WORKDIR && git reset --hard
#clean weeks ago image
NEEDCLEN=$(docker images |grep "$PNAME" |grep "weeks ago")
if [ ! -z "$NEEDCLEN" ]; then
docker images |grep "$PNAME" |grep "weeks ago" |awk '{print $3}' |xargs docker rmi -f >/dev/null 2>&1
fi


@ -78,7 +78,7 @@ pipeline:
processor:
- bulk_indexing:
max_connection_per_node: 100
num_of_slices: 3
num_of_slices: 1
max_worker_size: 30
idle_timeout_in_seconds: 10
bulk:


@ -63,18 +63,6 @@ pipeline:
processor:
- activity:
elasticsearch: "$[[CLUSTER_ID]]"
- name: migration_task_dispatcher
auto_start: true
keep_running: true
retry_delay_in_ms: 1000
processor:
- migration_dispatcher:
elasticsearch: "$[[CLUSTER_ID]]"
check_instance_available: true
max_tasks_per_instance: 10
task_batch_size: 50
when:
cluster_available: ["$[[CLUSTER_ID]]"]
- name: merge_logging
auto_start: true

core/auth.go

@ -0,0 +1,116 @@
package core
import (
"infini.sh/console/core/security"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"net/http"
)
// Handler is the object of http handler
type Handler struct {
api.Handler
}
var authEnabled = false
// BasicAuth register api with basic auth
func BasicAuth(h httprouter.Handle, requiredUser, requiredPassword string) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
// Get the Basic Authentication credentials
user, password, hasAuth := r.BasicAuth()
if hasAuth && user == requiredUser && password == requiredPassword {
// Delegate request to the given handle
h(w, r, ps)
} else {
// Request Basic Authentication otherwise
w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
}
}
}
func EnableAuth(enable bool) {
authEnabled = enable
}
func IsAuthEnable() bool {
return authEnabled
}
func (handler Handler) RequireLogin(h httprouter.Handle) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
if authEnabled {
claims, err := security.ValidateLogin(r.Header.Get("Authorization"))
if err != nil {
handler.WriteError(w, err.Error(), http.StatusUnauthorized)
return
}
r = r.WithContext(security.NewUserContext(r.Context(), claims))
}
h(w, r, ps)
}
}
func (handler Handler) RequirePermission(h httprouter.Handle, permissions ...string) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
if global.Env().SetupRequired() {
return
}
if authEnabled {
claims, err := security.ValidateLogin(r.Header.Get("Authorization"))
if err != nil {
handler.WriteError(w, err.Error(), http.StatusUnauthorized)
return
}
err = security.ValidatePermission(claims, permissions)
if err != nil {
handler.WriteError(w, err.Error(), http.StatusForbidden)
return
}
r = r.WithContext(security.NewUserContext(r.Context(), claims))
}
h(w, r, ps)
}
}
func (handler Handler) RequireClusterPermission(h httprouter.Handle, permissions ...string) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
if authEnabled {
id := ps.ByName("id")
claims, err := security.ValidateLogin(r.Header.Get("Authorization"))
if err != nil {
handler.WriteError(w, err.Error(), http.StatusUnauthorized)
return
}
r = r.WithContext(security.NewUserContext(r.Context(), claims))
hasAllPrivilege, clusterIDs := security.GetCurrentUserCluster(r)
if !hasAllPrivilege && (len(clusterIDs) == 0 || !util.StringInArray(clusterIDs, id)) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(http.StatusText(http.StatusForbidden)))
return
}
}
h(w, r, ps)
}
}
func (handler Handler) GetCurrentUser(req *http.Request) string {
if authEnabled {
claims, ok := req.Context().Value("user").(*security.UserClaims)
if ok {
return claims.Username
}
}
return ""
}
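
For orientation, a minimal sketch (not part of the commit) of how these helpers might be used by an API module; the `UserAPI` type, `listUsers` handler, and the wiring below are hypothetical, while `core.Handler`, `RequirePermission`, `GetCurrentUser`, and `enum.PermissionUserRead` come from this commit:

```go
package api

import (
	"net/http"

	"infini.sh/console/core"
	"infini.sh/console/core/security/enum"
	httprouter "infini.sh/framework/core/api/router"
)

type UserAPI struct {
	core.Handler
}

// listUsers is a hypothetical endpoint; it only demonstrates how a business
// handler gets wrapped by the auth helpers defined above.
func (h *UserAPI) listUsers(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	// GetCurrentUser returns the username carried by the validated JWT claims,
	// or an empty string when auth is disabled.
	_, _ = w.Write([]byte("current user: " + h.GetCurrentUser(r)))
}

// securedListUsers applies the permission check first, so unauthenticated or
// unauthorized callers are rejected with 401/403 before listUsers ever runs.
func (h *UserAPI) securedListUsers() httprouter.Handle {
	return h.RequirePermission(h.listUsers, enum.PermissionUserRead)
}
```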

core/elastic.go

@ -0,0 +1,184 @@
package core
import (
rbac "infini.sh/console/core/security"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/radix"
"infini.sh/framework/core/util"
"net/http"
)
func (handler Handler) IndexRequired(h httprouter.Handle, route ...string) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
if authEnabled {
claims, err := rbac.ValidateLogin(r.Header.Get("Authorization"))
if err != nil {
handler.WriteError(w, err.Error(), http.StatusUnauthorized)
return
}
newRole := rbac.CombineUserRoles(claims.Roles)
indexReq := rbac.NewIndexRequest(ps, route)
err = rbac.ValidateIndex(indexReq, newRole)
if err != nil {
handler.WriteError(w, err.Error(), http.StatusForbidden)
return
}
}
h(w, r, ps)
}
}
func (handler Handler) ClusterRequired(h httprouter.Handle, route ...string) httprouter.Handle {
return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
if authEnabled {
claims, err := rbac.ValidateLogin(r.Header.Get("Authorization"))
if err != nil {
handler.WriteError(w, err.Error(), http.StatusUnauthorized)
return
}
//newRole := biz.CombineUserRoles(claims.Roles)
clusterReq := rbac.NewClusterRequest(ps, route)
newRole := rbac.CombineUserRoles(claims.Roles)
err = rbac.ValidateCluster(clusterReq, newRole)
if err != nil {
handler.WriteError(w, err.Error(), http.StatusForbidden)
return
}
}
h(w, r, ps)
}
}
func (handler Handler) GetClusterFilter(r *http.Request, field string) (util.MapStr, bool) {
if !IsAuthEnable() {
return nil, true
}
hasAllPrivilege, clusterIds := rbac.GetCurrentUserCluster(r)
if hasAllPrivilege {
return nil, true
}
if len(clusterIds) == 0 {
return nil, false
}
return util.MapStr{
"terms": util.MapStr{
field: clusterIds,
},
}, false
}
func (handler Handler) GetAllowedClusters(r *http.Request) ([]string, bool) {
if !IsAuthEnable() {
return nil, true
}
hasAllPrivilege, clusterIds := rbac.GetCurrentUserCluster(r)
return clusterIds, hasAllPrivilege
}
func (handler Handler) GetAllowedIndices(r *http.Request, clusterID string) ([]string, bool) {
if !IsAuthEnable() {
return nil, true
}
hasAllPrivilege, indices := handler.GetCurrentUserClusterIndex(r, clusterID)
if hasAllPrivilege {
return nil, true
}
return indices, false
}
func (handler Handler) IsIndexAllowed(r *http.Request, clusterID string, indexName string) bool {
if !IsAuthEnable() {
return true
}
hasAllPrivilege, indices := handler.GetCurrentUserClusterIndex(r, clusterID)
if hasAllPrivilege {
return true
}
if len(indices) == 0 {
return false
}
return radix.Compile(indices...).Match(indexName)
}
func (handler Handler) ValidateProxyRequest(req *http.Request, clusterID string) (bool, string, error) {
if !IsAuthEnable() {
return false, "", nil
}
claims, err := rbac.ValidateLogin(req.Header.Get("Authorization"))
if err != nil {
return false, "", err
}
if util.StringInArray(claims.Roles, rbac.RoleAdminName) {
return true, "", nil
}
permission, params, matched := rbac.SearchAPIPermission("elasticsearch", req.Method, req.URL.Path)
if matched && permission != "" {
newRole := rbac.CombineUserRoles(claims.Roles)
if indexName, ok := params["index_name"]; ok {
indexReq := rbac.IndexRequest{
Cluster: clusterID,
Index: indexName,
Privilege: []string{permission},
}
err = rbac.ValidateIndex(indexReq, newRole)
if err != nil {
return false, permission, err
}
} else {
clusterReq := rbac.ClusterRequest{
Cluster: clusterID,
Privilege: []string{permission},
}
err = rbac.ValidateCluster(clusterReq, newRole)
if err != nil {
return false, permission, err
}
}
}
return false, permission, nil
}
func (handler Handler) GetCurrentUserIndex(req *http.Request) (bool, map[string][]string) {
if !IsAuthEnable() {
return true, nil
}
ctxVal := req.Context().Value("user")
if userClaims, ok := ctxVal.(*rbac.UserClaims); ok {
roles := userClaims.Roles
var realIndex = map[string][]string{}
for _, roleName := range roles {
role, ok := rbac.RoleMap[roleName]
if ok {
for _, ic := range role.Privilege.Elasticsearch.Cluster.Resources {
for _, ip := range role.Privilege.Elasticsearch.Index {
if ic.ID == "*" && util.StringInArray(ip.Name, "*") {
return true, nil
}
realIndex[ic.ID] = append(realIndex[ic.ID], ip.Name...)
}
}
}
}
return false, realIndex
}
return false, nil
}
func (handler Handler) GetCurrentUserClusterIndex(req *http.Request, clusterID string) (bool, []string) {
ctxVal := req.Context().Value("user")
if userClaims, ok := ctxVal.(*rbac.UserClaims); ok {
return rbac.GetRoleIndex(userClaims.Roles, clusterID)
} else {
panic("user context value not found")
}
}
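
A rough sketch (not from the commit) of how `GetClusterFilter` might be merged into a search query so users without the all-clusters privilege only query documents from clusters they are allowed to see; the query shape and the caller-supplied field name are assumptions:

```go
package example

import (
	"net/http"

	"infini.sh/console/core"
	"infini.sh/framework/core/util"
)

// buildScopedQuery is a hypothetical helper: it merges the cluster filter into a
// bool query so that the search is scoped to the caller's permitted clusters.
func buildScopedQuery(h core.Handler, r *http.Request, clusterField string) util.MapStr {
	must := []util.MapStr{{"match_all": util.MapStr{}}}
	// GetClusterFilter returns (nil, true) when auth is disabled or the user holds
	// the all-clusters privilege; otherwise a terms filter on the given field.
	if filter, hasAll := h.GetClusterFilter(r, clusterField); !hasAll && filter != nil {
		must = append(must, filter)
	}
	return util.MapStr{"query": util.MapStr{"bool": util.MapStr{"must": must}}}
}
```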


@ -0,0 +1,52 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package security
import (
"github.com/golang-jwt/jwt"
"infini.sh/framework/core/errors"
"infini.sh/framework/core/util"
"time"
)
func GenerateAccessToken(user *User) (map[string]interface{}, error) {
var data map[string]interface{}
roles, privilege := user.GetPermissions()
token1 := jwt.NewWithClaims(jwt.SigningMethodHS256, UserClaims{
ShortUser: &ShortUser{
Provider: user.AuthProvider,
Username: user.Username,
UserId: user.ID,
Roles: roles,
},
RegisteredClaims: &jwt.RegisteredClaims{
ExpiresAt: jwt.NewNumericDate(time.Now().Add(24 * time.Hour)),
},
})
tokenString, err := token1.SignedString([]byte(Secret))
if tokenString == "" || err != nil {
return nil, errors.Errorf("failed to generate access_token for user: %v", user.Username)
}
token := Token{ExpireIn: time.Now().Unix() + 86400}
SetUserToken(user.ID, token)
data = util.MapStr{
"access_token": tokenString,
"username": user.Username,
"id": user.ID,
"expire_in": 86400,
"roles": roles,
"privilege": privilege,
}
data["status"] = "ok"
return data, err
}
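
A hypothetical login step showing how `GenerateAccessToken` might be called after the password has been verified elsewhere; the user values are placeholders, and the framework's kv module is assumed to be initialized since the token is persisted via `SetUserToken`:

```go
package example

import (
	"fmt"

	security "infini.sh/console/core/security"
)

// issueToken turns an authenticated user into a signed JWT plus the metadata
// (roles, privilege, expiry) that the UI expects in the login response.
func issueToken() error {
	user := &security.User{Username: "admin"} // hypothetical user record
	user.ID = "hypothetical-user-id"

	data, err := security.GenerateAccessToken(user)
	if err != nil {
		return err
	}
	fmt.Println(data["access_token"], data["expire_in"], data["roles"])
	return nil
}
```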

core/security/adapter.go

@ -0,0 +1,53 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package security
import (
"fmt"
"infini.sh/framework/core/orm"
)
type IRole interface {
Get(id string) (Role, error)
GetBy(field string, value interface{}) (Role, error)
Update(role *Role) error
Create(role *Role) (string, error)
Delete(id string) error
Search(keyword string, from, size int) (orm.Result, error)
}
type IUser interface {
Get(id string) (User, error)
GetBy(field string, value interface{}) (*User, error)
Update(user *User) error
Create(user *User) (string, error)
Delete(id string) error
Search(keyword string, from, size int) (orm.Result, error)
}
type SecurityRealm interface {
GetType() string
Authenticate(username, password string) (bool, *User, error) // Return true if authentication is successful, otherwise false
Authorize(user *User) (bool, error) // Return true if authorization is granted, otherwise false
}
type Adapter struct {
Role IRole
User IUser
}
var adapterHandlers = map[string]Adapter{}
func RegisterAdapter(typ string, handler Adapter) {
adapterHandlers[typ] = handler
}
func GetAdapter(typ string) Adapter {
handler, ok := adapterHandlers[typ]
if !ok {
panic(fmt.Errorf("dal handler %s not found", typ))
}
return handler
}
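
A sketch of how a security backend might be registered and consumed; the "native" type name and the role/user implementations are assumptions, while `RegisterAdapter`, `GetAdapter`, and the interfaces come from this file:

```go
package example

import (
	security "infini.sh/console/core/security"
)

// registerNativeRealm is a hypothetical setup step: role and user are assumed
// implementations of IRole / IUser (e.g. backed by the ORM). Registration must
// happen during module startup, because GetAdapter panics for an unknown type.
func registerNativeRealm(role security.IRole, user security.IUser) {
	security.RegisterAdapter("native", security.Adapter{Role: role, User: user})
}

// lookupUser resolves a user record through the registered adapter.
func lookupUser(id string) (security.User, error) {
	return security.GetAdapter("native").User.Get(id)
}
```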

core/security/context.go

@ -0,0 +1,43 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package security
import (
"context"
"fmt"
"github.com/golang-jwt/jwt"
)
const ctxUserKey = "user"
type UserClaims struct {
*jwt.RegisteredClaims
*ShortUser
}
type ShortUser struct {
Provider string `json:"provider"`
Username string `json:"username"`
UserId string `json:"user_id"`
Roles []string `json:"roles"`
}
const Secret = "console"
func NewUserContext(ctx context.Context, clam *UserClaims) context.Context {
return context.WithValue(ctx, ctxUserKey, clam)
}
func FromUserContext(ctx context.Context) (*ShortUser, error) {
ctxUser := ctx.Value(ctxUserKey)
if ctxUser == nil {
return nil, fmt.Errorf("user not found")
}
reqUser, ok := ctxUser.(*UserClaims)
if !ok {
return nil, fmt.Errorf("invalid context user")
}
return reqUser.ShortUser, nil
}
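
A small round-trip sketch: the auth middleware stores validated claims on the request context, and downstream code reads back only the `ShortUser`; the claim values here are hypothetical:

```go
package example

import (
	"context"
	"fmt"

	security "infini.sh/console/core/security"
)

func demoContext() {
	claims := &security.UserClaims{
		ShortUser: &security.ShortUser{
			Username: "admin",
			UserId:   "hypothetical-user-id",
			Roles:    []string{security.RoleAdminName},
		},
	}
	// What the middleware does after ValidateLogin succeeds.
	ctx := security.NewUserContext(context.Background(), claims)

	// What a downstream handler does; it never touches the JWT directly.
	if u, err := security.FromUserContext(ctx); err == nil {
		fmt.Println(u.Username, u.Roles)
	}
}
```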

core/security/enum/const.go

@ -0,0 +1,280 @@
package enum
var PermissionMap = make(map[string][]string)
const (
UserRead = "system.user:read"
UserAll = "system.user:all"
RoleRead = "system.role:read"
RoleAll = "system.role:all"
SecurityRead = "system.security:read"
SecurityAll = "system.security:all"
ClusterAll = "system.cluster:all"
ClusterRead = "system.cluster:read"
CommandAll = "system.command:all"
CommandRead = "system.command:read"
CredentialAll = "system.credential:all"
CredentialRead = "system.credential:read"
InstanceRead = "gateway.instance:read"
InstanceAll = "gateway.instance:all"
EntryAll = "gateway.entry:all"
EntryRead = "gateway.entry:read"
RouterRead = "gateway.router:read"
RouterAll = "gateway.router:all"
FlowRead = "gateway.flow:read"
FlowAll = "gateway.flow:all"
AgentInstanceRead = "agent.instance:read"
AgentInstanceAll = "agent.instance:all"
IndexAll = "data.index:all"
IndexRead = "data.index:read"
AliasAll = "data.alias:all"
AliasRead = "data.alias:read"
ViewsAll = "data.view:all"
ViewsRead = "data.view:read"
DiscoverAll = "data.discover:all"
DiscoverRead = "data.discover:read"
RuleRead = "alerting.rule:read"
RuleAll = "alerting.rule:all"
AlertRead = "alerting.alert:read"
AlertAll = "alerting.alert:all"
AlertMessageRead = "alerting.message:read"
AlertMessageAll = "alerting.message:all"
ChannelRead = "alerting.channel:read"
ChannelAll = "alerting.channel:all"
ClusterOverviewRead = "cluster.overview:read"
ClusterOverviewAll = "cluster.overview:all"
MonitoringRead = "cluster.monitoring:read"
MonitoringAll = "cluster.monitoring:all"
ActivitiesRead = "cluster.activities:read"
ActivitiesAll = "cluster.activities:all"
AuditLogsRead = "system.audit_logs:read"
AuditLogsAll = "system.audit_logs:all"
DataMigrationRead = "data_tools.migration:read"
DataMigrationAll = "data_tools.migration:all"
DataComparisonRead = "data_tools.comparison:read"
DataComparisonAll = "data_tools.comparison:all"
DashboardRead = "insight.dashboard:read"
DashboardAll = "insight.dashboard:all"
DevtoolConsoleAll = "devtool.console:all"
DevtoolConsoleRead = "devtool.console:read"
WorkbenchAll = "workbench:all"
WorkbenchRead = "workbench:read"
TenantCustomerRead = "tenant.customer:read"
TenantCustomerAll = "tenant.customer:all"
SubscriptionRead = "tenant.subscription:read"
SubscriptionAll = "tenant.subscription:all"
)
const (
PermissionUserRead string = "user:read"
PermissionUserWrite = "user:write"
PermissionDisableBuiltinAdmin = "user:disable_builtin_admin"
PermissionRoleRead = "role:read"
PermissionRoleWrite = "role:write"
PermissionCommandRead = "command:read"
PermissionCommandWrite = "command:write"
PermissionElasticsearchClusterRead = "es.cluster:read"
PermissionElasticsearchClusterWrite = "es.cluster:write" // es cluster
PermissionElasticsearchIndexRead = "es.index:read"
PermissionElasticsearchIndexWrite = "es.index:write" // es index metadata
PermissionElasticsearchNodeRead = "es.node:read" //es node metadata
PermissionActivityRead = "activity:read"
PermissionActivityWrite = "activity:write"
PermissionAuditLogRead = "audit_log:read"
PermissionAuditLogWrite = "audit_log:write"
PermissionAlertRuleRead = "alert.rule:read"
PermissionAlertRuleWrite = "alert.rule:write"
PermissionAlertHistoryRead = "alert.history:read"
PermissionAlertHistoryWrite = "alert.history:write"
PermissionAlertMessageRead = "alert.message:read"
PermissionAlertMessageWrite = "alert.message:write"
PermissionAlertChannelRead = "alert.channel:read"
PermissionAlertChannelWrite = "alert.channel:write"
PermissionViewRead = "view:read"
PermissionViewWrite = "view:write"
PermissionLayoutRead = "layout:read"
PermissionLayoutWrite = "layout:write"
PermissionGatewayInstanceRead = "gateway.instance:read"
PermissionGatewayInstanceWrite = "gateway.instance:write"
PermissionGatewayEntryRead = "gateway.entry:read"
PermissionGatewayEntryWrite = "gateway.entry:write"
PermissionGatewayRouterRead = "gateway.router:read"
PermissionGatewayRouterWrite = "gateway.router:write"
PermissionGatewayFlowRead = "gateway.flow:read"
PermissionGatewayFlowWrite = "gateway.flow:write"
PermissionElasticsearchMetricRead = "es.metric:read"
PermissionAgentInstanceRead = "agent.instance:read"
PermissionAgentInstanceWrite = "agent.instance:write"
PermissionCredentialRead = "credential:read"
PermissionCredentialWrite = "credential:write"
PermissionMigrationTaskRead = "task:read"
PermissionMigrationTaskWrite = "task:write"
PermissionComparisonTaskRead = "comparison_task:read"
PermissionComparisonTaskWrite = "comparison_task:write"
)
var (
UserReadPermission = []string{PermissionUserRead}
UserAllPermission = []string{PermissionUserRead, PermissionUserWrite, PermissionRoleRead}
RoleReadPermission = []string{PermissionRoleRead}
RoleAllPermission = []string{PermissionRoleRead, PermissionRoleWrite}
SecurityReadPermission = []string{PermissionUserRead, PermissionRoleRead}
SecurityAllPermission = []string{PermissionUserRead, PermissionUserWrite, PermissionRoleRead, PermissionRoleWrite, PermissionDisableBuiltinAdmin}
ClusterReadPermission = []string{PermissionElasticsearchClusterRead}
ClusterAllPermission = []string{PermissionElasticsearchClusterRead, PermissionElasticsearchClusterWrite}
CommandReadPermission = []string{PermissionCommandRead}
CommandAllPermission = []string{PermissionCommandRead, PermissionCommandWrite}
InstanceReadPermission = []string{PermissionGatewayInstanceRead}
InstanceAllPermission = []string{PermissionGatewayInstanceRead, PermissionGatewayInstanceWrite}
EntryReadPermission = []string{PermissionGatewayEntryRead}
EntryAllPermission = []string{PermissionGatewayEntryRead, PermissionGatewayEntryWrite}
RouterReadPermission = []string{PermissionGatewayRouterRead}
RouterAllPermission = []string{PermissionGatewayRouterRead, PermissionGatewayRouterWrite}
FlowReadPermission = []string{PermissionGatewayFlowRead}
FlowAllPermission = []string{PermissionGatewayFlowRead, PermissionGatewayFlowWrite}
IndexAllPermission = []string{"index:read"}
IndexReadPermission = []string{"index:read", "alias:write"}
AliasAllPermission = []string{"alias:read"}
AliasReadPermission = []string{"alias:read", "alias:write"}
ViewsAllPermission = []string{PermissionViewRead, PermissionViewWrite, PermissionLayoutRead, PermissionLayoutWrite}
ViewsReadPermission = []string{PermissionViewRead, PermissionLayoutRead}
DiscoverReadPermission = []string{PermissionViewRead}
DiscoverAllPermission = []string{PermissionViewRead}
RuleReadPermission = []string{PermissionAlertRuleRead, PermissionAlertHistoryRead}
RuleAllPermission = []string{PermissionAlertRuleRead, PermissionAlertRuleWrite, PermissionAlertHistoryRead, PermissionElasticsearchClusterRead}
AlertReadPermission = []string{PermissionAlertHistoryRead}
AlertAllPermission = []string{PermissionAlertHistoryRead, PermissionAlertHistoryWrite}
AlertMessageReadPermission = []string{PermissionAlertMessageRead, PermissionAlertHistoryRead}
AlertMessageAllPermission = []string{PermissionAlertMessageRead, PermissionAlertMessageWrite, PermissionAlertHistoryRead}
ChannelReadPermission = []string{PermissionAlertChannelRead}
ChannelAllPermission = []string{PermissionAlertChannelRead, PermissionAlertChannelWrite}
ClusterOverviewReadPermission = []string{PermissionElasticsearchClusterRead, PermissionElasticsearchIndexRead, PermissionElasticsearchNodeRead, PermissionElasticsearchMetricRead}
ClusterOverviewAllPermission = ClusterOverviewReadPermission
MonitoringReadPermission = ClusterOverviewAllPermission
ActivitiesReadPermission = []string{PermissionActivityRead}
ActivitiesAllPermission = []string{PermissionActivityRead, PermissionActivityWrite}
AuditLogsReadPermission = []string{PermissionAuditLogRead}
AuditLogsAllPermission = []string{PermissionAuditLogRead, PermissionAuditLogWrite}
TenantCustomerReadPermission = []string{TenantCustomerRead}
TenantCustomerAllPermission = []string{TenantCustomerRead, TenantCustomerAll}
SubscriptionReadPermission = []string{SubscriptionRead}
SubscriptionAllPermission = []string{SubscriptionRead, SubscriptionAll}
AgentInstanceReadPermission = []string{PermissionAgentInstanceRead}
AgentInstanceAllPermission = []string{PermissionAgentInstanceRead, PermissionAgentInstanceWrite}
CredentialReadPermission = []string{PermissionCredentialRead}
CredentialAllPermission = []string{PermissionCredentialRead, PermissionCredentialWrite}
DataMigrationReadPermission = []string{PermissionMigrationTaskRead}
DataMigrationAllPermission = []string{PermissionMigrationTaskRead, PermissionMigrationTaskWrite}
DataComparisonReadPermission = []string{PermissionComparisonTaskRead}
DataComparisonAllPermission = []string{PermissionComparisonTaskRead, PermissionComparisonTaskWrite}
DashboardReadPermission = []string{PermissionLayoutRead}
DashboardAllPermission = []string{PermissionLayoutRead, PermissionLayoutWrite}
WorkbenchReadPermission = []string{PermissionElasticsearchClusterRead, PermissionActivityRead, PermissionAlertMessageRead, PermissionElasticsearchMetricRead}
WorkbenchAllPermission = WorkbenchReadPermission
)
var AdminPrivilege = []string{
SecurityAll, ClusterAll, CommandAll,
InstanceAll, EntryAll, RouterAll, FlowAll,
IndexAll, ViewsAll, DiscoverAll,
RuleAll, AlertAll, ChannelAll,
AlertMessageAll,
ClusterOverviewAll, MonitoringAll, ActivitiesAll,
AliasAll, AgentInstanceAll, CredentialAll,
DataMigrationAll, DataComparisonAll, DashboardAll, DevtoolConsoleAll,
WorkbenchAll, TenantCustomerAll, SubscriptionAll, AuditLogsAll,
}
func init() {
PermissionMap = map[string][]string{
UserRead: UserReadPermission,
UserAll: UserAllPermission,
RoleRead: RoleReadPermission,
RoleAll: RoleAllPermission,
SecurityAll: SecurityAllPermission,
SecurityRead: SecurityReadPermission,
ClusterRead: ClusterReadPermission,
ClusterAll: ClusterAllPermission,
CommandRead: CommandReadPermission,
CommandAll: CommandAllPermission,
InstanceRead: InstanceReadPermission,
InstanceAll: InstanceAllPermission,
EntryRead: EntryReadPermission,
EntryAll: EntryAllPermission,
RouterRead: RouterReadPermission,
RouterAll: RouterAllPermission,
FlowRead: FlowReadPermission,
FlowAll: FlowAllPermission,
IndexAll: IndexAllPermission,
IndexRead: IndexReadPermission,
AliasAll: AliasAllPermission,
AliasRead: AliasReadPermission,
ViewsAll: ViewsAllPermission,
ViewsRead: ViewsReadPermission,
DiscoverRead: DiscoverReadPermission,
DiscoverAll: DiscoverAllPermission,
RuleRead: RuleReadPermission,
RuleAll: RuleAllPermission,
AlertRead: AlertReadPermission,
AlertAll: AlertAllPermission,
ChannelRead: ChannelReadPermission,
ChannelAll: ChannelAllPermission,
AlertMessageRead: AlertMessageReadPermission,
AlertMessageAll: AlertMessageAllPermission,
ClusterOverviewRead: ClusterOverviewReadPermission,
ClusterOverviewAll: ClusterOverviewAllPermission,
MonitoringAll: MonitoringReadPermission,
MonitoringRead: MonitoringReadPermission,
ActivitiesAll: ActivitiesAllPermission,
ActivitiesRead: ActivitiesReadPermission,
AuditLogsAll: AuditLogsAllPermission,
AuditLogsRead: AuditLogsReadPermission,
AgentInstanceAll: AgentInstanceAllPermission,
AgentInstanceRead: AgentInstanceReadPermission,
CredentialAll: CredentialAllPermission,
CredentialRead: CredentialReadPermission,
DataMigrationRead: DataMigrationReadPermission,
DataMigrationAll: DataMigrationAllPermission,
DataComparisonRead: DataComparisonReadPermission,
DataComparisonAll: DataComparisonAllPermission,
DashboardRead: DashboardReadPermission,
DashboardAll: DashboardAllPermission,
WorkbenchAll: WorkbenchAllPermission,
WorkbenchRead: WorkbenchReadPermission,
TenantCustomerRead: TenantCustomerReadPermission,
TenantCustomerAll: TenantCustomerAllPermission,
SubscriptionRead: SubscriptionReadPermission,
SubscriptionAll: SubscriptionAllPermission,
}
}
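
A tiny illustration of how a coarse UI-level privilege expands into the fine-grained API permissions used during validation; the loop itself is only an example:

```go
package example

import (
	"fmt"

	"infini.sh/console/core/security/enum"
)

// demoPermissionMap prints the fine-grained permissions that a role granting
// "system.user:all" is expanded into at validation time.
func demoPermissionMap() {
	for _, p := range enum.PermissionMap[enum.UserAll] {
		fmt.Println(p) // user:read, user:write, role:read
	}
}
```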


@ -0,0 +1,88 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package security
import (
"infini.sh/framework/core/api/routetree"
"infini.sh/framework/core/kv"
"infini.sh/framework/core/util"
log "src/github.com/cihub/seelog"
"sync"
)
var permissionsMap = map[string]interface{}{}
var permissionsLocker = sync.Mutex{}
func RegisterPermission(typ string, permissions interface{}) {
permissionsLocker.Lock()
defer permissionsLocker.Unlock()
permissionsMap[typ] = permissions
}
func GetPermissions(typ string) interface{} {
permissionsLocker.Lock()
defer permissionsLocker.Unlock()
return permissionsMap[typ]
}
var RoleMap = make(map[string]Role)
type Token struct {
JwtStr string `json:"jwt_str"`
Value string `json:"value"`
ExpireIn int64 `json:"expire_in"`
}
var userTokenLocker = sync.RWMutex{}
var tokenMap = make(map[string]Token)
const KVUserToken = "user_token"
func SetUserToken(key string, token Token) {
userTokenLocker.Lock()
tokenMap[key] = token
userTokenLocker.Unlock()
_ = kv.AddValue(KVUserToken, []byte(key), util.MustToJSONBytes(token))
}
func GetUserToken(key string) *Token {
userTokenLocker.RLock()
defer userTokenLocker.RUnlock()
if token, ok := tokenMap[key]; ok {
return &token
}
tokenBytes, err := kv.GetValue(KVUserToken, []byte(key))
if err != nil {
log.Errorf("get user token from kv error: %v", err)
return nil
}
if tokenBytes == nil {
return nil
}
token := Token{}
util.MustFromJSONBytes(tokenBytes, &token)
return &token
}
func DeleteUserToken(key string) {
userTokenLocker.Lock()
delete(tokenMap, key)
userTokenLocker.Unlock()
_ = kv.DeleteKey(KVUserToken, []byte(key))
}
var apiPermissionRouter = map[string]*routetree.Router{}
var apiPermissionLocker = sync.Mutex{}
func RegisterAPIPermissionRouter(typ string, router *routetree.Router) {
apiPermissionLocker.Lock()
defer apiPermissionLocker.Unlock()
apiPermissionRouter[typ] = router
}
func GetAPIPermissionRouter(typ string) *routetree.Router {
apiPermissionLocker.Lock()
defer apiPermissionLocker.Unlock()
return apiPermissionRouter[typ]
}
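
A sketch of the token-cache round trip; it assumes the framework's kv store has been initialized, since `SetUserToken` also persists the token outside the in-memory map:

```go
package example

import (
	"time"

	security "infini.sh/console/core/security"
)

// demoTokenStore caches a freshly issued token and checks it back, the same
// way ValidateLogin later verifies that the token is known and not expired.
func demoTokenStore(userID, signedJWT string) bool {
	security.SetUserToken(userID, security.Token{
		JwtStr:   signedJWT,
		ExpireIn: time.Now().Unix() + 86400, // 24h, matching GenerateAccessToken
	})
	t := security.GetUserToken(userID)
	return t != nil && t.ExpireIn > time.Now().Unix()
}
```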

core/security/role.go

@ -0,0 +1,94 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package security
import (
"fmt"
"infini.sh/console/core/security/enum"
"infini.sh/framework/core/orm"
"time"
)
type Role struct {
orm.ORMObjectBase
Name string `json:"name" elastic_mapping:"name: { type: keyword }"`
Type string `json:"type" elastic_mapping:"type: { type: keyword }"`
Description string `json:"description" elastic_mapping:"description: { type: text }"`
Builtin bool `json:"builtin" elastic_mapping:"builtin: { type: boolean }"`
Privilege RolePrivilege `json:"privilege" elastic_mapping:"privilege: { type: object }"`
}
type RolePrivilege struct {
Platform []string `json:"platform,omitempty" elastic_mapping:"platform: { type: keyword }"`
Elasticsearch ElasticsearchPrivilege `json:"elasticsearch,omitempty" elastic_mapping:"elasticsearch: { type: object }"`
}
type ElasticsearchPrivilege struct {
Cluster ClusterPrivilege `json:"cluster,omitempty" elastic_mapping:"cluster: { type: object }"`
Index []IndexPrivilege `json:"index,omitempty" elastic_mapping:"index: { type: object }"`
}
type InnerCluster struct {
ID string `json:"id" elastic_mapping:"id: { type: keyword }"`
Name string `json:"name" elastic_mapping:"name: { type: keyword }"`
}
type ClusterPrivilege struct {
Resources []InnerCluster `json:"resources,omitempty" elastic_mapping:"resources: { type: object }"`
Permissions []string `json:"permissions,omitempty" elastic_mapping:"permissions: { type: keyword }"`
}
type IndexPrivilege struct {
Name []string `json:"name,omitempty" elastic_mapping:"name: { type: keyword }"`
Permissions []string `json:"permissions,omitempty" elastic_mapping:"permissions: { type: keyword }"`
}
type RoleType = string
const (
Platform RoleType = "platform"
Elasticsearch RoleType = "elasticsearch"
)
func IsAllowRoleType(roleType string) (err error) {
if roleType != Platform && roleType != Elasticsearch {
err = fmt.Errorf("invalid role type %s ", roleType)
return
}
return
}
var BuiltinRoles = make(map[string]Role, 0)
const RoleAdminName = "Administrator"
func init() {
now := time.Now()
BuiltinRoles[RoleAdminName] = Role{
ORMObjectBase: orm.ORMObjectBase{
ID: RoleAdminName,
Created: &now,
},
Name: RoleAdminName,
Type: "platform",
Privilege: RolePrivilege{
Platform: enum.AdminPrivilege,
Elasticsearch: ElasticsearchPrivilege{
Cluster: ClusterPrivilege{
Resources: []InnerCluster{{"*", "*"}},
Permissions: []string{"*"},
},
Index: []IndexPrivilege{
{Name: []string{"*"},
Permissions: []string{"*"},
},
},
},
},
Builtin: true,
Description: "Administrator is a super role.",
}
}
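
For comparison with the built-in Administrator role, a hypothetical read-only role; the cluster ID, index pattern, and permission strings are placeholders, only the types and enum constants come from this commit:

```go
package example

import (
	security "infini.sh/console/core/security"
	"infini.sh/console/core/security/enum"
)

// readOnlyRole grants overview/monitoring on the platform side and read-only
// access to one cluster's log indices on the Elasticsearch side.
var readOnlyRole = security.Role{
	Name: "ReadOnly",
	Type: security.Elasticsearch,
	Privilege: security.RolePrivilege{
		Platform: []string{enum.ClusterOverviewRead, enum.MonitoringRead},
		Elasticsearch: security.ElasticsearchPrivilege{
			Cluster: security.ClusterPrivilege{
				Resources:   []security.InnerCluster{{ID: "my-cluster-id", Name: "my-cluster"}},
				Permissions: []string{"cluster.health"}, // placeholder API permission
			},
			Index: []security.IndexPrivilege{
				{Name: []string{"logs-*"}, Permissions: []string{"indices.get"}}, // placeholder
			},
		},
	},
}
```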

core/security/user.go

@ -0,0 +1,41 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package security
import (
"infini.sh/framework/core/orm"
)
type User struct {
orm.ORMObjectBase
AuthProvider string `json:"auth_provider" elastic_mapping:"auth_provider: { type: keyword }"`
Username string `json:"name" elastic_mapping:"name: { type: keyword }"`
Nickname string `json:"nick_name" elastic_mapping:"nick_name: { type: keyword }"`
Password string `json:"password" elastic_mapping:"password: { type: keyword }"`
Email string `json:"email" elastic_mapping:"email: { type: keyword }"`
Phone string `json:"phone" elastic_mapping:"phone: { type: keyword }"`
Tags []string `json:"tags" elastic_mapping:"tags: { type: keyword }"`
AvatarUrl string `json:"avatar_url" elastic_mapping:"avatar_url: { type: keyword }"`
Roles []UserRole `json:"roles" elastic_mapping:"roles: { type: object }"`
Payload interface{} `json:"-"` //used for storing additional data derived from auth provider
}
func (user *User) GetPermissions() (roles []string, privileges []string) {
for _, v := range user.Roles {
role, ok := RoleMap[v.Name]
if ok {
roles = append(roles, v.Name)
privileges = append(privileges, role.Privilege.Platform...)
}
}
return roles, privileges
}
type UserRole struct {
ID string `json:"id" elastic_mapping:"id: { type: keyword }"`
Name string `json:"name" elastic_mapping:"name: { type: keyword }"`
}
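
A small sketch showing that `GetPermissions` resolves role names through the package-level `RoleMap`, which therefore has to be populated first; the user values are hypothetical:

```go
package example

import (
	"fmt"

	security "infini.sh/console/core/security"
)

func demoUserPermissions() {
	// Normally the security module loads roles into RoleMap; here we seed it
	// with the built-in Administrator role defined in this commit.
	security.RoleMap[security.RoleAdminName] = security.BuiltinRoles[security.RoleAdminName]

	u := security.User{
		Username: "admin",
		Roles:    []security.UserRole{{ID: security.RoleAdminName, Name: security.RoleAdminName}},
	}
	roles, privileges := u.GetPermissions()
	fmt.Println(roles, privileges)
}
```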

core/security/validate.go

@ -0,0 +1,383 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package security
import (
"errors"
"fmt"
"github.com/golang-jwt/jwt"
"infini.sh/console/core/security/enum"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/radix"
"infini.sh/framework/core/util"
"net/http"
"strings"
"time"
)
type EsRequest struct {
Doc string `json:"doc"`
Privilege string `json:"privilege"`
ClusterRequest
IndexRequest
}
type ClusterRequest struct {
Cluster string `json:"cluster"`
Privilege []string `json:"privilege"`
}
type IndexRequest struct {
Cluster string `json:"cluster"`
Index string `json:"index"`
Privilege []string `json:"privilege"`
}
type ElasticsearchAPIPrivilege map[string]map[string]struct{}
func (ep ElasticsearchAPIPrivilege) Merge(epa ElasticsearchAPIPrivilege) {
for k, permissions := range epa {
if _, ok := ep[k]; ok {
for permission := range permissions {
ep[k][permission] = struct{}{}
}
} else {
ep[k] = permissions
}
}
}
type RolePermission struct {
Platform []string `json:"platform,omitempty"`
ElasticPrivilege struct {
Cluster ElasticsearchAPIPrivilege
Index map[string]ElasticsearchAPIPrivilege
}
}
func NewIndexRequest(ps httprouter.Params, privilege []string) IndexRequest {
index := ps.ByName("index")
clusterId := ps.ByName("id")
return IndexRequest{
Cluster: clusterId,
Index: index,
Privilege: privilege,
}
}
func NewClusterRequest(ps httprouter.Params, privilege []string) ClusterRequest {
clusterId := ps.ByName("id")
return ClusterRequest{
Cluster: clusterId,
Privilege: privilege,
}
}
func validateApiPermission(apiPrivileges map[string]struct{}, permissions map[string]struct{}) {
if _, ok := permissions["*"]; ok {
for privilege := range apiPrivileges {
delete(apiPrivileges, privilege)
}
return
}
for permission := range permissions {
if _, ok := apiPrivileges[permission]; ok {
delete(apiPrivileges, permission)
}
}
for privilege := range apiPrivileges {
position := strings.Index(privilege, ".")
if position == -1 {
continue
}
prefix := privilege[:position]
if _, ok := permissions[prefix+".*"]; ok {
delete(apiPrivileges, privilege)
}
}
}
func validateIndexPermission(indexName string, apiPrivileges map[string]struct{}, privilege ElasticsearchAPIPrivilege) bool {
permissions, hasAll := privilege["*"]
if hasAll {
validateApiPermission(apiPrivileges, permissions)
}
for indexPattern, v := range privilege {
if radix.Match(indexPattern, indexName) {
validateApiPermission(apiPrivileges, v)
}
}
return len(apiPrivileges) == 0
}
func ValidateIndex(req IndexRequest, userRole RolePermission) (err error) {
var (
apiPrivileges = map[string]struct{}{}
allowed bool
)
for _, privilege := range req.Privilege {
apiPrivileges[privilege] = struct{}{}
}
indexPermissions, hasAllCluster := userRole.ElasticPrivilege.Index["*"]
if hasAllCluster {
allowed = validateIndexPermission(req.Index, apiPrivileges, indexPermissions)
if allowed {
return nil
}
}
if _, ok := userRole.ElasticPrivilege.Index[req.Cluster]; !ok {
return fmt.Errorf("no permission of cluster [%s]", req.Cluster)
}
allowed = validateIndexPermission(req.Index, apiPrivileges, userRole.ElasticPrivilege.Index[req.Cluster])
if allowed {
return nil
}
var apiPermission string
for k := range apiPrivileges {
apiPermission = k
}
return fmt.Errorf("no index api permission: %s", apiPermission)
}
func ValidateCluster(req ClusterRequest, userRole RolePermission) (err error) {
var (
apiPrivileges = map[string]struct{}{}
)
for _, privilege := range req.Privilege {
apiPrivileges[privilege] = struct{}{}
}
clusterPermissions, hasAllCluster := userRole.ElasticPrivilege.Cluster["*"]
if hasAllCluster {
validateApiPermission(apiPrivileges, clusterPermissions)
if len(apiPrivileges) == 0 {
return nil
}
}
if _, ok := userRole.ElasticPrivilege.Cluster[req.Cluster]; !ok && !hasAllCluster {
return fmt.Errorf("no permission of cluster [%s]", req.Cluster)
}
validateApiPermission(apiPrivileges, userRole.ElasticPrivilege.Cluster[req.Cluster])
if len(apiPrivileges) == 0 {
return nil
}
var apiPermission string
for k := range apiPrivileges {
apiPermission = k
}
return fmt.Errorf("no cluster api permission: %s", apiPermission)
}
func CombineUserRoles(roleNames []string) RolePermission {
newRole := RolePermission{}
clusterPrivilege := ElasticsearchAPIPrivilege{}
indexPrivilege := map[string]ElasticsearchAPIPrivilege{}
platformM := map[string]struct{}{}
for _, val := range roleNames {
role := RoleMap[val]
for _, pm := range role.Privilege.Platform {
if _, ok := platformM[pm]; !ok {
newRole.Platform = append(newRole.Platform, pm)
platformM[pm] = struct{}{}
}
}
singleIndexPrivileges := ElasticsearchAPIPrivilege{}
for _, ip := range role.Privilege.Elasticsearch.Index {
for _, indexName := range ip.Name {
if _, ok := singleIndexPrivileges[indexName]; !ok {
singleIndexPrivileges[indexName] = map[string]struct{}{}
}
for _, permission := range ip.Permissions {
singleIndexPrivileges[indexName][permission] = struct{}{}
}
}
}
for _, cp := range role.Privilege.Elasticsearch.Cluster.Resources {
if _, ok := indexPrivilege[cp.ID]; ok {
indexPrivilege[cp.ID].Merge(singleIndexPrivileges)
} else {
indexPrivilege[cp.ID] = singleIndexPrivileges
}
var (
privileges map[string]struct{}
ok bool
)
if privileges, ok = clusterPrivilege[cp.ID]; !ok {
privileges = map[string]struct{}{}
}
for _, permission := range role.Privilege.Elasticsearch.Cluster.Permissions {
privileges[permission] = struct{}{}
}
clusterPrivilege[cp.ID] = privileges
}
}
newRole.ElasticPrivilege.Cluster = clusterPrivilege
newRole.ElasticPrivilege.Index = indexPrivilege
return newRole
}
func GetRoleClusterMap(roles []string) map[string][]string {
userClusterMap := make(map[string][]string, 0)
for _, roleName := range roles {
role, ok := RoleMap[roleName]
if ok {
for _, ic := range role.Privilege.Elasticsearch.Cluster.Resources {
userClusterMap[ic.ID] = append(userClusterMap[ic.ID], role.Privilege.Elasticsearch.Cluster.Permissions...)
}
}
}
return userClusterMap
}
// GetRoleCluster get cluster id by given role names
// return true when has all cluster privilege, otherwise return cluster id list
func GetRoleCluster(roles []string) (bool, []string) {
userClusterMap := GetRoleClusterMap(roles)
if _, ok := userClusterMap["*"]; ok {
return true, nil
}
realCluster := make([]string, 0, len(userClusterMap))
for k, _ := range userClusterMap {
realCluster = append(realCluster, k)
}
return false, realCluster
}
// GetCurrentUserCluster get cluster id by current login user
// return true when has all cluster privilege, otherwise return cluster id list
func GetCurrentUserCluster(req *http.Request) (bool, []string) {
ctxVal := req.Context().Value("user")
if userClaims, ok := ctxVal.(*UserClaims); ok {
return GetRoleCluster(userClaims.Roles)
} else {
panic("user context value not found")
}
}
func GetRoleIndex(roles []string, clusterID string) (bool, []string) {
var realIndex []string
for _, roleName := range roles {
role, ok := RoleMap[roleName]
if ok {
for _, ic := range role.Privilege.Elasticsearch.Cluster.Resources {
if ic.ID != "*" && ic.ID != clusterID {
continue
}
for _, ip := range role.Privilege.Elasticsearch.Index {
if util.StringInArray(ip.Name, "*") {
return true, nil
}
realIndex = append(realIndex, ip.Name...)
}
}
}
}
return false, realIndex
}
func ValidateLogin(authorizationHeader string) (clams *UserClaims, err error) {
if authorizationHeader == "" {
err = errors.New("authorization header is empty")
return
}
fields := strings.Fields(authorizationHeader)
if fields[0] != "Bearer" || len(fields) != 2 {
err = errors.New("authorization header is invalid")
return
}
tokenString := fields[1]
token, err := jwt.ParseWithClaims(tokenString, &UserClaims{}, func(token *jwt.Token) (interface{}, error) {
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(Secret), nil
})
if err != nil {
return
}
clams, ok := token.Claims.(*UserClaims)
if clams.UserId == "" {
err = errors.New("user id is empty")
return
}
//fmt.Println("user token", clams.UserId, TokenMap[clams.UserId])
tokenVal := GetUserToken(clams.UserId)
if tokenVal == nil {
err = errors.New("token is invalid")
return
}
if tokenVal.ExpireIn < time.Now().Unix() {
err = errors.New("token is expire in")
DeleteUserToken(clams.UserId)
return
}
if ok && token.Valid {
return clams, nil
}
return
}
func ValidatePermission(claims *UserClaims, permissions []string) (err error) {
user := claims.ShortUser
if user.UserId == "" {
err = errors.New("user id is empty")
return
}
if user.Roles == nil {
err = errors.New("api permission is empty")
return
}
// permission check
userPermissions := make([]string, 0)
for _, role := range user.Roles {
if _, ok := RoleMap[role]; ok {
for _, v := range RoleMap[role].Privilege.Platform {
userPermissions = append(userPermissions, v)
}
}
}
userPermissionMap := make(map[string]struct{})
for _, val := range userPermissions {
for _, v := range enum.PermissionMap[val] {
userPermissionMap[v] = struct{}{}
}
}
for _, v := range permissions {
if _, ok := userPermissionMap[v]; !ok {
err = errors.New("permission denied")
return
}
}
return nil
}
func SearchAPIPermission(typ string, method, path string) (permission string, params map[string]string, matched bool) {
method = strings.ToLower(method)
router := GetAPIPermissionRouter(typ)
if router == nil {
panic(fmt.Errorf("can not found api permission router of %s", typ))
}
return router.Search(method, path)
}
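
A hypothetical middleware body combining `ValidateLogin` and `ValidatePermission`; the permission set used here (`enum.UserReadPermission`) is just an example:

```go
package example

import (
	"net/http"

	security "infini.sh/console/core/security"
	"infini.sh/console/core/security/enum"
)

// checkRequest validates the Bearer token from the Authorization header, then
// checks that the caller's roles expand to the required fine-grained permissions.
func checkRequest(w http.ResponseWriter, r *http.Request) bool {
	claims, err := security.ValidateLogin(r.Header.Get("Authorization"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return false
	}
	if err := security.ValidatePermission(claims, enum.UserReadPermission); err != nil {
		http.Error(w, err.Error(), http.StatusForbidden)
		return false
	}
	return true
}
```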

core/user.go

@ -0,0 +1,34 @@
package core
import (
"github.com/emirpasic/gods/sets/hashset"
)
const (
ROLE_GUEST string = "guest"
ROLE_ADMIN string = "admin"
)
const (
//GUEST
PERMISSION_SNAPSHOT_VIEW string = "view_snapshot"
//ADMIN
PERMISSION_ADMIN_MINIMAL string = "admin_minimal"
)
func GetPermissionsByRole(role string) (*hashset.Set, error) {
	initRolesMap()
	return rolesMap[role], nil
}
var rolesMap = map[string]*hashset.Set{}
func initRolesMap() {
	// the map literal above is never nil, so guard on its content instead
	if len(rolesMap) > 0 {
		return
	}
	set := hashset.New()
	set.Add(PERMISSION_SNAPSHOT_VIEW)
	rolesMap[ROLE_GUEST] = set
}
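A minimal usage sketch, assuming it runs in the same package; the helper name is illustrative:

    func canViewSnapshot(role string) bool {
        perms, err := GetPermissionsByRole(role)
        if err != nil || perms == nil {
            return false // unknown role: deny by default
        }
        return perms.Contains(PERMISSION_SNAPSHOT_VIEW)
    }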


@ -1,2 +0,0 @@
## Database initialization scripts


@ -1,7 +0,0 @@
FROM scratch
COPY ./console /console
COPY ./config/ /config/
COPY ./console.yml /console.yml
CMD ["/console"]


@ -1,7 +0,0 @@
FROM amd64/centos:7.9.2009
COPY ./console /console
COPY ./config/ /config/
COPY ./console.yml /console.yml
CMD ["/console"]


@ -1,38 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
NhAAAAAwEAAQAAAYEAzQ4wJFpd+kt5hVntfyVvhUnWhUPvfzpQf0NDyn7TnYxnG0C6uEVs
DdKEoMabNwz/zgK0Hlh+9qTBZ/HdddCKH18dDjIrjob+3YKi107yb4nbAJRKueJ9sK+ZWt
zv2ZjaYav9S9vGx+NWbC0ODqsTywg3VgRoYfai/Tz6iH5FIrSYp4ds15m+bEdtpHs3G2x3
Of8Q937lJb7W14rg4RZuTMg7FjirCEK8dk3pzLt7K0I1fiDpC3VxluX6p27apjcpx6IGo9
OMJzTI2SgO+RHrx29gNMKiq0oz1eE4OBDEe9dhNkRV0Hh6BjJ39K8VbwjhvdFCHTkAm12o
bbFxB/ZYc3IKEK7OMeRqdBlTx5yq2H4W5xhLy2qmX7itWPvBGBzhuNclwXvOjYwm/HQjlu
OIgUKQjUBWm7I0wJdM1n+kh/+kLMmFJC9fncz2nJAU6J5WMD1nx7CQh1AZfdEKs/AFWG70
FUKUlvsZHmuNrBuggiXKq4m17rKcxLGphYHL6mSJAAAFiO+1v/rvtb/6AAAAB3NzaC1yc2
EAAAGBAM0OMCRaXfpLeYVZ7X8lb4VJ1oVD7386UH9DQ8p+052MZxtAurhFbA3ShKDGmzcM
/84CtB5YfvakwWfx3XXQih9fHQ4yK46G/t2CotdO8m+J2wCUSrnifbCvmVrc79mY2mGr/U
vbxsfjVmwtDg6rE8sIN1YEaGH2ov08+oh+RSK0mKeHbNeZvmxHbaR7Nxtsdzn/EPd+5SW+
1teK4OEWbkzIOxY4qwhCvHZN6cy7eytCNX4g6Qt1cZbl+qdu2qY3KceiBqPTjCc0yNkoDv
kR68dvYDTCoqtKM9XhODgQxHvXYTZEVdB4egYyd/SvFW8I4b3RQh05AJtdqG2xcQf2WHNy
ChCuzjHkanQZU8ecqth+FucYS8tqpl+4rVj7wRgc4bjXJcF7zo2MJvx0I5bjiIFCkI1AVp
uyNMCXTNZ/pIf/pCzJhSQvX53M9pyQFOieVjA9Z8ewkIdQGX3RCrPwBVhu9BVClJb7GR5r
jawboIIlyquJte6ynMSxqYWBy+pkiQAAAAMBAAEAAAGABfKsaNGKOlFoI/sYzYBbfMVIiL
MQxmL9pMNhuwT0xHQnJX46LFAvMzNxD2zTYcRpwyMG8H5mqGbdCVPVta4n44MRx7Ci3M6D
pA8/A/nRRHT+OkUS6dNtC+v8Ccuw1WH+q6ief03PtUqd3iNsbfZ+a3xAhqk4EedikO/s4H
qxLLGKYAmomZRnFqL3xjagwZXi23bPmi4/HVosxzHLFhxddLK2LA3WwDWXW+MkrgCeMQIJ
pS/1MpTkh5kCLUsk4n9lFI4P3gB+IFGNtGnBmIhwz/2rjXc5OKD5WlXBdGGQ2mWK49NMlJ
LGBSDrAeFErY3Ja8NnOZcXG9o76V6qKQIib8wVDJ0klstDPxBZSLxs2OkCZpKya8ARA1Ci
T48Lbsc/DCdsmajC3zpNuI3Li7ofbzvgCSf7A5rxOghztPY9fD9vdSdPRWoBqIsUfizgO1
mdXzzsF/iBqwlbSCVrzeKleZAAsCUU/0XLUnaBuSKT2LhYvDu3aIC8vf5tN9uAAIWBAAAA
wHsJpPmlt4gKNmWEm3NyARj8oxJdpcaV4M95ofmb2bWzbvW6Vqce+eswe2ctieISN+7w+2
JQB5saNgHhV3D4V3Be3OWkB1KwOjDJmwN5IcmTT+Kmtn7AJ+0yukNMbnNXEbT1oO3KUmfv
wI294u0jlxgSgsRRqYz/dP0UxPUY/z0g40E8pmV4FK7ogxzHKVcLTFTMXqNqaJQCRp4cg+
JgigXgoFzRJLx3CrcROxLex4uocbnSXNYhCCURDVT5S2krWAAAAMEA7CaEAEX+zt6kIINs
kIH6F8HJJ7FcaVKdzmvt/fK5njm/b5NXgo5Iaar9u0OFEpmkI0v5BZEK6IWZN96jLL8iBx
kqkjbE2LuqliXcB+61zVCBi/RcqYDTYOmhyJcG/NcE1e+IAIhdMtNtpr+Dcd3FsGW6GltN
Ul5U6AGcvacT/lJw0kYqRsJ8La0es9Oxsks9DsKTigCVL+rCv+ZJ63mTqPCtsYCagfUzJA
AkgaSCiHNwgvsM2H7x3T3s9KEH8EGRAAAAwQDeSpHY94RZDY4rUQFxlHno+xWeZvbTHFoy
IfXF5drt/eEnfaGJY7eeBNVJI5PAbcNAuN050ZyxLov221nz9Fu8IlqeoNAfrUFfJnWVKg
ppDz3hHq7WKlxwHEJY3Pwd3/G0ppsMlaTMWyNWCkJ7QNuL3zmxgxx2/Dq/tvxDI2DvXCve
HPOdBIM2Y05he0n/zjhko3Qal+zb52Ie6qAEmQE2GEyQf27KLUZ/ww2kKa/HTjvqR9/dwd
eDxswDpr5Rd3kAAAARcm9vdEA2YTliOThkZTA0YzABAg==
-----END OPENSSH PRIVATE KEY-----


@ -1,13 +0,0 @@
version: '3'
services:
infini-search-center-db:
image: mariadb:10.1.19
# volumes:
# - ../data/db_data:/var/lib/mysql
restart: always
container_name: "infini-search-center-db"
environment:
MYSQL_ROOT_PASSWORD: admin
ports:
- "3306:3306"


@ -1,14 +0,0 @@
version: "3.5"
services:
infini-search-center-api-dev:
image: docker.infini.ltd:64443/golang-dev:latest
ports:
- 9010:9000
container_name: "infini-search-center-dev"
volumes:
- ../:/go/src/infini.sh/console
- ./entrypoint.sh:/entrypoint.sh
volumes:
dist:


@ -1,10 +0,0 @@
#!/bin/sh
cd /go/src/infini.sh/
echo "INFINI GOLANG ENV READY TO ROCK!"
cd search-center
make build
cd /go/src/infini.sh/console && ./bin/search-center


@ -1,2 +0,0 @@
Host *
StrictHostKeyChecking no


@ -9,6 +9,7 @@ import (
"infini.sh/console/plugin/audit_log"
"infini.sh/framework/core/api"
model2 "infini.sh/framework/core/model"
elastic2 "infini.sh/framework/modules/elastic"
_ "time/tzdata"
log "github.com/cihub/seelog"
@ -16,29 +17,27 @@ import (
"infini.sh/console/model"
"infini.sh/console/model/alerting"
"infini.sh/console/model/insight"
"infini.sh/console/modules/security"
_ "infini.sh/console/plugin"
_ "infini.sh/console/plugin/managed"
setup1 "infini.sh/console/plugin/setup"
alerting2 "infini.sh/console/service/alerting"
"infini.sh/framework"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/env"
"infini.sh/framework/core/global"
_ "infini.sh/framework/core/log"
"infini.sh/framework/core/module"
"infini.sh/framework/core/orm"
task1 "infini.sh/framework/core/task"
_ "infini.sh/framework/modules/api"
elastic2 "infini.sh/framework/modules/elastic"
"infini.sh/framework/modules/metrics"
"infini.sh/framework/modules/pipeline"
queue2 "infini.sh/framework/modules/queue/disk_queue"
"infini.sh/framework/modules/redis"
"infini.sh/framework/modules/security"
"infini.sh/framework/modules/stats"
"infini.sh/framework/modules/task"
"infini.sh/framework/modules/web"
_ "infini.sh/framework/plugins"
_ "infini.sh/framework/plugins/managed"
)
var appConfig *config.AppConfig
@ -120,7 +119,6 @@ func main() {
module.Start()
var initFunc = func() {
elastic2.InitTemplate(false)
//orm.RegisterSchema(model.Dict{}, "dict")


@ -9,6 +9,7 @@ import (
"errors"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/console/plugin/managed/server"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
@ -18,7 +19,6 @@ import (
"infini.sh/framework/modules/elastic/adapter"
"infini.sh/framework/modules/elastic/common"
"infini.sh/framework/modules/elastic/metadata"
"infini.sh/framework/plugins/managed/server"
"net/http"
"runtime"
"sync/atomic"


@ -5,13 +5,14 @@
package api
import (
"infini.sh/console/core"
"infini.sh/console/core/security/enum"
"infini.sh/console/plugin/managed/server"
"infini.sh/framework/core/api"
"infini.sh/framework/core/api/rbac/enum"
"infini.sh/framework/plugins/managed/server"
)
type APIHandler struct {
api.Handler
core.Handler
}
func Init() {


@ -8,6 +8,7 @@ import (
"bytes"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/console/plugin/managed/common"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/kv"
@ -16,7 +17,6 @@ import (
"infini.sh/framework/core/util"
common2 "infini.sh/framework/modules/elastic/common"
metadata2 "infini.sh/framework/modules/elastic/metadata"
"infini.sh/framework/plugins/managed/common"
"time"
)


@ -7,11 +7,10 @@ package common
import (
log "github.com/cihub/seelog"
"infini.sh/console/modules/agent/model"
"infini.sh/console/plugin/managed/common"
"infini.sh/framework/core/env"
"infini.sh/framework/plugins/managed/common"
)
func GetAgentConfig() *model.AgentConfig {
agentCfg := &model.AgentConfig{
Enabled: true,
@ -19,7 +18,7 @@ func GetAgentConfig() *model.AgentConfig {
DownloadURL: "https://release.infinilabs.com/agent/stable",
},
}
_, err := env.ParseConfig("agent", agentCfg )
_, err := env.ParseConfig("agent", agentCfg)
if err != nil {
log.Errorf("agent config not found: %v", err)
}


@ -0,0 +1,190 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package api
import (
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/event"
"infini.sh/framework/core/global"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"strings"
)
func (h *APIHandler) HandleSearchActivityAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody:=util.MapStr{}
reqBody := struct{
Keyword string `json:"keyword"`
Size int `json:"size"`
From int `json:"from"`
Aggregations []elastic.SearchAggParam `json:"aggs"`
Highlight elastic.SearchHighlightParam `json:"highlight"`
Filter elastic.SearchFilterParam `json:"filter"`
Sort []string `json:"sort"`
StartTime interface{} `json:"start_time"`
EndTime interface{} `json:"end_time"`
}{}
err := h.DecodeJSON(req, &reqBody)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
return
}
aggs := elastic.BuildSearchTermAggregations(reqBody.Aggregations)
aggs["term_cluster_id"] = util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.cluster_id",
"size": 1000,
},
"aggs": util.MapStr{
"term_cluster_name": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.cluster_name",
"size": 1,
},
},
},
}
filter := elastic.BuildSearchTermFilter(reqBody.Filter)
if reqBody.StartTime != "" {
filter = append(filter, util.MapStr{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": reqBody.StartTime,
"lte": reqBody.EndTime,
},
},
})
}
clusterFilter, hasAllPrivilege := h.GetClusterFilter(req, "metadata.labels.cluster_id")
if !hasAllPrivilege && clusterFilter == nil {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
return
}
if !hasAllPrivilege && clusterFilter != nil {
filter = append(filter, clusterFilter)
}
hasAllPrivilege, indexPrivilege := h.GetCurrentUserIndex(req)
if !hasAllPrivilege && len(indexPrivilege) == 0 {
h.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
return
}
if !hasAllPrivilege {
indexShould := make([]interface{}, 0, len(indexPrivilege))
for clusterID, indices := range indexPrivilege {
var (
wildcardIndices []string
normalIndices []string
)
for _, index := range indices {
if strings.Contains(index,"*") {
wildcardIndices = append(wildcardIndices, index)
continue
}
normalIndices = append(normalIndices, index)
}
subShould := []util.MapStr{}
if len(wildcardIndices) > 0 {
subShould = append(subShould, util.MapStr{
"query_string": util.MapStr{
"query": strings.Join(wildcardIndices, " "),
"fields": []string{"metadata.labels.index_name"},
"default_operator": "OR",
},
})
}
if len(normalIndices) > 0 {
subShould = append(subShould, util.MapStr{
"terms": util.MapStr{
"metadata.labels.index_name": normalIndices,
},
})
}
indexShould = append(indexShould, util.MapStr{
"bool": util.MapStr{
"must": []util.MapStr{
{
"wildcard": util.MapStr{
"metadata.labels.cluster_id": util.MapStr{
"value": clusterID,
},
},
},
{
"bool": util.MapStr{
"minimum_should_match": 1,
"should": subShould,
},
},
},
},
})
}
indexFilter := util.MapStr{
"bool": util.MapStr{
"minimum_should_match": 1,
"should": indexShould,
},
}
filter = append(filter, indexFilter)
}
var should = []util.MapStr{}
if reqBody.Keyword != "" {
should = []util.MapStr{
{
"query_string": util.MapStr{
"default_field": "*",
"query": reqBody.Keyword,
},
},
}
}
var boolQuery = util.MapStr{
"filter": filter,
}
if len(should) >0 {
boolQuery["should"] = should
boolQuery["minimum_should_match"] = 1
}
query := util.MapStr{
"aggs": aggs,
"size": reqBody.Size,
"from": reqBody.From,
"_source": []string{"changelog", "id", "metadata", "timestamp"},
"highlight": elastic.BuildSearchHighlight(&reqBody.Highlight),
"query": util.MapStr{
"bool": boolQuery,
},
}
if len(reqBody.Sort) < 2 {
reqBody.Sort = []string{"timestamp", "desc"}
}
query["sort"] = []util.MapStr{
{
reqBody.Sort[0]: util.MapStr{
"order": reqBody.Sort[1],
},
},
}
dsl := util.MustToJSONBytes(query)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(orm.GetWildcardIndexName(event.Activity{}), dsl)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w,resBody, http.StatusInternalServerError )
return
}
w.Write(response.RawResult.Body)
}
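For reference, this handler is registered at POST /elasticsearch/activity/_search (see init.go later in this commit). A minimal request-body sketch built with the same util helpers; the keyword, time range, and sort values are placeholders:

    activityReq := util.MapStr{
        "keyword":    "delete index",           // matched via query_string over all fields
        "size":       20,
        "from":       0,
        "start_time": "now-7d",                 // becomes a range filter on timestamp
        "end_time":   "now",
        "sort":       []string{"timestamp", "desc"},
    }
    body := util.MustToJSONBytes(activityReq)
    _ = body // send as the JSON body of the POST request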


@ -0,0 +1,85 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
"github.com/segmentio/encoding/json"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/util"
"net/http"
)
func (h *APIHandler) HandleAliasAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if !exists{
errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(errStr)
h.WriteError(w, errStr, http.StatusInternalServerError)
return
}
var aliasReq = &elastic.AliasRequest{}
err = h.DecodeJSON(req, aliasReq)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
esVersion := elastic.GetMetadata(targetClusterID).Config.Version
if r, _ := util.VersionCompare(esVersion, "6.4"); r == -1 {
for i := range aliasReq.Actions {
for k, v := range aliasReq.Actions[i] {
if v != nil && v["is_write_index"] != nil {
delete(aliasReq.Actions[i][k], "is_write_index")
log.Warnf("elasticsearch aliases api of version [%s] not supports parameter is_write_index", esVersion)
}
}
}
}
bodyBytes, _ := json.Marshal(aliasReq)
err = client.Alias(bodyBytes)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteAckOKJSON(w)
}
func (h *APIHandler) HandleGetAliasAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
targetClusterID := ps.ByName("id")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
h.WriteJSON(w, err.Error(), http.StatusInternalServerError)
return
}
if !exists {
errStr := fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(errStr)
h.WriteError(w, errStr, http.StatusInternalServerError)
return
}
res, err := client.GetAliasesDetail()
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteJSON(w, res, http.StatusOK)
}
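The alias endpoint (POST /elasticsearch/:id/alias, registered in init.go) accepts the standard Elasticsearch aliases actions format. A minimal payload sketch; the index and alias names are placeholders:

    aliasPayload := util.MapStr{
        "actions": []util.MapStr{
            {"add": util.MapStr{"index": "logs-000002", "alias": "logs-write", "is_write_index": true}},
            {"remove": util.MapStr{"index": "logs-000001", "alias": "logs-write"}},
        },
    }
    // for clusters older than 6.4 the handler strips is_write_index before forwarding
    _ = util.MustToJSONBytes(aliasPayload)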

File diff suppressed because it is too large


@ -0,0 +1,299 @@
package api
import (
"context"
"fmt"
"github.com/buger/jsonparser"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"time"
)
func (h *APIHandler) HandleEseSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if !exists{
errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(errStr)
h.WriteError(w, errStr, http.StatusNotFound)
return
}
var reqParams = struct{
Index string `json:"index"`
Body map[string]interface{} `json:"body"`
DistinctByField map[string]interface{} `json:"distinct_by_field"`
}{}
err = h.DecodeJSON(req, &reqParams)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
ver := client.GetVersion()
if _, ok := reqParams.Body["track_total_hits"]; ok {
if ver.Distribution == "" || ver.Distribution == "elasticsearch" {
vr, _ := util.VersionCompare(ver.Number, "7.0")
if vr < 0 {
delete(reqParams.Body, "track_total_hits")
}
}
}
if reqParams.DistinctByField != nil {
if query, ok := reqParams.Body["query"]; ok {
if qm, ok := query.(map[string]interface{}); ok {
filter, _ := util.MapStr(qm).GetValue("bool.filter")
if fv, ok := filter.([]interface{}); ok{
fv = append(fv, util.MapStr{
"script": util.MapStr{
"script": util.MapStr{
"source": "distinct_by_field",
"lang": "infini",
"params": reqParams.DistinctByField,
},
},
})
util.MapStr(qm).Put("bool.filter", fv)
}
}
}
}
if ver.Distribution == "" || ver.Distribution == "elasticsearch" {
vr, err := util.VersionCompare(ver.Number, "7.2")
if err != nil {
errStr := fmt.Sprintf("version compare error: %v", err)
log.Error(errStr)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if vr < 0 {
if aggs, ok := reqParams.Body["aggs"]; ok {
if maggs, ok := aggs.(map[string]interface{}); ok {
if aggsCounts, ok := maggs["counts"].(map[string]interface{}); ok {
if aggVals, ok := aggsCounts["date_histogram"].(map[string]interface{}); ok {
var interval interface{}
if calendarInterval, ok := aggVals["calendar_interval"]; ok {
interval = calendarInterval
delete(aggVals, "calendar_interval")
}
if fixedInterval, ok := aggVals["fixed_interval"]; ok {
interval = fixedInterval
delete(aggVals, "fixed_interval")
}
aggVals["interval"] = interval
}
}
}
}
}
}
indices, hasAll := h.GetAllowedIndices(req, targetClusterID)
if !hasAll {
if len(indices) == 0 {
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
reqParams.Body["query"] = util.MapStr{
"bool": util.MapStr{
"must": []interface{}{
util.MapStr{
"terms": util.MapStr{
"_index": indices,
},
},
reqParams.Body["query"],
},
},
}
}
reqDSL := util.MustToJSONBytes(reqParams.Body)
timeout := h.GetParameterOrDefault(req, "timeout", "")
var queryArgs *[]util.KV
var ctx context.Context
if timeout != "" {
queryArgs = &[]util.KV{
{
Key: "timeout",
Value: timeout,
},
}
du, err := util.ParseDuration(timeout)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
var cancel context.CancelFunc
// add one second to allow for network delay
ctx, cancel = context.WithTimeout(context.Background(), du + time.Second)
defer cancel()
}
searchRes, err := client.QueryDSL(ctx, reqParams.Index, queryArgs, reqDSL)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if searchRes.StatusCode != http.StatusOK {
h.WriteError(w, string(searchRes.RawResult.Body), http.StatusInternalServerError)
return
}
failures, _, _, _ := jsonparser.Get(searchRes.RawResult.Body, "_shards", "failures")
if len(failures) > 0 {
h.WriteError(w, string(failures), http.StatusInternalServerError)
return
}
h.WriteJSONHeader(w)
h.WriteHeader(w, http.StatusOK)
h.Write(w, searchRes.RawResult.Body)
}
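A minimal request-body sketch for this endpoint (POST /elasticsearch/:id/search/ese in init.go); the index pattern, the deduplication field, and the parameter key inside distinct_by_field are placeholders:

    eseReq := util.MapStr{
        "index": "logs-*",
        "body": util.MapStr{
            "size":             10,
            "track_total_hits": true, // removed automatically for Elasticsearch < 7.0
            "query": util.MapStr{
                "bool": util.MapStr{"filter": []interface{}{}},
            },
        },
        // rewritten by the handler into an "infini" script filter for server-side deduplication
        "distinct_by_field": util.MapStr{"field": "request_id"},
    }
    _ = util.MustToJSONBytes(eseReq)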
func (h *APIHandler) HandleValueSuggestionAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if !exists{
errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID)
h.WriteError(w, errStr, http.StatusNotFound)
return
}
var reqParams = struct{
BoolFilter interface{} `json:"boolFilter"`
FieldName string `json:"field"`
Query string `json:"query"`
}{}
err = h.DecodeJSON(req, &reqParams)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
indexName := ps.ByName("index")
boolQ := util.MapStr{
"filter": reqParams.BoolFilter,
}
var values = []interface{}{}
indices, hasAll := h.GetAllowedIndices(req, targetClusterID)
if !hasAll {
if len(indices) == 0 {
h.WriteJSON(w, values,http.StatusOK)
return
}
boolQ["must"] = []util.MapStr{
{
"terms": util.MapStr{
"_index": indices,
},
},
}
}
queryBody := util.MapStr{
"size": 0,
"query": util.MapStr{
"bool": boolQ,
},
"aggs": util.MapStr{
"suggestions": util.MapStr{
"terms": util.MapStr{
"field": reqParams.FieldName,
"include": reqParams.Query + ".*",
"execution_hint": "map",
"shard_size": 10,
},
},
},
}
var queryBodyBytes = util.MustToJSONBytes(queryBody)
searchRes, err := client.SearchWithRawQueryDSL(indexName, queryBodyBytes)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
for _, bucket := range searchRes.Aggregations["suggestions"].Buckets {
values = append(values, bucket["key"])
}
h.WriteJSON(w, values,http.StatusOK)
}
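A minimal request-body sketch for the value-suggestion endpoint (POST /elasticsearch/:id/suggestions/values/:index in init.go); the filter clause, field name, and prefix are placeholders:

    suggestReq := util.MapStr{
        "boolFilter": []util.MapStr{
            {"range": util.MapStr{"timestamp": util.MapStr{"gte": "now-1h"}}},
        },
        "field": "metadata.labels.node_name",
        "query": "node-1", // the handler expands this into the terms include pattern "node-1.*"
    }
    _ = util.MustToJSONBytes(suggestReq)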
func (h *APIHandler) HandleTraceIDSearchAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
traceID := h.GetParameterOrDefault(req, "traceID", "")
traceIndex := h.GetParameterOrDefault(req, "traceIndex", orm.GetIndexName(elastic.TraceMeta{}))
traceField := h.GetParameterOrDefault(req, "traceField", "trace_id")
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if !exists{
errStr := fmt.Sprintf("cluster [%s] not found",targetClusterID)
h.WriteError(w, errStr, http.StatusNotFound)
return
}
var queryDSL = util.MapStr{
"query": util.MapStr{
"bool": util.MapStr{
"must": []util.MapStr{
{
"term": util.MapStr{
traceField: traceID,
},
},
{
"term": util.MapStr{
"cluster_id": targetClusterID,
},
},
},
},
},
}
searchRes, err := client.SearchWithRawQueryDSL(traceIndex, util.MustToJSONBytes(queryDSL))
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if searchRes.GetTotal() == 0 {
h.WriteJSON(w, []string{}, http.StatusOK)
return
}
var indexNames []string
for _, hit := range searchRes.Hits.Hits {
indexNames = append(indexNames, hit.Source["index"].(string))
}
h.WriteJSON(w, indexNames, http.StatusOK)
}

modules/elastic/api/host.go Normal file

File diff suppressed because it is too large


@ -0,0 +1,57 @@
/* Copyright © INFINI Ltd. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"io"
"net/http"
)
func (h *APIHandler) HandleGetILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
clusterID := ps.MustGetParameter("id")
esClient := elastic.GetClient(clusterID)
policies, err := esClient.GetILMPolicy("")
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteJSON(w, policies, http.StatusOK)
}
func (h *APIHandler) HandleSaveILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
clusterID := ps.MustGetParameter("id")
policy := ps.MustGetParameter("policy")
esClient := elastic.GetClient(clusterID)
reqBody, err := io.ReadAll(req.Body)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
err = esClient.PutILMPolicy(policy, reqBody)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteAckOKJSON(w)
}
func (h *APIHandler) HandleDeleteILMPolicyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
clusterID := ps.MustGetParameter("id")
policy := ps.MustGetParameter("policy")
esClient := elastic.GetClient(clusterID)
err := esClient.DeleteILMPolicy(policy)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteAckOKJSON(w)
}


@ -0,0 +1,953 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/radix"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/adapter"
"infini.sh/framework/modules/elastic/common"
"net/http"
"sort"
"strings"
"time"
)
func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int, shardID string) (map[string]*common.MetricItem, error){
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
return nil, err
}
var must = []util.MapStr{
{
"term":util.MapStr{
"metadata.labels.cluster_uuid":util.MapStr{
"value": clusterUUID,
},
},
},
{
"term": util.MapStr{
"metadata.category": util.MapStr{
"value": "elasticsearch",
},
},
},
{
"term": util.MapStr{
"metadata.name": util.MapStr{
"value": "shard_stats",
},
},
},
}
if v := strings.TrimSpace(shardID); v != "" {
must = append(must, util.MapStr{
"term": util.MapStr{
"metadata.labels.shard_id": util.MapStr{
"value": shardID,
},
},
})
}
var (
indexNames []string
)
if indexName != "" {
indexNames = strings.Split(indexName, ",")
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
if !hasAllPrivilege && len(allowedIndices) == 0 {
return nil, nil
}
if !hasAllPrivilege{
namePattern := radix.Compile(allowedIndices...)
var filterNames []string
for _, name := range indexNames {
if namePattern.Match(name){
filterNames = append(filterNames, name)
}
}
if len(filterNames) == 0 {
return nil, nil
}
indexNames = filterNames
}
top = len(indexNames)
}else{
indexNames, err = h.getTopIndexName(req, clusterID, top, 15)
if err != nil {
return nil, err
}
}
if len(indexNames) > 0 {
must = append(must, util.MapStr{
"terms": util.MapStr{
"metadata.labels.index_name": indexNames,
},
})
}
query:=map[string]interface{}{}
query["query"]=util.MapStr{
"bool": util.MapStr{
"must": must,
"must_not": []util.MapStr{
{
"term": util.MapStr{
"metadata.labels.index_name": util.MapStr{
"value": "_all",
},
},
},
},
"filter": []util.MapStr{
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": min,
"lte": max,
},
},
},
},
},
}
// index storage size
indexStorageMetric := newMetricItem("index_storage", 1, StorageGroupKey)
indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems := []GroupMetricItem{
{
Key: "index_storage",
Field: "payload.elasticsearch.shard_stats.store.size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexStorageMetric,
FormatType: "bytes",
Units: "",
},
}
// segment count
segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_count",
Field: "payload.elasticsearch.shard_stats.segments.count",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentCountMetric,
FormatType: "num",
Units: "",
})
// index document count
docCountMetric := newMetricItem("doc_count", 2, DocumentGroupKey)
docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "doc_count",
Field: "payload.elasticsearch.shard_stats.docs.count",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docCountMetric,
FormatType: "num",
Units: "",
})
// deleted docs count
docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "docs_deleted",
Field: "payload.elasticsearch.shard_stats.docs.deleted",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docsDeletedMetric,
FormatType: "num",
Units: "",
})
// query count
queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey)
queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_times",
Field: "payload.elasticsearch.shard_stats.search.query_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryTimesMetric,
FormatType: "num",
Units: "requests/s",
})
// fetch count
fetchTimesMetric := newMetricItem("fetch_times", 3, OperationGroupKey)
fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_times",
Field: "payload.elasticsearch.shard_stats.search.fetch_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: fetchTimesMetric,
FormatType: "num",
Units: "requests/s",
})
// scroll count
scrollTimesMetric := newMetricItem("scroll_times", 4, OperationGroupKey)
scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_times",
Field: "payload.elasticsearch.shard_stats.search.scroll_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: scrollTimesMetric,
FormatType: "num",
Units: "requests/s",
})
// merge count
mergeTimesMetric := newMetricItem("merge_times", 7, OperationGroupKey)
mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_times",
Field: "payload.elasticsearch.shard_stats.merges.total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: mergeTimesMetric,
FormatType: "num",
Units: "requests/s",
})
// refresh count
refreshTimesMetric := newMetricItem("refresh_times", 5, OperationGroupKey)
refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_times",
Field: "payload.elasticsearch.shard_stats.refresh.total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: refreshTimesMetric,
FormatType: "num",
Units: "requests/s",
})
// flush count
flushTimesMetric := newMetricItem("flush_times", 6, OperationGroupKey)
flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_times",
Field: "payload.elasticsearch.shard_stats.flush.total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushTimesMetric,
FormatType: "num",
Units: "requests/s",
})
// indexing rate
indexingRateMetric := newMetricItem("indexing_rate", 1, OperationGroupKey)
if shardID == "" {
indexingRateMetric.OnlyPrimary = true
}
indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_rate",
Field: "payload.elasticsearch.shard_stats.indexing.index_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexingRateMetric,
FormatType: "num",
Units: "doc/s",
})
indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
if shardID == "" {
indexingBytesMetric.OnlyPrimary = true
}
indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_bytes",
Field: "payload.elasticsearch.shard_stats.store.size_in_bytes",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexingBytesMetric,
FormatType: "bytes",
Units: "bytes/s",
})
// indexing latency
indexingLatencyMetric := newMetricItem("indexing_latency", 1, LatencyGroupKey)
if shardID == "" {
indexingLatencyMetric.OnlyPrimary = true
}
indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_latency",
Field: "payload.elasticsearch.shard_stats.indexing.index_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.indexing.index_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexingLatencyMetric,
FormatType: "num",
Units: "ms",
})
// query latency
queryLatencyMetric := newMetricItem("query_latency", 2, LatencyGroupKey)
queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_latency",
Field: "payload.elasticsearch.shard_stats.search.query_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.search.query_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryLatencyMetric,
FormatType: "num",
Units: "ms",
})
// fetch latency
fetchLatencyMetric := newMetricItem("fetch_latency", 3, LatencyGroupKey)
fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_latency",
Field: "payload.elasticsearch.shard_stats.search.fetch_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.search.fetch_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: fetchLatencyMetric,
FormatType: "num",
Units: "ms",
})
// merge latency
mergeLatencyMetric := newMetricItem("merge_latency", 7, LatencyGroupKey)
mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_latency",
Field: "payload.elasticsearch.shard_stats.merges.total_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.merges.total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: mergeLatencyMetric,
FormatType: "num",
Units: "ms",
})
// refresh latency
refreshLatencyMetric := newMetricItem("refresh_latency", 5, LatencyGroupKey)
refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_latency",
Field: "payload.elasticsearch.shard_stats.refresh.total_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.refresh.total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: refreshLatencyMetric,
FormatType: "num",
Units: "ms",
})
// scroll latency
scrollLatencyMetric := newMetricItem("scroll_latency", 4, LatencyGroupKey)
scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_latency",
Field: "payload.elasticsearch.shard_stats.search.scroll_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.search.scroll_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: scrollLatencyMetric,
FormatType: "num",
Units: "ms",
})
// flush latency
flushLatencyMetric := newMetricItem("flush_latency", 6, LatencyGroupKey)
flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_latency",
Field: "payload.elasticsearch.shard_stats.flush.total_time_in_millis",
Field2: "payload.elasticsearch.shard_stats.flush.total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushLatencyMetric,
FormatType: "num",
Units: "ms",
})
//queryCache
queryCacheMetric := newMetricItem("query_cache", 1, CacheGroupKey)
queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache",
Field: "payload.elasticsearch.shard_stats.query_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: queryCacheMetric,
FormatType: "bytes",
Units: "",
})
//requestCache
requestCacheMetric := newMetricItem("request_cache", 2, CacheGroupKey)
requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache",
Field: "payload.elasticsearch.shard_stats.request_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: requestCacheMetric,
FormatType: "bytes",
Units: "",
})
// Request Cache Hit
requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_hit",
Field: "payload.elasticsearch.shard_stats.request_cache.hit_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: requestCacheHitMetric,
FormatType: "num",
Units: "hits",
})
// Request Cache Miss
requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_miss",
Field: "payload.elasticsearch.shard_stats.request_cache.miss_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: requestCacheMissMetric,
FormatType: "num",
Units: "misses",
})
// Query Cache Count
queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
queryCacheCountMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_count",
Field: "payload.elasticsearch.shard_stats.query_cache.cache_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheCountMetric,
FormatType: "num",
Units: "",
})
// Query Cache Hit
queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_hit",
Field: "payload.elasticsearch.shard_stats.query_cache.hit_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheHitMetric,
FormatType: "num",
Units: "hits",
})
//// Query Cache evictions
//queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 11, CacheGroupKey)
//queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
//indexMetricItems=append(indexMetricItems, GroupMetricItem{
// Key: "query_cache_evictions",
// Field: "payload.elasticsearch.index_stats.total.query_cache.evictions",
// ID: util.GetUUID(),
// IsDerivative: true,
// MetricItem: queryCacheEvictionsMetric,
// FormatType: "num",
// Units: "evictions",
//})
// Query Cache Miss
queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_miss",
Field: "payload.elasticsearch.shard_stats.query_cache.miss_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheMissMetric,
FormatType: "num",
Units: "misses",
})
// fielddata memory size
fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "fielddata_cache",
Field: "payload.elasticsearch.shard_stats.fielddata.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: fieldDataCacheMetric,
FormatType: "bytes",
Units: "",
})
//segment memory
segmentMemoryMetric := newMetricItem("segment_memory", 13, MemoryGroupKey)
segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_memory",
Field: "payload.elasticsearch.shard_stats.segments.memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentMemoryMetric,
FormatType: "bytes",
Units: "",
})
//segment doc values memory
docValuesMemoryMetric := newMetricItem("segment_doc_values_memory", 13, MemoryGroupKey)
docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_doc_values_memory",
Field: "payload.elasticsearch.shard_stats.segments.doc_values_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docValuesMemoryMetric,
FormatType: "bytes",
Units: "",
})
//segment terms memory
termsMemoryMetric := newMetricItem("segment_terms_memory", 13, MemoryGroupKey)
termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_terms_memory",
Field: "payload.elasticsearch.shard_stats.segments.terms_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: termsMemoryMetric,
FormatType: "bytes",
Units: "",
})
//segment fields memory
fieldsMemoryMetric := newMetricItem("segment_fields_memory", 13, MemoryGroupKey)
fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_fields_memory",
Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: fieldsMemoryMetric,
FormatType: "bytes",
Units: "",
})
// segment index writer memory
segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric.AddAxi("segment doc values memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_index_writer_memory",
Field: "payload.elasticsearch.shard_stats.segments.index_writer_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentIndexWriterMemoryMetric,
FormatType: "bytes",
Units: "",
})
// segment term vectors memory
segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_term_vectors_memory",
Field: "payload.elasticsearch.shard_stats.segments.term_vectors_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentTermVectorsMemoryMetric,
FormatType: "bytes",
Units: "",
})
aggs:=map[string]interface{}{}
sumAggs := util.MapStr{}
var filterSubAggs = util.MapStr{}
for _,metricItem:=range indexMetricItems {
leafAgg := util.MapStr{
"max":util.MapStr{
"field": metricItem.Field,
},
}
var sumBucketPath = "term_shard>"+ metricItem.ID
if metricItem.MetricItem.OnlyPrimary {
filterSubAggs[metricItem.ID] = leafAgg
aggs["filter_pri"]=util.MapStr{
"filter": util.MapStr{
"term": util.MapStr{
"payload.elasticsearch.shard_stats.routing.primary": util.MapStr{
"value": true,
},
},
},
"aggs": filterSubAggs,
}
sumBucketPath = "term_shard>filter_pri>"+ metricItem.ID
}else{
aggs[metricItem.ID]= leafAgg
}
sumAggs[metricItem.ID] = util.MapStr{
"sum_bucket": util.MapStr{
"buckets_path": sumBucketPath,
},
}
if metricItem.Field2 != ""{
leafAgg2 := util.MapStr{
"max":util.MapStr{
"field": metricItem.Field2,
},
}
if metricItem.MetricItem.OnlyPrimary {
filterSubAggs[metricItem.ID+"_field2"] = leafAgg2
}else{
aggs[metricItem.ID+"_field2"] = leafAgg2
}
sumAggs[metricItem.ID + "_field2"] = util.MapStr{
"sum_bucket": util.MapStr{
"buckets_path": sumBucketPath + "_field2",
},
}
}
if metricItem.IsDerivative{
sumAggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
sumAggs[metricItem.ID + "_deriv_field2"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
}
}
}
sumAggs["term_shard"]= util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.shard_id",
"size": 10000,
},
"aggs": aggs,
}
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
log.Error(err)
panic(err)
}
query["size"]=0
query["aggs"]= util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
"size": top,
// max_store is a pipeline agg; terms ordering cannot sort on it, so bucket_sort is used below
//"order": util.MapStr{
// "max_store": "desc",
//},
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":sumAggs,
},
"max_store_bucket_sort": util.MapStr{
"bucket_sort": util.MapStr{
"sort": []util.MapStr{
{"max_store": util.MapStr{"order": "desc"}}},
"size": top,
},
},
"term_shard": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.shard_id",
"size": 10000,
},
"aggs": util.MapStr{
"max_store": util.MapStr{
"max": util.MapStr{
"field": "payload.elasticsearch.shard_stats.store.size_in_bytes",
},
},
},
},
"max_store": util.MapStr{
"sum_bucket": util.MapStr{
"buckets_path": "term_shard>max_store",
},
},
},
},
}
return h.getMetrics(query, indexMetricItems, bucketSize), nil
}
func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error){
ver := h.Client().GetVersion()
cr, _ := util.VersionCompare(ver.Number, "6.1")
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 {
return nil, nil
}
var (
now = time.Now()
max = now.UnixNano()/1e6
min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6
)
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
return nil, err
}
var must = []util.MapStr{
{
"term": util.MapStr{
"metadata.category": util.MapStr{
"value": "elasticsearch",
},
},
},
{
"term": util.MapStr{
"metadata.name": util.MapStr{
"value": "shard_stats",
},
},
},
{
"term": util.MapStr{
"metadata.labels.cluster_uuid": util.MapStr{
"value": clusterUUID,
},
},
},
}
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
if !hasAllPrivilege && len(allowedIndices) == 0 {
return nil, fmt.Errorf("no index permission")
}
if !hasAllPrivilege {
must = append(must, util.MapStr{
"query_string": util.MapStr{
"query": strings.Join(allowedIndices, " "),
"fields": []string{"metadata.labels.index_name"},
"default_operator": "OR",
},
})
}
bucketSizeStr := "60s"
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err
}
query := util.MapStr{
"size": 0,
"query": util.MapStr{
"bool": util.MapStr{
"must_not": []util.MapStr{
{
"term": util.MapStr{
"metadata.labels.index_name": util.MapStr{
"value": "_all",
},
},
},
},
"must": must,
"filter": []util.MapStr{
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": min,
"lte": max,
},
},
},
},
},
},
"aggs": util.MapStr{
"group_by_index": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
"size": 10000,
},
"aggs": util.MapStr{
"max_qps": util.MapStr{
"max_bucket": util.MapStr{
"buckets_path": "dates>search_qps",
},
},
"max_qps_bucket_sort": util.MapStr{
"bucket_sort": util.MapStr{
"sort": []util.MapStr{
{"max_qps": util.MapStr{"order": "desc"}}},
"size": top,
},
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"term_shard": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.shard_id",
"size": 10000,
},
"aggs": util.MapStr{
"search_query_total": util.MapStr{
"max": util.MapStr{
"field": "payload.elasticsearch.shard_stats.search.query_total",
},
},
},
},
"sum_search_query_total": util.MapStr{
"sum_bucket": util.MapStr{
"buckets_path": "term_shard>search_query_total",
},
},
"search_qps": util.MapStr{
"derivative": util.MapStr{
"buckets_path": "sum_search_query_total",
},
},
},
},
},
},
"group_by_index1": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
"size": 10000,
},
"aggs": util.MapStr{
"max_qps": util.MapStr{
"max_bucket": util.MapStr{
"buckets_path": "dates>index_qps",
},
},
"max_qps_bucket_sort": util.MapStr{
"bucket_sort": util.MapStr{
"sort": []util.MapStr{
{"max_qps": util.MapStr{"order": "desc"}},
},
"size": top,
},
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"term_shard": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.shard_id",
"size": 10000,
},
"aggs": util.MapStr{
"index_total": util.MapStr{
"max": util.MapStr{
"field": "payload.elasticsearch.shard_stats.indexing.index_total",
},
},
},
},
"sum_index_total": util.MapStr{
"sum_bucket": util.MapStr{
"buckets_path": "term_shard>index_total",
},
},
"index_qps": util.MapStr{
"derivative": util.MapStr{
"buckets_path": "sum_index_total",
},
},
},
},
},
},
},
}
response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query))
if err!=nil{
log.Error(err)
return nil, err
}
var maxQpsKVS = map[string] float64{}
for _, agg := range response.Aggregations {
for _, bk := range agg.Buckets {
key := bk["key"].(string)
if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok {
val := maxQps["value"].(float64)
if _, ok = maxQpsKVS[key] ; ok {
maxQpsKVS[key] = maxQpsKVS[key] + val
}else{
maxQpsKVS[key] = val
}
}
}
}
var (
qpsValues TopTermOrder
)
for k, v := range maxQpsKVS {
qpsValues = append(qpsValues, TopTerm{
Key: k,
Value: v,
})
}
sort.Sort(qpsValues)
var length = top
if top > len(qpsValues) {
length = len(qpsValues)
}
indexNames := []string{}
for i := 0; i <length; i++ {
indexNames = append(indexNames, qpsValues[i].Key)
}
return indexNames, nil
}
type TopTerm struct {
Key string
Value float64
}
type TopTermOrder []TopTerm
func (t TopTermOrder) Len() int{
return len(t)
}
func (t TopTermOrder) Less(i, j int) bool{
return t[i].Value > t[j].Value //desc
}
func (t TopTermOrder) Swap(i, j int){
t[i], t[j] = t[j], t[i]
}
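A minimal sketch of how TopTermOrder ranks terms, mirroring its use in getTopIndexName above; the keys and values are placeholders:

    terms := TopTermOrder{
        {Key: "index-a", Value: 12},
        {Key: "index-b", Value: 87},
        {Key: "index-c", Value: 33},
    }
    sort.Sort(terms) // descending by Value: index-b, index-c, index-a
    // the first N keys are then used as the top-N index names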

File diff suppressed because it is too large

modules/elastic/api/init.go Normal file

@ -0,0 +1,111 @@
package api
import (
"infini.sh/console/core/security/enum"
"infini.sh/framework/core/api"
)
var clusterAPI APIHandler
func init() {
clusterAPI = APIHandler{}
InitTestAPI()
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/health", clusterAPI.RequireClusterPermission(clusterAPI.GetClusterHealth))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/metrics", clusterAPI.RequireClusterPermission(clusterAPI.HandleMetricsSummaryAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/cluster_metrics", clusterAPI.RequireClusterPermission(clusterAPI.HandleClusterMetricsAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/node_metrics", clusterAPI.RequireClusterPermission(clusterAPI.HandleNodeMetricsAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/index_metrics", clusterAPI.RequireClusterPermission(clusterAPI.HandleIndexMetricsAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/queue_metrics", clusterAPI.RequireClusterPermission(clusterAPI.HandleQueueMetricsAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/storage_metrics", clusterAPI.RequireClusterPermission(clusterAPI.HandleGetStorageMetricAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/", clusterAPI.RequirePermission(clusterAPI.HandleCreateClusterAction, enum.PermissionElasticsearchClusterWrite))
api.HandleAPIMethod(api.GET, "/elasticsearch/indices", clusterAPI.RequireLogin(clusterAPI.ListIndex))
api.HandleAPIMethod(api.GET, "/elasticsearch/status", clusterAPI.RequireLogin(clusterAPI.GetClusterStatusAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.HandleGetClusterAction, enum.PermissionElasticsearchClusterRead)))
api.HandleAPIMethod(api.PUT, "/elasticsearch/:id", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.HandleUpdateClusterAction, enum.PermissionElasticsearchClusterWrite)))
api.HandleAPIMethod(api.DELETE, "/elasticsearch/:id", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.HandleDeleteClusterAction, enum.PermissionElasticsearchClusterWrite)))
api.HandleAPIMethod(api.GET, "/elasticsearch/_search", clusterAPI.RequirePermission(clusterAPI.HandleSearchClusterAction, enum.PermissionElasticsearchClusterRead))
api.HandleAPIMethod(api.POST, "/elasticsearch/_search", clusterAPI.RequirePermission(clusterAPI.HandleSearchClusterAction, enum.PermissionElasticsearchClusterRead))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/search_template", clusterAPI.HandleCreateSearchTemplateAction)
api.HandleAPIMethod(api.PUT, "/elasticsearch/:id/search_template/:template_id", clusterAPI.HandleUpdateSearchTemplateAction)
api.HandleAPIMethod(api.DELETE, "/elasticsearch/:id/search_template/:template_id", clusterAPI.HandleDeleteSearchTemplateAction)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/search_template", clusterAPI.HandleSearchSearchTemplateAction)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/search_template/:template_id", clusterAPI.HandleGetSearchTemplateAction)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/search_template_history/_search", clusterAPI.HandleSearchSearchTemplateHistoryAction)
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/_render/template", clusterAPI.HandleRenderTemplateAction)
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/_search/template", clusterAPI.HandleSearchTemplateAction)
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/alias", clusterAPI.RequireClusterPermission(clusterAPI.HandleAliasAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/alias", clusterAPI.RequireClusterPermission(clusterAPI.HandleGetAliasAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/saved_objects/view", clusterAPI.RequirePermission(clusterAPI.HandleCreateViewAction, enum.PermissionViewWrite))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/saved_objects/_find", clusterAPI.RequirePermission(clusterAPI.HandleGetViewListAction, enum.PermissionViewRead))
api.HandleAPIMethod(api.DELETE, "/elasticsearch/:id/saved_objects/view/:view_id", clusterAPI.RequirePermission(clusterAPI.HandleDeleteViewAction, enum.PermissionViewWrite))
api.HandleAPIMethod(api.PUT, "/elasticsearch/:id/saved_objects/view/:view_id", clusterAPI.RequirePermission(clusterAPI.HandleUpdateViewAction, enum.PermissionViewWrite))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/internal/view-management/resolve_index/:wild", clusterAPI.RequireLogin(clusterAPI.HandleResolveIndexAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/saved_objects/_bulk_get", clusterAPI.RequirePermission(clusterAPI.HandleBulkGetViewAction, enum.PermissionViewRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/view/_fields_for_wildcard", clusterAPI.RequireClusterPermission(clusterAPI.HandleGetFieldCapsAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/saved_objects/view/:view_id", clusterAPI.RequireClusterPermission(clusterAPI.HandleGetViewAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/view/:view_id/_set_default_layout", clusterAPI.RequireClusterPermission(clusterAPI.SetDefaultLayout))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/search/ese", clusterAPI.RequireClusterPermission(clusterAPI.HandleEseSearchAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/search/trace_id", clusterAPI.HandleTraceIDSearchAction)
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/suggestions/values/:index", clusterAPI.RequireClusterPermission(clusterAPI.HandleValueSuggestionAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/setting", clusterAPI.RequireClusterPermission(clusterAPI.HandleSettingAction))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/setting/:key", clusterAPI.RequireClusterPermission(clusterAPI.HandleGetSettingAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/_proxy", clusterAPI.RequireClusterPermission(clusterAPI.HandleProxyAction))
api.HandleAPIMethod(api.POST, "/elasticsearch/cluster/_search", clusterAPI.RequirePermission(clusterAPI.SearchClusterMetadata, enum.PermissionElasticsearchClusterRead))
api.HandleAPIMethod(api.POST, "/elasticsearch/cluster/info", clusterAPI.RequirePermission(clusterAPI.FetchClusterInfo, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/info", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.GetClusterInfo, enum.PermissionElasticsearchMetricRead)))
api.HandleAPIMethod(api.POST, "/elasticsearch/node/_search", clusterAPI.RequirePermission(clusterAPI.SearchNodeMetadata, enum.PermissionElasticsearchNodeRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/nodes", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.GetClusterNodes, enum.PermissionElasticsearchMetricRead, enum.PermissionElasticsearchNodeRead)))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/nodes/realtime", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.GetRealtimeClusterNodes, enum.PermissionElasticsearchMetricRead)))
api.HandleAPIMethod(api.POST, "/elasticsearch/node/info", clusterAPI.RequirePermission(clusterAPI.FetchNodeInfo, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/indices", clusterAPI.RequirePermission(clusterAPI.GetClusterIndices, enum.PermissionElasticsearchMetricRead, enum.PermissionElasticsearchIndexRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/indices/realtime", clusterAPI.RequireLogin(clusterAPI.GetRealtimeClusterIndices))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/node/:node_id/shards", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.GetNodeShards, enum.PermissionElasticsearchMetricRead)))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/node/:node_id/info", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.GetNodeInfo, enum.PermissionElasticsearchMetricRead, enum.PermissionElasticsearchNodeRead)))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/node/:node_id/metrics", clusterAPI.RequireClusterPermission(clusterAPI.RequirePermission(clusterAPI.GetSingleNodeMetrics, enum.PermissionElasticsearchMetricRead)))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/node/:node_id/indices", clusterAPI.RequirePermission(clusterAPI.getNodeIndices, enum.PermissionElasticsearchMetricRead, enum.PermissionElasticsearchIndexRead))
api.HandleAPIMethod(api.POST, "/elasticsearch/index/_search", clusterAPI.RequirePermission(clusterAPI.SearchIndexMetadata, enum.PermissionElasticsearchIndexRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/index/:index/metrics", clusterAPI.RequirePermission(clusterAPI.GetSingleIndexMetrics, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/index/:index/info", clusterAPI.RequirePermission(clusterAPI.GetIndexInfo, enum.PermissionElasticsearchIndexRead, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/index/:index/shards", clusterAPI.RequirePermission(clusterAPI.GetIndexShards, enum.PermissionElasticsearchIndexRead, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/index/:index/nodes", clusterAPI.RequirePermission(clusterAPI.getIndexNodes, enum.PermissionElasticsearchMetricRead, enum.PermissionElasticsearchNodeRead))
api.HandleAPIMethod(api.POST, "/elasticsearch/index/info", clusterAPI.RequirePermission(clusterAPI.FetchIndexInfo, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/trace_template", clusterAPI.HandleSearchTraceTemplateAction)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/trace_template/:template_id", clusterAPI.HandleGetTraceTemplateAction)
api.HandleAPIMethod(api.POST, "/elasticsearch/:id/trace_template", clusterAPI.HandleCrateTraceTemplateAction)
api.HandleAPIMethod(api.PUT, "/elasticsearch/:id/trace_template/:template_id", clusterAPI.HandleSaveTraceTemplateAction)
api.HandleAPIMethod(api.DELETE, "/elasticsearch/:id/trace_template/:template_id", clusterAPI.HandleDeleteTraceTemplateAction)
api.HandleAPIMethod(api.POST, "/elasticsearch/activity/_search", clusterAPI.RequirePermission(clusterAPI.HandleSearchActivityAction, enum.PermissionActivityRead))
api.HandleAPIMethod(api.GET, "/host/_discover", clusterAPI.getDiscoverHosts)
api.HandleAPIMethod(api.POST, "/host/_search", clusterAPI.SearchHostMetadata)
api.HandleAPIMethod(api.POST, "/host/info", clusterAPI.FetchHostInfo)
api.HandleAPIMethod(api.GET, "/host/:host_id/metrics", clusterAPI.GetSingleHostMetrics)
api.HandleAPIMethod(api.GET, "/host/:host_id/metric/_stats", clusterAPI.GetHostMetricStats)
api.HandleAPIMethod(api.GET, "/host/:host_id", clusterAPI.GetHostInfo)
api.HandleAPIMethod(api.PUT, "/host/:host_id", clusterAPI.updateHost)
api.HandleAPIMethod(api.GET, "/host/:host_id/info", clusterAPI.GetHostOverviewInfo)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/_ilm/policy", clusterAPI.HandleGetILMPolicyAction)
api.HandleAPIMethod(api.PUT, "/elasticsearch/:id/_ilm/policy/:policy", clusterAPI.HandleSaveILMPolicyAction)
api.HandleAPIMethod(api.DELETE, "/elasticsearch/:id/_ilm/policy/:policy", clusterAPI.HandleDeleteILMPolicyAction)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/_template", clusterAPI.HandleGetTemplateAction)
api.HandleAPIMethod(api.PUT, "/elasticsearch/:id/_template/:template_name", clusterAPI.HandleSaveTemplateAction)
api.HandleAPIMethod(api.GET, "/elasticsearch/:id/shard/:shard_id/info", clusterAPI.RequirePermission(clusterAPI.GetShardInfo, enum.PermissionElasticsearchMetricRead))
api.HandleAPIMethod(api.GET, "/elasticsearch/metadata", clusterAPI.RequireLogin(clusterAPI.GetMetadata))
api.HandleAPIMethod(api.GET, "/elasticsearch/hosts", clusterAPI.RequireLogin(clusterAPI.GetHosts))
}
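// Note on the wrappers used above (descriptive comment only): RequireLogin checks for an
// authenticated session, RequirePermission checks one or more named privileges, and
// RequireClusterPermission checks that the caller may access the cluster addressed by the
// ":id" path parameter; nesting the latter two, as in the node and index routes, enforces
// both checks. A sketch of the pattern with a hypothetical route:
//
//	api.HandleAPIMethod(api.GET, "/elasticsearch/:id/example",
//		clusterAPI.RequireClusterPermission(
//			clusterAPI.RequirePermission(clusterAPI.GetClusterInfo, enum.PermissionElasticsearchMetricRead)))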

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,92 @@
package api
import (
"fmt"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"net/http"
"testing"
"time"
)
func TestGetMetricParams(t *testing.T) {
handler:=APIHandler{}
req:=http.Request{}
bucketSize, min, max, err:=handler.getMetricRangeAndBucketSize(&req,60,15)
fmt.Println(bucketSize)
fmt.Println(util.FormatUnixTimestamp(min/1000))//2022-01-27 15:28:57
fmt.Println(util.FormatUnixTimestamp(max/1000))//2022-01-27 15:28:57
fmt.Println(time.Now())//2022-01-27 15:28:57
fmt.Println(bucketSize, min, max, err)
}
func TestConvertBucketItemsToAggQueryParams(t *testing.T) {
bucketItem:=common.BucketItem{}
bucketItem.Key="key1"
bucketItem.Type=common.TermsBucket
bucketItem.Parameters=map[string]interface{}{}
bucketItem.Parameters["field"]="metadata.labels.cluster_id"
bucketItem.Parameters["size"]=2
nestBucket:=common.BucketItem{}
nestBucket.Key="key2"
nestBucket.Type=common.DateHistogramBucket
nestBucket.Parameters=map[string]interface{}{}
nestBucket.Parameters["field"]="timestamp"
nestBucket.Parameters["calendar_interval"]="1d"
nestBucket.Parameters["time_zone"]="+08:00"
leafBucket:=common.NewBucketItem(common.TermsBucket,util.MapStr{
"size":5,
"field":"payload.elasticsearch.cluster_health.status",
})
leafBucket.Key="key3"
metricItems:=[]*common.MetricItem{}
var bucketSizeStr ="10s"
metricItem:=newMetricItem("cluster_summary", 2, "cluster")
metricItem.Key="key4"
metricItem.AddLine("Indexing","Total Indexing","Number of documents being indexed for primary and replica shards.","group1",
"payload.elasticsearch.index_stats.total.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Search","Total Search","Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!","group1",
"payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
nestBucket.AddNestBucket(leafBucket)
nestBucket.Metrics=metricItems
bucketItem.Buckets=[]*common.BucketItem{}
bucketItem.Buckets=append(bucketItem.Buckets,&nestBucket)
aggs:=ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem},nil)
fmt.Println(util.MustToJSON(aggs))
response:="{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }"
res:=SearchResponse{}
util.FromJSONBytes([]byte(response),&res)
fmt.Println(response)
groupKey:="key1"
metricLabelKey:="key2"
metricValueKey:="c7qi5hii4h935v9bs920"
data:=ParseAggregationResult(int(10),res.Aggregations,groupKey,metricLabelKey,metricValueKey)
fmt.Println(data)
}
func TestConvertBucketItems(t *testing.T) {
response:="{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }"
res:=SearchResponse{}
util.FromJSONBytes([]byte(response),&res)
data:=ParseAggregationBucketResult(int(10),res.Aggregations,"c7v2gm3i7638vvo4pv80","c7v2gm3i7638vvo4pv8g","c7v2gm3i7638vvo4pv90", func() {
})
fmt.Println(data)
}

View File

@@ -0,0 +1,26 @@
/* Copyright © INFINI Ltd. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
"fmt"
"infini.sh/framework/core/elastic"
)
type MonitorState int
const (
Console MonitorState = iota
Agent
)
func GetMonitorState(clusterID string) MonitorState {
conf := elastic.GetConfig(clusterID)
if conf == nil {
panic(fmt.Errorf("config of cluster [%s] is not found", clusterID))
}
if conf.MonitorConfigs != nil && !conf.MonitorConfigs.NodeStats.Enabled && !conf.MonitorConfigs.IndexStats.Enabled {
return Agent
}
return Console
}
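// Usage sketch (illustrative only, not part of this file): callers typically branch on
// the returned state to decide whether node/index stats are expected to be collected by
// the console itself or shipped by an external agent.
//
//	switch GetMonitorState(clusterID) {
//	case Agent:
//		// rely on agent-shipped metrics
//	default:
//		// the console collects node_stats/index_stats itself
//	}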

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,283 @@
package api
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"github.com/buger/jsonparser"
log "github.com/cihub/seelog"
"github.com/segmentio/encoding/json"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/lib/fasthttp"
"io"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
var httpPool = fasthttp.NewRequestResponsePool("proxy_search")
func (h *APIHandler) HandleProxyAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
method := h.GetParameterOrDefault(req, "method", "")
path := h.GetParameterOrDefault(req, "path", "")
if method == "" || path == "" {
resBody["error"] = fmt.Errorf("parameter method and path is required")
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
exists, esClient, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
authPath, _ := url.PathUnescape(path)
var realPath = authPath
newURL, err := url.Parse(realPath)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if strings.Trim(newURL.Path, "/") == "_sql" {
distribution := esClient.GetVersion().Distribution
indexName, err := rewriteTableNamesOfSqlRequest(req, distribution)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if !h.IsIndexAllowed(req, targetClusterID, indexName) {
h.WriteError(w, fmt.Sprintf("forbidden to access index %s", indexName), http.StatusForbidden)
return
}
q, _ := url.ParseQuery(newURL.RawQuery)
hasFormat := q.Has("format")
switch distribution {
case elastic.Opensearch:
path = "_plugins/_sql?format=raw"
case elastic.Easysearch:
if !hasFormat {
q.Add("format", "raw")
}
path = "_sql?" + q.Encode()
default:
if !hasFormat {
q.Add("format", "txt")
}
path = "_sql?" + q.Encode()
}
}
//ccs search
if parts := strings.SplitN(authPath, "/", 2); strings.Contains(parts[0], ":") {
ccsParts := strings.SplitN(parts[0], ":", 2)
realPath = fmt.Sprintf("%s/%s", ccsParts[1], parts[1])
}
newReq := req.Clone(context.Background())
newReq.URL = newURL
newReq.Method = method
isSuperAdmin, permission, err := h.ValidateProxyRequest(newReq, targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusForbidden)
return
}
if permission == "" && api.IsAuthEnable() && !isSuperAdmin {
resBody["error"] = "unknown request path"
h.WriteJSON(w, resBody, http.StatusForbidden)
return
}
//if permission != "" {
// if permission == "cat.indices" || permission == "cat.shards" {
// reqUrl.Path
// }
//}
var (
freq = httpPool.AcquireRequest()
fres = httpPool.AcquireResponse()
)
defer func() {
httpPool.ReleaseRequest(freq)
httpPool.ReleaseResponse(fres)
}()
metadata := elastic.GetMetadata(targetClusterID)
if metadata == nil {
resBody["error"] = fmt.Sprintf("cluster [%s] metadata not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
if metadata.Config.BasicAuth != nil {
freq.SetBasicAuth(metadata.Config.BasicAuth.Username, metadata.Config.BasicAuth.Password.Get())
}
endpoint := util.JoinPath(metadata.GetActivePreferredSeedEndpoint(), path)
freq.SetRequestURI(endpoint)
method = strings.ToUpper(method)
freq.Header.SetMethod(method)
freq.Header.SetUserAgent(req.Header.Get("user-agent"))
freq.Header.SetReferer(endpoint)
rurl, _ := url.Parse(endpoint)
if rurl != nil {
freq.Header.SetHost(rurl.Host)
freq.Header.SetRequestURI(rurl.RequestURI())
}
clonedURI := freq.CloneURI()
defer fasthttp.ReleaseURI(clonedURI)
clonedURI.SetScheme(metadata.GetSchema())
freq.SetURI(clonedURI)
if permission == "cluster.search" {
indices, hasAll := h.GetAllowedIndices(req, targetClusterID)
if !hasAll && len(indices) == 0 {
h.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
if hasAll {
freq.SetBodyStream(req.Body, int(req.ContentLength))
} else {
body, err := io.ReadAll(req.Body)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if len(body) == 0 {
body = []byte("{}")
}
v, _, _, _ := jsonparser.Get(body, "query")
newQ := bytes.NewBuffer([]byte(`{"bool": {"must": [{"terms": {"_index":`))
indicesBytes := util.MustToJSONBytes(indices)
newQ.Write(indicesBytes)
newQ.Write([]byte("}}"))
if len(v) > 0 {
newQ.Write([]byte(","))
newQ.Write(v)
}
newQ.Write([]byte(`]}}`))
body, _ = jsonparser.Set(body, newQ.Bytes(), "query")
freq.SetBody(body)
}
} else {
freq.SetBodyStream(req.Body, int(req.ContentLength))
}
defer req.Body.Close()
err = getHttpClient().Do(freq, fres)
if err != nil {
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
okBody := struct {
RequestHeader string `json:"request_header"`
ResponseHeader string `json:"response_header"`
ResponseBody string `json:"response_body"`
}{
RequestHeader: freq.Header.String(),
ResponseHeader: fres.Header.String(),
ResponseBody: string(fres.GetRawBody()),
}
w.Header().Set("Content-type", string(fres.Header.ContentType()))
w.WriteHeader(fres.StatusCode())
json.NewEncoder(w).Encode(okBody)
}
func rewriteTableNamesOfSqlRequest(req *http.Request, distribution string) (string, error) {
var buf bytes.Buffer
if _, err := buf.ReadFrom(req.Body); err != nil {
return "", err
}
if err := req.Body.Close(); err != nil {
return "", err
}
req.Body = io.NopCloser(bytes.NewReader(buf.Bytes()))
sqlQuery, err := jsonparser.GetString(buf.Bytes(), "query")
if err != nil {
return "", fmt.Errorf("parse query from request body error: %w", err)
}
q := util.NewSQLQueryString(sqlQuery)
tableNames, err := q.TableNames()
if err != nil {
return "", err
}
rewriteBody := false
switch distribution {
case elastic.Elasticsearch:
for _, tname := range tableNames {
if strings.ContainsAny(tname, "-.") && !strings.HasPrefix(tname, "\"") {
//wrap the table name in quotes so names containing '-' or '.' survive SQL parsing
sqlQuery = strings.Replace(sqlQuery, tname, fmt.Sprintf(`\"%s\"`, tname), -1)
rewriteBody = true
}
}
case elastic.Opensearch, elastic.Easysearch:
for _, tname := range tableNames {
//remove quotes from table name
if strings.HasPrefix(tname, "\"") || strings.HasSuffix(tname, "\"") {
sqlQuery = strings.Replace(sqlQuery, tname, strings.Trim(tname, "\""), -1)
rewriteBody = true
}
}
}
if rewriteBody {
sqlQuery = fmt.Sprintf(`"%s"`, sqlQuery)
reqBody, _ := jsonparser.Set(buf.Bytes(), []byte(sqlQuery), "query")
req.Body = io.NopCloser(bytes.NewReader(reqBody))
req.ContentLength = int64(len(reqBody))
}
var unescapedTableNames []string
for _, tname := range tableNames {
unescapedTableNames = append(unescapedTableNames, strings.Trim(tname, "\""))
}
return strings.Join(unescapedTableNames, ","), nil
}
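// Example of the rewrite above (illustrative values): for the body
// {"query":"SELECT * FROM .infini_metrics-000001"} an Elasticsearch target gets the
// table name wrapped in escaped quotes, i.e.
// {"query":"SELECT * FROM \".infini_metrics-000001\""}, while OpenSearch/Easysearch
// targets have surrounding quotes stripped instead; the unescaped table name is then
// returned so the caller can apply its index-level permission check.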
var (
client *fasthttp.Client
clientOnce sync.Once
)
func getHttpClient() *fasthttp.Client {
clientOnce.Do(func() {
clientCfg := global.Env().SystemConfig.HTTPClientConfig
client = &fasthttp.Client{
MaxConnsPerHost: clientCfg.MaxConnectionPerHost,
TLSConfig: &tls.Config{InsecureSkipVerify: clientCfg.TLSConfig.TLSInsecureSkipVerify},
ReadTimeout: util.GetDurationOrDefault(clientCfg.ReadTimeout, 60*time.Second),
WriteTimeout: util.GetDurationOrDefault(clientCfg.ReadTimeout, 60*time.Second),
DialDualStack: true,
ReadBufferSize: clientCfg.ReadBufferSize,
WriteBufferSize: clientCfg.WriteBufferSize,
//Dial: fasthttpproxy.FasthttpProxyHTTPDialerTimeout(time.Second * 2),
}
})
return client
}
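// Design note: the shared client above is built once via sync.Once, so connection
// pooling (MaxConnsPerHost), TLS settings and the configured timeouts are reused by
// every proxied request instead of being re-created per call.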

View File

@@ -0,0 +1,410 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
"github.com/segmentio/encoding/json"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"strconv"
"strings"
"time"
)
func (h *APIHandler) HandleCreateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
var template = &elastic.SearchTemplate{}
err = h.DecodeJSON(req, template)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
var body = map[string]interface{}{
"script": map[string]interface{}{
"lang": "mustache",
"source": template.Source,
},
}
bodyBytes, _ := json.Marshal(body)
//fmt.Println(client)
err = client.SetSearchTemplate(template.Name, bodyBytes)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
id := util.GetUUID()
template.Created = time.Now()
template.Updated = template.Created
template.ClusterID = targetClusterID
index:=orm.GetIndexName(elastic.SearchTemplate{})
insertRes, err := esClient.Index(index, "", id, template, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody["_source"] = template
resBody["_id"] = id
resBody["result"] = insertRes.Result
h.WriteJSON(w, resBody,http.StatusOK)
}
func (h *APIHandler) HandleUpdateSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
var template = &elastic.SearchTemplate{}
err = h.DecodeJSON(req, template)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
templateID := ps.ByName("template_id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
index:=orm.GetIndexName(elastic.SearchTemplate{})
getRes, err := esClient.Get(index, "",templateID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !getRes.Found {
resBody["error"] = fmt.Sprintf("template %s cannot be found", templateID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
originTemplate := getRes.Source
targetTemplate := make(map[string]interface{}, len(originTemplate))
for k, v := range originTemplate {
targetTemplate[k] = v
}
targetName := originTemplate["name"].(string)
if template.Name != "" && template.Name != targetName {
err = client.DeleteSearchTemplate(targetName)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
targetTemplate["name"] = template.Name
targetName = template.Name
}
if template.Source != "" {
targetTemplate["source"] = template.Source
}
var body = map[string]interface{}{
"script": map[string]interface{}{
"lang": "mustache",
"source": targetTemplate["source"],
},
}
bodyBytes, _ := json.Marshal(body)
err = client.SetSearchTemplate(targetName, bodyBytes)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
targetTemplate["updated"] = time.Now()
insertRes, err := esClient.Index(index, "", templateID, targetTemplate, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
ht := &elastic.SearchTemplateHistory{
TemplateID: templateID,
Action: "update",
Content: originTemplate,
Created: time.Now(),
}
esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "")
resBody["_source"] = originTemplate
resBody["_id"] = templateID
resBody["result"] = insertRes.Result
h.WriteJSON(w, resBody,http.StatusOK)
}
func (h *APIHandler) HandleDeleteSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
templateID := ps.ByName("template_id")
index:=orm.GetIndexName(elastic.SearchTemplate{})
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
res, err := esClient.Get(index, "", templateID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
err = client.DeleteSearchTemplate(res.Source["name"].(string))
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
delRes, err := esClient.Delete(index, "", res.ID)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
ht := &elastic.SearchTemplateHistory{
TemplateID: templateID,
Action: "delete",
Content: res.Source,
Created: time.Now(),
}
_, err = esClient.Index(orm.GetIndexName(ht), "", util.GetUUID(), ht, "wait_for")
if err != nil {
log.Error(err)
}
resBody["_id"] = templateID
resBody["result"] = delRes.Result
h.WriteJSON(w, resBody, delRes.StatusCode)
}
func (h *APIHandler) HandleSearchSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
var (
name = h.GetParameterOrDefault(req, "name", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
mustBuilder = &strings.Builder{}
)
from, _ := strconv.Atoi(strFrom)
size, _ := strconv.Atoi(strSize)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"match":{"cluster_id": "%s"}}`, targetClusterID))
if name != ""{
mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"name": "%s"}}`, name))
}
queryDSL = fmt.Sprintf(queryDSL, mustBuilder.String(), from, size)
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
res, err := esClient.SearchWithRawQueryDSL(orm.GetIndexName(elastic.SearchTemplate{}), []byte(queryDSL))
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, res, http.StatusOK)
}
func (h *APIHandler) HandleGetSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{}
id := ps.ByName("template_id")
indexName := orm.GetIndexName(elastic.SearchTemplate{})
getResponse, err := h.Client().Get(indexName, "", id)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
if getResponse!=nil{
h.WriteJSON(w, resBody, getResponse.StatusCode)
}else{
h.WriteJSON(w, resBody, http.StatusInternalServerError)
}
return
}
h.WriteJSON(w,getResponse,200)
}
func (h *APIHandler) HandleSearchSearchTemplateHistoryAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
var (
templateID = h.GetParameterOrDefault(req, "template_id", "")
strFrom = h.GetParameterOrDefault(req, "from", "0")
strSize = h.GetParameterOrDefault(req, "size", "20")
queryDSL = `{"query":{"bool":{"must":[%s]}},"from": %d, "size": %d}`
mustBuilder = &strings.Builder{}
)
from, _ := strconv.Atoi(strFrom)
size, _ := strconv.Atoi(strSize)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"match":{"content.cluster_id": "%s"}}`, targetClusterID))
if templateID != ""{
mustBuilder.WriteString(fmt.Sprintf(`,{"match":{"template_id": "%s"}}`, templateID))
}
queryDSL = fmt.Sprintf(queryDSL, mustBuilder.String(), from, size)
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
res, err := esClient.SearchWithRawQueryDSL(orm.GetIndexName(elastic.SearchTemplateHistory{}), []byte(queryDSL))
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, res, http.StatusOK)
}
func (h *APIHandler) HandleRenderTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
reqBody := map[string]interface{}{}
err = h.DecodeJSON(req, &reqBody)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
res, err := client.RenderTemplate(reqBody)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, string(res), http.StatusOK)
}
func (h *APIHandler) HandleSearchTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
reqBody := map[string]interface{}{}
err = h.DecodeJSON(req, &reqBody)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
res, err := client.SearchTemplate(reqBody)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, string(res), http.StatusOK)
}
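// Example payload for the create handler above (illustrative only; the exact route is
// registered elsewhere in this commit, and field names are assumed from
// elastic.SearchTemplate):
//
//	{
//	  "name": "my-template",
//	  "source": "{\"query\":{\"match\":{\"title\":\"{{keyword}}\"}}}"
//	}
//
// The mustache source is installed on the target cluster via SetSearchTemplate, while a
// copy of the template and its update/delete history is kept in the console's own
// system indices.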

View File

@@ -0,0 +1,78 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"time"
)
func (h *APIHandler) HandleSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
targetClusterID := ps.ByName("id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
var reqParams = elastic.Setting{
UpdatedAt: time.Now(),
ClusterID: targetClusterID,
}
err := h.DecodeJSON(req, &reqParams)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
indexName := orm.GetIndexName(reqParams)
queryDSL := fmt.Sprintf(`{"size":1,"query":{"bool":{"must":[{"match":{"key":"%s"}},{"match":{"cluster_id":"%s"}}]}}}`, reqParams.Key, targetClusterID)
searchRes, err := esClient.SearchWithRawQueryDSL(indexName, []byte(queryDSL))
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
//update the existing setting document when found, otherwise create a new one
if len(searchRes.Hits.Hits) > 0 {
_, err = esClient.Index(indexName, "", searchRes.Hits.Hits[0].ID, reqParams, "wait_for")
}else{
reqParams.ID = util.GetUUID()
_, err = esClient.Index(indexName, "", reqParams.ID, reqParams, "wait_for")
}
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody["acknowledged"] = true
h.WriteJSON(w, resBody ,http.StatusOK)
}
func (h *APIHandler) HandleGetSettingAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
targetClusterID := ps.ByName("id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
var key = ps.ByName("key")
queryDSL := fmt.Sprintf(`{"size":1,"query":{"bool":{"must":[{"match":{"key":"%s"}},{"match":{"cluster_id":"%s"}}]}}}`, key, targetClusterID)
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetIndexName(elastic.Setting{}), []byte(queryDSL))
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
var value interface{}
if len(searchRes.Hits.Hits) > 0 {
value = searchRes.Hits.Hits[0].Source["value"]
}else{
value = ""
}
h.WriteJSON(w, value ,http.StatusOK)
}
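// Note: HandleSettingAction behaves as an upsert keyed on (key, cluster_id) by reusing
// the _id of an existing setting document when one matches, and HandleGetSettingAction
// returns the stored value directly, falling back to an empty string when no document
// is found.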

View File

@@ -0,0 +1,46 @@
/* Copyright © INFINI Ltd. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
"infini.sh/framework/core/event"
"infini.sh/framework/core/orm"
"infini.sh/framework/modules/elastic/adapter"
"net/http"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
)
func (h *APIHandler) GetShardInfo(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
clusterID := ps.MustGetParameter("id")
shardID := ps.MustGetParameter("shard_id")
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
q := orm.Query{
Size: 1,
}
q.Conds = orm.And(
orm.Eq("metadata.labels.shard_id", shardID),
orm.Eq("metadata.labels.cluster_uuid", clusterUUID),
)
q.AddSort("timestamp", orm.DESC)
err, res := orm.Search(&event.Event{}, &q)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if len(res.Result) == 0 {
h.WriteJSON(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
h.WriteJSON(w, res.Result[0], http.StatusOK)
}
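// Note: shard info is resolved from the most recent stored monitoring event for the
// given shard and cluster (sorted by timestamp descending, size 1) rather than by
// querying the cluster directly, so the endpoint returns 404 until such an event has
// been indexed.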

View File

@@ -0,0 +1,52 @@
/* Copyright © INFINI Ltd. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"io"
"net/http"
"src/github.com/buger/jsonparser"
)
func (h *APIHandler) HandleGetTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
clusterID := ps.MustGetParameter("id")
esClient := elastic.GetClient(clusterID)
templates, err := esClient.GetTemplate("")
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteJSON(w, templates, http.StatusOK)
}
func (h *APIHandler) HandleSaveTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
clusterID := ps.MustGetParameter("id")
templateName := ps.MustGetParameter("template_name")
esClient := elastic.GetClient(clusterID)
reqBody, err := io.ReadAll(req.Body)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
esResBody, err := esClient.PutTemplate(templateName, reqBody)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
resErr, _, _, _ := jsonparser.Get(esResBody, "error")
if resErr != nil {
errStr := string(resErr)
log.Errorf("put template error: %s", errStr)
h.WriteError(w, errStr, http.StatusInternalServerError)
return
}
h.WriteAckOKJSON(w)
}
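// Note: PutTemplate returns the raw Elasticsearch response body, so the save handler
// above also inspects the body's "error" field with jsonparser before acknowledging,
// since a nil Go error alone does not guarantee the template was accepted.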

View File

@@ -0,0 +1,162 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
"fmt"
"github.com/segmentio/encoding/json"
util2 "infini.sh/agent/lib/util"
"infini.sh/console/core"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/errors"
"infini.sh/framework/core/model"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"net/http"
"strings"
"time"
)
type TestAPI struct {
core.Handler
}
var testAPI = TestAPI{}
var testInited bool
func InitTestAPI() {
if !testInited {
api.HandleAPIMethod(api.POST, "/elasticsearch/try_connect", testAPI.HandleTestConnectionAction)
testInited = true
}
}
func (h TestAPI) HandleTestConnectionAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var (
freq = httpPool.AcquireRequest()
fres = httpPool.AcquireResponse()
resBody = map[string]interface{}{}
)
defer func() {
httpPool.ReleaseRequest(freq)
httpPool.ReleaseResponse(fres)
}()
var config = &elastic.ElasticsearchConfig{}
err := h.DecodeJSON(req, &config)
if err != nil {
panic(err)
}
defer req.Body.Close()
var url string
if config.Endpoint != "" {
url = config.Endpoint
} else if config.Host != "" && config.Schema != "" {
url = fmt.Sprintf("%s://%s", config.Schema, config.Host)
config.Endpoint = url
} else {
resBody["error"] = fmt.Sprintf("invalid config: %v", util.MustToJSON(config))
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if url == "" {
panic(errors.Error("invalid url: " + util.MustToJSON(config)))
}
if !util.SuffixStr(url, "/") {
url = fmt.Sprintf("%s/", url)
}
freq.SetRequestURI(url)
freq.Header.SetMethod("GET")
if (config.BasicAuth == nil || (config.BasicAuth != nil && config.BasicAuth.Username == "")) &&
config.CredentialID != "" && config.CredentialID != "manual" {
credential, err := common.GetCredential(config.CredentialID)
if err != nil {
panic(err)
}
var dv interface{}
dv, err = credential.Decode()
if err != nil {
panic(err)
}
if auth, ok := dv.(model.BasicAuth); ok {
config.BasicAuth = &auth
}
}
if config.BasicAuth != nil && strings.TrimSpace(config.BasicAuth.Username) != "" {
freq.SetBasicAuth(config.BasicAuth.Username, config.BasicAuth.Password.Get())
}
err = getHttpClient().DoTimeout(freq, fres, 10*time.Second)
if err != nil {
panic(err)
}
var statusCode = fres.StatusCode()
if statusCode > 300 || statusCode == 0 {
resBody["error"] = fmt.Sprintf("invalid status code: %d", statusCode)
h.WriteJSON(w, resBody, 500)
return
}
b := fres.Body()
clusterInfo := &elastic.ClusterInformation{}
err = json.Unmarshal(b, clusterInfo)
if err != nil {
panic(err)
}
resBody["version"] = clusterInfo.Version.Number
resBody["cluster_uuid"] = clusterInfo.ClusterUUID
resBody["cluster_name"] = clusterInfo.ClusterName
resBody["distribution"] = clusterInfo.Version.Distribution
//fetch cluster health info
freq.SetRequestURI(fmt.Sprintf("%s/_cluster/health", config.Endpoint))
fres.Reset()
err = getHttpClient().Do(freq, fres)
if err != nil {
resBody["error"] = fmt.Sprintf("error on get cluster health: %v", err)
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
healthInfo := &elastic.ClusterHealth{}
err = json.Unmarshal(fres.Body(), &healthInfo)
if err != nil {
resBody["error"] = fmt.Sprintf("error on decode cluster health info : %v", err)
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody["status"] = healthInfo.Status
resBody["number_of_nodes"] = healthInfo.NumberOfNodes
resBody["number_of_data_nodes"] = healthInfo.NumberOf_data_nodes
resBody["active_shards"] = healthInfo.ActiveShards
//fetch local node's info
nodeID, nodeInfo, err := util2.GetLocalNodeInfo(config.GetAnyEndpoint(), config.BasicAuth)
if err != nil {
resBody["error"] = fmt.Sprintf("error on get local node info: %v", err)
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody["node_uuid"] = nodeID
resBody["node_info"] = nodeInfo
h.WriteJSON(w, resBody, http.StatusOK)
}
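// Example payload for the try_connect endpoint above (illustrative values; field names
// are assumed from elastic.ElasticsearchConfig's JSON tags; either "endpoint" or
// "host"+"schema" must be supplied, and credentials can instead be referenced via a
// stored credential id):
//
//	{
//	  "endpoint": "https://127.0.0.1:9200",
//	  "basic_auth": {"username": "elastic", "password": "changeme"}
//	}
//
// On success the response carries the cluster version, uuid and name, a health summary,
// and the id/info of the node that answered.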

View File

@@ -0,0 +1,556 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/adapter"
"infini.sh/framework/modules/elastic/common"
"strings"
)
const (
ThreadPoolGetGroupKey = "thread_pool_get"
ThreadPoolSearchGroupKey = "thread_pool_search"
ThreadPoolFlushGroupKey = "thread_pool_flush"
ThreadPoolRefreshGroupKey = "thread_pool_refresh"
ThreadPoolWriteGroupKey = "thread_pool_write"
ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
ThreadPoolIndexGroupKey = "thread_pool_index"
ThreadPoolBulkGroupKey = "thread_pool_bulk"
)
func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) (map[string]*common.MetricItem, error){
clusterUUID, err := adapter.GetClusterUUID(clusterID)
if err != nil {
return nil, err
}
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
var must = []util.MapStr{
{
"term":util.MapStr{
"metadata.labels.cluster_uuid":util.MapStr{
"value": clusterUUID,
},
},
},
{
"term": util.MapStr{
"metadata.category": util.MapStr{
"value": "elasticsearch",
},
},
},
{
"term": util.MapStr{
"metadata.name": util.MapStr{
"value": "node_stats",
},
},
},
}
var (
nodeNames []string
)
if nodeName != "" {
nodeNames = strings.Split(nodeName, ",")
top = len(nodeNames)
}else{
nodeNames, err = h.getTopNodeName(clusterID, top, 15)
if err != nil {
log.Error(err)
}
}
if len(nodeNames) > 0 {
must = append(must, util.MapStr{
"bool": util.MapStr{
"minimum_should_match": 1,
"should": []util.MapStr{
{
"terms": util.MapStr{
"metadata.labels.transport_address": nodeNames,
},
},
{
"terms": util.MapStr{
"metadata.labels.node_id": nodeNames,
},
},
},
},
})
}
query:=map[string]interface{}{}
query["query"]=util.MapStr{
"bool": util.MapStr{
"must": must,
"filter": []util.MapStr{
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": min,
"lte": max,
},
},
},
},
},
}
searchThreadsMetric := newMetricItem("search_threads", 1, ThreadPoolSearchGroupKey)
searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems := []GroupMetricItem{
{
Key: "search_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: searchThreadsMetric,
FormatType: "num",
Units: "",
},
}
searchQueueMetric := newMetricItem("search_queue", 1, ThreadPoolSearchGroupKey)
searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: searchQueueMetric,
FormatType: "num",
Units: "",
})
searchActiveMetric := newMetricItem("search_active", 1, ThreadPoolSearchGroupKey)
searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_active",
Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: searchActiveMetric,
FormatType: "num",
Units: "",
})
searchRejectedMetric := newMetricItem("search_rejected", 1, ThreadPoolSearchGroupKey)
searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: searchRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
getThreadsMetric := newMetricItem("get_threads", 1, ThreadPoolGetGroupKey)
getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: getThreadsMetric,
FormatType: "num",
Units: "",
})
getQueueMetric := newMetricItem("get_queue", 1, ThreadPoolGetGroupKey)
getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: getQueueMetric,
FormatType: "num",
Units: "",
})
getActiveMetric := newMetricItem("get_active", 1, ThreadPoolGetGroupKey)
getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_active",
Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: getActiveMetric,
FormatType: "num",
Units: "",
})
getRejectedMetric := newMetricItem("get_rejected", 1, ThreadPoolGetGroupKey)
getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: getRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
flushThreadsMetric := newMetricItem("flush_threads", 1, ThreadPoolFlushGroupKey)
flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: flushThreadsMetric,
FormatType: "num",
Units: "",
})
flushQueueMetric := newMetricItem("flush_queue", 1, ThreadPoolFlushGroupKey)
flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: flushQueueMetric,
FormatType: "num",
Units: "",
})
flushActiveMetric := newMetricItem("flush_active", 1, ThreadPoolFlushGroupKey)
flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_active",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: flushActiveMetric,
FormatType: "num",
Units: "",
})
flushRejectedMetric := newMetricItem("flush_rejected", 1, ThreadPoolFlushGroupKey)
flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
majorVersion := elastic.GetMetadata(clusterID).GetMajorVersion()
ver := elastic.GetClient(clusterID).GetVersion()
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && majorVersion < 6{
indexThreadsMetric := newMetricItem("index_threads", 1, ThreadPoolIndexGroupKey)
indexThreadsMetric.AddAxi("Index Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.index.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexThreadsMetric,
FormatType: "num",
Units: "",
})
indexQueueMetric := newMetricItem("index_queue", 1, ThreadPoolIndexGroupKey)
indexQueueMetric.AddAxi("Index Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.index.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexQueueMetric,
FormatType: "num",
Units: "",
})
indexActiveMetric := newMetricItem("index_active", 1, ThreadPoolIndexGroupKey)
indexActiveMetric.AddAxi("Index Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_active",
Field: "payload.elasticsearch.node_stats.thread_pool.index.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexActiveMetric,
FormatType: "num",
Units: "",
})
indexRejectedMetric := newMetricItem("index_rejected", 1, ThreadPoolIndexGroupKey)
indexRejectedMetric.AddAxi("Index Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.index.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
bulkThreadsMetric := newMetricItem("bulk_threads", 1, ThreadPoolBulkGroupKey)
bulkThreadsMetric.AddAxi("Bulk Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: bulkThreadsMetric,
FormatType: "num",
Units: "",
})
bulkQueueMetric := newMetricItem("bulk_queue", 1, ThreadPoolBulkGroupKey)
bulkQueueMetric.AddAxi("Bulk Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: bulkQueueMetric,
FormatType: "num",
Units: "",
})
bulkActiveMetric := newMetricItem("bulk_active", 1, ThreadPoolBulkGroupKey)
bulkActiveMetric.AddAxi("Bulk Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_active",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: bulkActiveMetric,
FormatType: "num",
Units: "",
})
bulkRejectedMetric := newMetricItem("bulk_rejected", 1, ThreadPoolBulkGroupKey)
bulkRejectedMetric.AddAxi("Bulk Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: bulkRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
}else {
writeThreadsMetric := newMetricItem("write_threads", 1, ThreadPoolWriteGroupKey)
writeThreadsMetric.AddAxi("Write Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.write.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: writeThreadsMetric,
FormatType: "num",
Units: "",
})
writeQueueMetric := newMetricItem("write_queue", 1, ThreadPoolWriteGroupKey)
writeQueueMetric.AddAxi("Write Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.write.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: writeQueueMetric,
FormatType: "num",
Units: "",
})
writeActiveMetric := newMetricItem("write_active", 1, ThreadPoolWriteGroupKey)
writeActiveMetric.AddAxi("Write Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_active",
Field: "payload.elasticsearch.node_stats.thread_pool.write.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: writeActiveMetric,
FormatType: "num",
Units: "",
})
writeRejectedMetric := newMetricItem("write_rejected", 1, ThreadPoolWriteGroupKey)
writeRejectedMetric.AddAxi("Write Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.write.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: writeRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
}
refreshThreadsMetric := newMetricItem("refresh_threads", 1, ThreadPoolRefreshGroupKey)
refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: refreshThreadsMetric,
FormatType: "num",
Units: "",
})
refreshQueueMetric := newMetricItem("refresh_queue", 1, ThreadPoolRefreshGroupKey)
refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: refreshQueueMetric,
FormatType: "num",
Units: "",
})
refreshActiveMetric := newMetricItem("refresh_active", 1, ThreadPoolRefreshGroupKey)
refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_active",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: refreshActiveMetric,
FormatType: "num",
Units: "",
})
refreshRejectedMetric := newMetricItem("refresh_rejected", 1, ThreadPoolRefreshGroupKey)
refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: refreshRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
forceMergeThreadsMetric := newMetricItem("force_merge_threads", 1, ThreadPoolForceMergeGroupKey)
forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: forceMergeThreadsMetric,
FormatType: "num",
Units: "",
})
forceMergeQueueMetric := newMetricItem("force_merge_queue", 1, ThreadPoolForceMergeGroupKey)
forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: forceMergeQueueMetric,
FormatType: "num",
Units: "",
})
forceMergeActiveMetric := newMetricItem("force_merge_active", 1, ThreadPoolForceMergeGroupKey)
forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_active",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: forceMergeActiveMetric,
FormatType: "num",
Units: "",
})
forceMergeRejectedMetric := newMetricItem("force_merge_rejected", 1, ThreadPoolForceMergeGroupKey)
forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: forceMergeRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
//build the per-metric aggregations (max, plus derivative for rate metrics) for the thread pool stats
aggs:=map[string]interface{}{}
for _,metricItem:=range queueMetricItems{
aggs[metricItem.ID]=util.MapStr{
"max":util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID + "_field2"]=util.MapStr{
"max":util.MapStr{
"field": metricItem.Field2,
},
}
}
if metricItem.IsDerivative{
aggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID + "_field2_deriv"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
}
}
}
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
log.Error(err)
panic(err)
}
query["size"]=0
query["aggs"]= util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.transport_address",
"size": top,
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":aggs,
},
},
},
}
return h.getMetrics(query, queueMetricItems, bucketSize), nil
}
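// Shape of the query built above (sketch): a terms aggregation over
// metadata.labels.transport_address limited to `top` nodes, a nested date_histogram on
// timestamp using the resolved interval field, and one max sub-aggregation per thread
// pool metric, with an additional derivative sub-aggregation for the rate-style
// *_rejected metrics.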

View File

@@ -0,0 +1,168 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"net/http"
"strconv"
"strings"
"time"
)
func (h *APIHandler) HandleCrateTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
targetClusterID := ps.ByName("id")
exists,client,err:=h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists{
resBody["error"] = fmt.Sprintf("cluster [%s] not found",targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
var traceReq = &elastic.TraceTemplate{
}
err = h.DecodeJSON(req, traceReq)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
traceReq.Created = time.Now()
traceReq.Updated = traceReq.Created
traceReq.ClusterID = targetClusterID
var id = util.GetUUID()
insertRes, err := client.Index(orm.GetIndexName(elastic.TraceTemplate{}), "", id, traceReq, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody["_source"] = traceReq
resBody["_id"] = insertRes.ID
resBody["result"] = insertRes.Result
h.WriteJSON(w, resBody,http.StatusOK)
}
func (h *APIHandler) HandleSearchTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string] interface{}{
}
var (
name = h.GetParameterOrDefault(req, "name", "")
queryDSL = `{"query":{"bool":{"must":[%s]}}, "size": %d, "from": %d}`
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
mustBuilder = &strings.Builder{}
)
targetClusterID := ps.ByName("id")
mustBuilder.WriteString(fmt.Sprintf(`{"term":{"cluster_id":{"value": "%s"}}}`, targetClusterID))
if name != ""{
mustBuilder.WriteString(fmt.Sprintf(`,{"prefix":{"name": "%s"}}`, name))
}
size, _ := strconv.Atoi(strSize)
if size <= 0 {
size = 20
}
from, _ := strconv.Atoi(strFrom)
if from < 0 {
from = 0
}
queryDSL = fmt.Sprintf(queryDSL, mustBuilder.String(), size, from)
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
res, err := esClient.SearchWithRawQueryDSL(orm.GetIndexName(elastic.TraceTemplate{}), []byte(queryDSL))
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, res, http.StatusOK)
}
func (h *APIHandler) HandleSaveTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{
}
reqParams := elastic.TraceTemplate{}
err := h.DecodeJSON(req, &reqParams)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
reqParams.ID = ps.ByName("template_id")
reqParams.Updated = time.Now()
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
_, err = esClient.Index(orm.GetIndexName(reqParams),"", reqParams.ID, reqParams, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody["_id"] = reqParams.ID
resBody["result"] = "updated"
resBody["_source"] = reqParams
h.WriteJSON(w, resBody,http.StatusOK)
}
func (h *APIHandler) HandleGetTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params){
resBody := map[string] interface{}{}
id := ps.ByName("template_id")
indexName := orm.GetIndexName(elastic.TraceTemplate{})
getResponse, err := h.Client().Get(indexName, "", id)
if err != nil {
	log.Error(err)
	resBody["error"] = err.Error()
	h.WriteJSON(w, resBody, http.StatusInternalServerError)
	return
}
h.WriteJSON(w,getResponse, getResponse.StatusCode)
}
func (h *APIHandler) HandleDeleteTraceTemplateAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
id := ps.ByName("template_id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
delRes, err := esClient.Delete(orm.GetIndexName(elastic.TraceTemplate{}), "", id, "wait_for")
if err != nil {
	log.Error(err)
	resBody["error"] = err.Error()
	if delRes != nil {
		h.WriteJSON(w, resBody, delRes.StatusCode)
	} else {
		h.WriteJSON(w, resBody, http.StatusInternalServerError)
	}
	return
}
elastic.RemoveInstance(id)
resBody["_id"] = id
resBody["result"] = delRes.Result
h.WriteJSON(w, resBody, delRes.StatusCode)
}

File diff suppressed because it is too large


@ -0,0 +1,838 @@
package v1
import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/radix"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"net/http"
"sort"
"strings"
"time"
)
func (h *APIHandler) getIndexMetrics(req *http.Request, clusterID string, bucketSize int, min, max int64, indexName string, top int) map[string]*common.MetricItem{
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
var must = []util.MapStr{
{
"term":util.MapStr{
"metadata.labels.cluster_id":util.MapStr{
"value": clusterID,
},
},
},
{
"term": util.MapStr{
"metadata.category": util.MapStr{
"value": "elasticsearch",
},
},
},
{
"term": util.MapStr{
"metadata.name": util.MapStr{
"value": "index_stats",
},
},
},
}
var (
indexNames []string
err error
)
if indexName != "" {
indexNames = strings.Split(indexName, ",")
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
if !hasAllPrivilege && len(allowedIndices) == 0 {
return nil
}
if !hasAllPrivilege{
namePattern := radix.Compile(allowedIndices...)
var filterNames []string
for _, name := range indexNames {
if namePattern.Match(name){
filterNames = append(filterNames, name)
}
}
if len(filterNames) == 0 {
return nil
}
indexNames = filterNames
}
top = len(indexNames)
}else{
indexNames, err = h.getTopIndexName(req, clusterID, top, 15)
if err != nil {
log.Error(err)
}
}
if len(indexNames) > 0 {
must = append(must, util.MapStr{
"terms": util.MapStr{
"metadata.labels.index_name": indexNames,
},
})
}
query:=map[string]interface{}{}
query["query"]=util.MapStr{
"bool": util.MapStr{
"must": must,
"must_not": []util.MapStr{
{
"term": util.MapStr{
"metadata.labels.index_name": util.MapStr{
"value": "_all",
},
},
},
},
"filter": []util.MapStr{
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": min,
"lte": max,
},
},
},
},
},
}
//index storage size
indexStorageMetric := newMetricItem("index_storage", 1, StorageGroupKey)
indexStorageMetric.AddAxi("Index storage","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems := []GroupMetricItem{
{
Key: "index_storage",
Field: "payload.elasticsearch.index_stats.total.store.size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexStorageMetric,
FormatType: "bytes",
Units: "",
},
}
// segment count
segmentCountMetric:=newMetricItem("segment_count", 15, StorageGroupKey)
segmentCountMetric.AddAxi("segment count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_count",
Field: "payload.elasticsearch.index_stats.total.segments.count",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentCountMetric,
FormatType: "num",
Units: "",
})
//index document count
docCountMetric := newMetricItem("doc_count", 2, DocumentGroupKey)
docCountMetric.AddAxi("Doc count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "doc_count",
Field: "payload.elasticsearch.index_stats.total.docs.count",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docCountMetric,
FormatType: "num",
Units: "",
})
// deleted docs count
docsDeletedMetric:=newMetricItem("docs_deleted", 17, DocumentGroupKey)
docsDeletedMetric.AddAxi("docs deleted","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "docs_deleted",
Field: "payload.elasticsearch.index_stats.total.docs.deleted",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docsDeletedMetric,
FormatType: "num",
Units: "",
})
//query count
queryTimesMetric := newMetricItem("query_times", 2, OperationGroupKey)
queryTimesMetric.AddAxi("Query times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_times",
Field: "payload.elasticsearch.index_stats.total.search.query_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryTimesMetric,
FormatType: "num",
Units: "requests/s",
})
//fetch count
fetchTimesMetric := newMetricItem("fetch_times", 3, OperationGroupKey)
fetchTimesMetric.AddAxi("Fetch times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_times",
Field: "payload.elasticsearch.index_stats.total.search.fetch_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: fetchTimesMetric,
FormatType: "num",
Units: "requests/s",
})
//scroll count
scrollTimesMetric := newMetricItem("scroll_times", 4, OperationGroupKey)
scrollTimesMetric.AddAxi("scroll times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_times",
Field: "payload.elasticsearch.index_stats.total.search.scroll_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: scrollTimesMetric,
FormatType: "num",
Units: "requests/s",
})
//merge count
mergeTimesMetric := newMetricItem("merge_times", 7, OperationGroupKey)
mergeTimesMetric.AddAxi("Merge times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_times",
Field: "payload.elasticsearch.index_stats.total.merges.total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: mergeTimesMetric,
FormatType: "num",
Units: "requests/s",
})
//refresh count
refreshTimesMetric := newMetricItem("refresh_times", 5, OperationGroupKey)
refreshTimesMetric.AddAxi("Refresh times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_times",
Field: "payload.elasticsearch.index_stats.total.refresh.total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: refreshTimesMetric,
FormatType: "num",
Units: "requests/s",
})
//flush count
flushTimesMetric := newMetricItem("flush_times", 6, OperationGroupKey)
flushTimesMetric.AddAxi("flush times","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_times",
Field: "payload.elasticsearch.index_stats.total.flush.total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushTimesMetric,
FormatType: "num",
Units: "requests/s",
})
//indexing rate
indexingRateMetric := newMetricItem("indexing_rate", 1, OperationGroupKey)
indexingRateMetric.AddAxi("Indexing rate","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_rate",
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexingRateMetric,
FormatType: "num",
Units: "doc/s",
})
indexingBytesMetric := newMetricItem("indexing_bytes", 2, OperationGroupKey)
indexingBytesMetric.AddAxi("Indexing bytes","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_bytes",
Field: "payload.elasticsearch.index_stats.primaries.store.size_in_bytes",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexingBytesMetric,
FormatType: "bytes",
Units: "bytes/s",
})
//indexing latency
indexingLatencyMetric := newMetricItem("indexing_latency", 1, LatencyGroupKey)
indexingLatencyMetric.AddAxi("Indexing latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "indexing_latency",
Field: "payload.elasticsearch.index_stats.primaries.indexing.index_time_in_millis",
Field2: "payload.elasticsearch.index_stats.primaries.indexing.index_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexingLatencyMetric,
FormatType: "num",
Units: "ms",
})
//query latency
queryLatencyMetric := newMetricItem("query_latency", 2, LatencyGroupKey)
queryLatencyMetric.AddAxi("Query latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_latency",
Field: "payload.elasticsearch.index_stats.total.search.query_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.search.query_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryLatencyMetric,
FormatType: "num",
Units: "ms",
})
//fetch latency
fetchLatencyMetric := newMetricItem("fetch_latency", 3, LatencyGroupKey)
fetchLatencyMetric.AddAxi("Fetch latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "fetch_latency",
Field: "payload.elasticsearch.index_stats.total.search.fetch_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.search.fetch_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: fetchLatencyMetric,
FormatType: "num",
Units: "ms",
})
//merge latency
mergeLatencyMetric := newMetricItem("merge_latency", 7, LatencyGroupKey)
mergeLatencyMetric.AddAxi("Merge latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "merge_latency",
Field: "payload.elasticsearch.index_stats.total.merges.total_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.merges.total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: mergeLatencyMetric,
FormatType: "num",
Units: "ms",
})
//refresh latency
refreshLatencyMetric := newMetricItem("refresh_latency", 5, LatencyGroupKey)
refreshLatencyMetric.AddAxi("Refresh latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "refresh_latency",
Field: "payload.elasticsearch.index_stats.total.refresh.total_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.refresh.total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: refreshLatencyMetric,
FormatType: "num",
Units: "ms",
})
//scroll latency
scrollLatencyMetric := newMetricItem("scroll_latency", 4, LatencyGroupKey)
scrollLatencyMetric.AddAxi("Scroll Latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "scroll_latency",
Field: "payload.elasticsearch.index_stats.total.search.scroll_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.search.scroll_total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: scrollLatencyMetric,
FormatType: "num",
Units: "ms",
})
//flush latency
flushLatencyMetric := newMetricItem("flush_latency", 6, LatencyGroupKey)
flushLatencyMetric.AddAxi("Flush latency","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "flush_latency",
Field: "payload.elasticsearch.index_stats.total.flush.total_time_in_millis",
Field2: "payload.elasticsearch.index_stats.total.flush.total",
Calc: func(value, value2 float64) float64 {
return value/value2
},
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushLatencyMetric,
FormatType: "num",
Units: "ms",
})
//queryCache
queryCacheMetric := newMetricItem("query_cache", 1, CacheGroupKey)
queryCacheMetric.AddAxi("Query cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "query_cache",
Field: "payload.elasticsearch.index_stats.total.query_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: queryCacheMetric,
FormatType: "bytes",
Units: "",
})
//requestCache
requestCacheMetric := newMetricItem("request_cache", 2, CacheGroupKey)
requestCacheMetric.AddAxi("request cache","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "request_cache",
Field: "payload.elasticsearch.index_stats.total.request_cache.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: requestCacheMetric,
FormatType: "bytes",
Units: "",
})
// Request Cache Hit
requestCacheHitMetric:=newMetricItem("request_cache_hit", 6, CacheGroupKey)
requestCacheHitMetric.AddAxi("request cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_hit",
Field: "payload.elasticsearch.index_stats.total.request_cache.hit_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: requestCacheHitMetric,
FormatType: "num",
Units: "hits",
})
// Request Cache Miss
requestCacheMissMetric:=newMetricItem("request_cache_miss", 8, CacheGroupKey)
requestCacheMissMetric.AddAxi("request cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "request_cache_miss",
Field: "payload.elasticsearch.index_stats.total.request_cache.miss_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: requestCacheMissMetric,
FormatType: "num",
Units: "misses",
})
// Query Cache Count
queryCacheCountMetric:=newMetricItem("query_cache_count", 4, CacheGroupKey)
queryCacheCountMetric.AddAxi("query cache count","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_count",
Field: "payload.elasticsearch.index_stats.total.query_cache.cache_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheCountMetric,
FormatType: "num",
Units: "",
})
// Query Cache Hit
queryCacheHitMetric:=newMetricItem("query_cache_hit", 5, CacheGroupKey)
queryCacheHitMetric.AddAxi("query cache hit","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_hit",
Field: "payload.elasticsearch.index_stats.total.query_cache.hit_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheHitMetric,
FormatType: "num",
Units: "hits",
})
//// Query Cache evictions
//queryCacheEvictionsMetric:=newMetricItem("query_cache_evictions", 11, CacheGroupKey)
//queryCacheEvictionsMetric.AddAxi("query cache evictions","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
//indexMetricItems=append(indexMetricItems, GroupMetricItem{
// Key: "query_cache_evictions",
// Field: "payload.elasticsearch.index_stats.total.query_cache.evictions",
// ID: util.GetUUID(),
// IsDerivative: true,
// MetricItem: queryCacheEvictionsMetric,
// FormatType: "num",
// Units: "evictions",
//})
// Query Cache Miss
queryCacheMissMetric:=newMetricItem("query_cache_miss", 7, CacheGroupKey)
queryCacheMissMetric.AddAxi("query cache miss","group1",common.PositionLeft,"num","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "query_cache_miss",
Field: "payload.elasticsearch.index_stats.total.query_cache.miss_count",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: queryCacheMissMetric,
FormatType: "num",
Units: "misses",
})
// fielddata memory usage
fieldDataCacheMetric:=newMetricItem("fielddata_cache", 3, CacheGroupKey)
fieldDataCacheMetric.AddAxi("FieldData Cache","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "fielddata_cache",
Field: "payload.elasticsearch.index_stats.total.fielddata.memory_size_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: fieldDataCacheMetric,
FormatType: "bytes",
Units: "",
})
//segment memory
segmentMemoryMetric := newMetricItem("segment_memory", 13, MemoryGroupKey)
segmentMemoryMetric.AddAxi("Segment memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_memory",
Field: "payload.elasticsearch.index_stats.total.segments.memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentMemoryMetric,
FormatType: "bytes",
Units: "",
})
//segment doc values memory
docValuesMemoryMetric := newMetricItem("segment_doc_values_memory", 13, MemoryGroupKey)
docValuesMemoryMetric.AddAxi("Segment Doc values Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_doc_values_memory",
Field: "payload.elasticsearch.index_stats.total.segments.doc_values_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: docValuesMemoryMetric,
FormatType: "bytes",
Units: "",
})
//segment terms memory
termsMemoryMetric := newMetricItem("segment_terms_memory", 13, MemoryGroupKey)
termsMemoryMetric.AddAxi("Segment Terms Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_terms_memory",
Field: "payload.elasticsearch.index_stats.total.segments.terms_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: termsMemoryMetric,
FormatType: "bytes",
Units: "",
})
//segment fields memory
fieldsMemoryMetric := newMetricItem("segment_fields_memory", 13, MemoryGroupKey)
fieldsMemoryMetric.AddAxi("Segment Fields Memory","group1",common.PositionLeft,"bytes","0.[0]","0.[0]",5,true)
indexMetricItems = append(indexMetricItems, GroupMetricItem{
Key: "segment_fields_memory",
Field: "payload.elasticsearch.index_stats.total.segments.stored_fields_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: fieldsMemoryMetric,
FormatType: "bytes",
Units: "",
})
// segment index writer memory
segmentIndexWriterMemoryMetric:=newMetricItem("segment_index_writer_memory", 16, MemoryGroupKey)
segmentIndexWriterMemoryMetric.AddAxi("segment index writer memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_index_writer_memory",
Field: "payload.elasticsearch.index_stats.total.segments.index_writer_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentIndexWriterMemoryMetric,
FormatType: "bytes",
Units: "",
})
// segment term vectors memory
segmentTermVectorsMemoryMetric:=newMetricItem("segment_term_vectors_memory", 16, MemoryGroupKey)
segmentTermVectorsMemoryMetric.AddAxi("segment term vectors memory","group1",common.PositionLeft,"bytes","0,0","0,0.[00]",5,true)
indexMetricItems=append(indexMetricItems, GroupMetricItem{
Key: "segment_term_vectors_memory",
Field: "payload.elasticsearch.index_stats.total.segments.term_vectors_memory_in_bytes",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: segmentTermVectorsMemoryMetric,
FormatType: "bytes",
Units: "",
})
aggs:=map[string]interface{}{}
for _,metricItem:=range indexMetricItems {
aggs[metricItem.ID]=util.MapStr{
"max":util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.Field2 != ""{
aggs[metricItem.ID + "_field2"]=util.MapStr{
"max":util.MapStr{
"field": metricItem.Field2,
},
}
}
if metricItem.IsDerivative{
aggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID + "_deriv_field2"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
}
}
}
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
log.Error(err)
panic(err)
}
query["size"]=0
query["aggs"]= util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
"size": top,
"order": util.MapStr{
"max_store": "desc",
},
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":aggs,
},
"max_store": util.MapStr{
"max": util.MapStr{
"field": "payload.elasticsearch.index_stats.total.store.size_in_bytes",
},
},
},
},
}
return h.getMetrics(query, indexMetricItems, bucketSize)
}
func (h *APIHandler) getTopIndexName(req *http.Request, clusterID string, top int, lastMinutes int) ([]string, error){
ver := h.Client().GetVersion()
cr, _ := util.VersionCompare(ver.Number, "6.1")
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && cr == -1 {
return nil, nil
}
var (
now = time.Now()
max = now.UnixNano()/1e6
min = now.Add(-time.Duration(lastMinutes) * time.Minute).UnixNano()/1e6
)
var must = []util.MapStr{
{
"term": util.MapStr{
"metadata.category": util.MapStr{
"value": "elasticsearch",
},
},
},
{
"term": util.MapStr{
"metadata.name": util.MapStr{
"value": "index_stats",
},
},
},
{
"term": util.MapStr{
"metadata.labels.cluster_id": util.MapStr{
"value": clusterID,
},
},
},
}
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, clusterID)
if !hasAllPrivilege && len(allowedIndices) == 0 {
return nil, fmt.Errorf("no index permission")
}
if !hasAllPrivilege {
must = append(must, util.MapStr{
"query_string": util.MapStr{
"query": strings.Join(allowedIndices, " "),
"fields": []string{"metadata.labels.index_name"},
"default_operator": "OR",
},
})
}
bucketSizeStr := "60s"
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
return nil, err
}
query := util.MapStr{
"size": 0,
"query": util.MapStr{
"bool": util.MapStr{
"must_not": []util.MapStr{
{
"term": util.MapStr{
"metadata.labels.index_name": util.MapStr{
"value": "_all",
},
},
},
},
"must": must,
"filter": []util.MapStr{
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": min,
"lte": max,
},
},
},
},
},
},
"aggs": util.MapStr{
"group_by_index": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
"size": 10000,
},
"aggs": util.MapStr{
"max_qps": util.MapStr{
"max_bucket": util.MapStr{
"buckets_path": "dates>search_qps",
},
},
"max_qps_bucket_sort": util.MapStr{
"bucket_sort": util.MapStr{
"sort": []util.MapStr{
{"max_qps": util.MapStr{"order": "desc"}}},
"size": top,
},
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"search_query_total": util.MapStr{
"max": util.MapStr{
"field": "payload.elasticsearch.index_stats.total.search.query_total",
},
},
"search_qps": util.MapStr{
"derivative": util.MapStr{
"buckets_path": "search_query_total",
},
},
},
},
},
},
"group_by_index1": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.index_name",
"size": 10000,
},
"aggs": util.MapStr{
"max_qps": util.MapStr{
"max_bucket": util.MapStr{
"buckets_path": "dates>index_qps",
},
},
"max_qps_bucket_sort": util.MapStr{
"bucket_sort": util.MapStr{
"sort": []util.MapStr{
{"max_qps": util.MapStr{"order": "desc"}},
},
"size": top,
},
},
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": util.MapStr{
"index_total": util.MapStr{
"max": util.MapStr{
"field": "payload.elasticsearch.index_stats.total.indexing.index_total",
},
},
"index_qps": util.MapStr{
"derivative": util.MapStr{
"buckets_path": "index_total",
},
},
},
},
},
},
},
}
response,err:=elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(),util.MustToJSONBytes(query))
if err!=nil{
log.Error(err)
return nil, err
}
var maxQpsKVS = map[string] float64{}
for _, agg := range response.Aggregations {
for _, bk := range agg.Buckets {
key := bk["key"].(string)
if maxQps, ok := bk["max_qps"].(map[string]interface{}); ok {
val := maxQps["value"].(float64)
if _, ok = maxQpsKVS[key] ; ok {
maxQpsKVS[key] = maxQpsKVS[key] + val
}else{
maxQpsKVS[key] = val
}
}
}
}
var (
qpsValues TopTermOrder
)
for k, v := range maxQpsKVS {
qpsValues = append(qpsValues, TopTerm{
Key: k,
Value: v,
})
}
sort.Sort(qpsValues)
var length = top
if top > len(qpsValues) {
length = len(qpsValues)
}
indexNames := []string{}
for i := 0; i <length; i++ {
indexNames = append(indexNames, qpsValues[i].Key)
}
return indexNames, nil
}
type TopTerm struct {
Key string
Value float64
}
type TopTermOrder []TopTerm
func (t TopTermOrder) Len() int{
return len(t)
}
func (t TopTermOrder) Less(i, j int) bool{
return t[i].Value > t[j].Value //desc
}
func (t TopTermOrder) Swap(i, j int){
t[i], t[j] = t[j], t[i]
}
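
TopTermOrder implements sort.Interface with a descending comparison on Value, which is what getTopIndexName relies on when it keeps only the first top keys. A minimal sketch with made-up values (not from the source):

terms := TopTermOrder{
	{Key: "index-a", Value: 12.5},
	{Key: "index-b", Value: 80},
	{Key: "index-c", Value: 3.2},
}
sort.Sort(terms)
// terms is now ordered index-b, index-a, index-c (highest Value first)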

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,911 @@
package v1
import (
"fmt"
"infini.sh/framework/core/env"
"net/http"
"strings"
"time"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
)
func newMetricItem(metricKey string, order int, group string) *common.MetricItem {
metricItem := common.MetricItem{
Order: order,
Key: metricKey,
Group: group,
}
//axis
metricItem.Axis = []*common.MetricAxis{}
//lines
metricItem.Lines = []*common.MetricLine{}
return &metricItem
}
type GroupMetricItem struct {
Key string
Field string
ID string
IsDerivative bool
Units string
FormatType string
MetricItem *common.MetricItem
Field2 string
Calc func(value, value2 float64) float64
}
type TreeMapNode struct {
Name string `json:"name"`
Value float64 `json:"value,omitempty"`
Children []*TreeMapNode `json:"children,omitempty"`
SubKeys map[string]int `json:"-"`
}
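
TreeMapNode is a small recursive structure for treemap-style breakdowns: Value carries the node's weight, Children nests sub-nodes, and SubKeys is a transient lookup table excluded from JSON (json:"-"). A minimal sketch with hypothetical names:

root := TreeMapNode{
	Name:    "cluster-a", // hypothetical cluster name
	SubKeys: map[string]int{},
	Children: []*TreeMapNode{
		{Name: "index-1", Value: 1024}, // hypothetical sizes
		{Name: "index-2", Value: 2048},
	},
}
_ = root // the struct tags suggest the tree is meant to be serialized to JSON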
type MetricData map[string][][]interface{}
func generateGroupAggs(nodeMetricItems []GroupMetricItem) map[string]interface{} {
aggs := map[string]interface{}{}
for _, metricItem := range nodeMetricItems {
aggs[metricItem.ID] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID+"_field2"] = util.MapStr{
"max": util.MapStr{
"field": metricItem.Field2,
},
}
}
if metricItem.IsDerivative {
aggs[metricItem.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID+"_deriv_field2"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
}
}
}
return aggs
}
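
As a quick illustration of the mapping above, the sketch below (hypothetical ID; the field is borrowed from the index-stats metrics earlier in this commit) shows the two aggregations generateGroupAggs emits for a derivative metric:

items := []GroupMetricItem{
	{
		Key:          "query_times",
		Field:        "payload.elasticsearch.index_stats.total.search.query_total",
		ID:           "query_times_agg", // the real code uses util.GetUUID()
		IsDerivative: true,
	},
}
aggs := generateGroupAggs(items)
// aggs["query_times_agg"]       => {"max": {"field": "...search.query_total"}}
// aggs["query_times_agg_deriv"] => {"derivative": {"buckets_path": "query_times_agg"}}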
func (h *APIHandler) getMetrics(query map[string]interface{}, grpMetricItems []GroupMetricItem, bucketSize int) map[string]*common.MetricItem {
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
if err != nil {
log.Error(err)
panic(err)
}
grpMetricItemsIndex := map[string]int{}
for i, item := range grpMetricItems {
grpMetricItemsIndex[item.ID] = i
}
grpMetricData := map[string]MetricData{}
var minDate, maxDate int64
if response.StatusCode == 200 {
if nodeAgg, ok := response.Aggregations["group_by_level"]; ok {
for _, bucket := range nodeAgg.Buckets {
grpKey := bucket["key"].(string)
for _, metricItem := range grpMetricItems {
metricItem.MetricItem.AddLine(metricItem.Key, grpKey, "", "group1", metricItem.Field, "max", bucketSizeStr, metricItem.Units, metricItem.FormatType, "0.[00]", "0.[00]", false, false)
dataKey := metricItem.ID
if metricItem.IsDerivative {
dataKey = dataKey + "_deriv"
}
if _, ok := grpMetricData[dataKey]; !ok {
grpMetricData[dataKey] = map[string][][]interface{}{}
}
grpMetricData[dataKey][grpKey] = [][]interface{}{}
}
if datesAgg, ok := bucket["dates"].(map[string]interface{}); ok {
if datesBuckets, ok := datesAgg["buckets"].([]interface{}); ok {
for _, dateBucket := range datesBuckets {
if bucketMap, ok := dateBucket.(map[string]interface{}); ok {
v, ok := bucketMap["key"].(float64)
if !ok {
panic("invalid bucket key")
}
dateTime := (int64(v))
minDate = util.MinInt64(minDate, dateTime)
maxDate = util.MaxInt64(maxDate, dateTime)
for mk1, mv1 := range grpMetricData {
v1, ok := bucketMap[mk1]
if ok {
v2, ok := v1.(map[string]interface{})
if ok {
v3, ok := v2["value"].(float64)
if ok {
metricID := mk1
if strings.HasSuffix(mk1, "_deriv") {
metricID = strings.TrimSuffix(mk1, "_deriv")
if _, ok := bucketMap[mk1+"_field2"]; !ok {
v3 = v3 / float64(bucketSize)
}
}
if field2, ok := bucketMap[mk1+"_field2"]; ok {
if idx, ok := grpMetricItemsIndex[metricID]; ok {
if field2Map, ok := field2.(map[string]interface{}); ok {
v4 := field2Map["value"].(float64)
if v4 == 0 {
v3 = 0
} else {
v3 = grpMetricItems[idx].Calc(v3, v4)
}
}
}
}
if v3 < 0 {
continue
}
points := []interface{}{dateTime, v3}
mv1[grpKey] = append(mv1[grpKey], points)
}
}
}
}
}
}
}
}
}
}
}
result := map[string]*common.MetricItem{}
for _, metricItem := range grpMetricItems {
for _, line := range metricItem.MetricItem.Lines {
line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
dataKey := metricItem.ID
if metricItem.IsDerivative {
dataKey = dataKey + "_deriv"
}
line.Data = grpMetricData[dataKey][line.Metric.Label]
}
result[metricItem.Key] = metricItem.MetricItem
}
return result
}
func GetMinBucketSize() int {
metricsCfg := struct {
MinBucketSizeInSeconds int `config:"min_bucket_size_in_seconds"`
}{
MinBucketSizeInSeconds: 20,
}
_, _ = env.ParseConfig("insight", &metricsCfg)
if metricsCfg.MinBucketSizeInSeconds < 20 {
metricsCfg.MinBucketSizeInSeconds = 20
}
return metricsCfg.MinBucketSizeInSeconds
}
// defaultBucketSize is the time interval, in seconds, of each aggregation bucket
func (h *APIHandler) getMetricRangeAndBucketSize(req *http.Request, defaultBucketSize, defaultMetricCount int) (int, int64, int64, error) {
minBucketSizeInSeconds := GetMinBucketSize()
if defaultBucketSize <= 0 {
defaultBucketSize = minBucketSizeInSeconds
}
if defaultMetricCount <= 0 {
defaultMetricCount = 15 * 60
}
bucketSize := defaultBucketSize
bucketSizeStr := h.GetParameterOrDefault(req, "bucket_size", "") //the time range of each bucket, in seconds (default 10)
if bucketSizeStr != "" {
du, err := util.ParseDuration(bucketSizeStr)
if err != nil {
return 0, 0, 0, err
}
bucketSize = int(du.Seconds())
}else {
bucketSize = 0
}
metricCount := h.GetIntOrDefault(req, "metric_count", defaultMetricCount) //default: a 15-minute window with 15 metric points per minute, i.e. 15*6 = 90 buckets
//min,max are unix nanoseconds
minStr := h.Get(req, "min", "")
maxStr := h.Get(req, "max", "")
return GetMetricRangeAndBucketSize(minStr, maxStr, bucketSize, metricCount)
}
func GetMetricRangeAndBucketSize(minStr string, maxStr string, bucketSize int, metricCount int) (int, int64, int64, error) {
var min, max int64
var rangeFrom, rangeTo time.Time
var err error
var useMinMax = bucketSize == 0
now := time.Now()
if minStr == "" {
rangeFrom = now.Add(-time.Second * time.Duration(bucketSize*metricCount+1))
} else {
//try 2021-08-21T14:06:04.818Z
rangeFrom, err = util.ParseStandardTime(minStr)
if err != nil {
//try 1629637500000
v, err := util.ToInt64(minStr)
if err != nil {
log.Error("invalid timestamp:", minStr, err)
rangeFrom = now.Add(-time.Second * time.Duration(bucketSize*metricCount+1))
} else {
rangeFrom = util.FromUnixTimestamp(v / 1000)
}
}
}
if maxStr == "" {
rangeTo = now.Add(-time.Second * time.Duration(int(1*(float64(bucketSize)))))
} else {
rangeTo, err = util.ParseStandardTime(maxStr)
if err != nil {
v, err := util.ToInt64(maxStr)
if err != nil {
log.Error("invalid timestamp:", maxStr, err)
rangeTo = now.Add(-time.Second * time.Duration(int(1*(float64(bucketSize)))))
} else {
rangeTo = util.FromUnixTimestamp(int64(v) / 1000)
}
}
}
min = rangeFrom.UnixNano() / 1e6
max = rangeTo.UnixNano() / 1e6
hours := rangeTo.Sub(rangeFrom).Hours()
if useMinMax {
if hours <= 0.25 {
bucketSize = GetMinBucketSize()
} else if hours <= 0.5 {
bucketSize = 30
} else if hours <= 2 {
bucketSize = 60
} else if hours < 3 {
bucketSize = 90
} else if hours < 6 {
bucketSize = 120
} else if hours < 12 {
bucketSize = 60 * 3
} else if hours < 25 { //1day
bucketSize = 60 * 5 * 2
} else if hours <= 7*24+1 { //7days
bucketSize = 60 * 15 * 2
} else if hours <= 15*24+1 { //15days
bucketSize = 60 * 30 * 2
} else if hours < 30*24+1 { //<30 days
bucketSize = 60 * 60 //hourly
} else if hours <= 30*24+1 { //<30days
bucketSize = 12 * 60 * 60 //half daily
} else if hours >= 30*24+1 { //>30days
bucketSize = 60 * 60 * 24 //daily bucket
}
}
return bucketSize, min, max, nil
}
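
A brief usage sketch (hypothetical timestamps, using the layout shown in the comments above): when bucketSize is passed as 0 the helper derives it from the min/max range, so a two-hour window lands in the 60-second branch:

bucketSize, min, max, err := GetMetricRangeAndBucketSize(
	"2022-01-27T14:00:00.000Z", // min
	"2022-01-27T16:00:00.000Z", // max
	0,     // 0 => derive the bucket size from the range
	15*60, // default metric count
)
if err == nil {
	fmt.Println(bucketSize, min, max) // 60, followed by the range in epoch milliseconds
}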
// getSingleMetrics fetches a single metric, which may contain multiple lines
func (h *APIHandler) getSingleMetrics(metricItems []*common.MetricItem, query map[string]interface{}, bucketSize int) map[string]*common.MetricItem {
metricData := map[string][][]interface{}{}
aggs := map[string]interface{}{}
metricItemsMap := map[string]*common.MetricLine{}
for _, metricItem := range metricItems {
for _, line := range metricItem.Lines {
metricItemsMap[line.Metric.GetDataKey()] = line
metricData[line.Metric.GetDataKey()] = [][]interface{}{}
aggs[line.Metric.ID] = util.MapStr{
line.Metric.MetricAgg: util.MapStr{
"field": line.Metric.Field,
},
}
if line.Metric.Field2 != "" {
aggs[line.Metric.ID+"_field2"] = util.MapStr{
line.Metric.MetricAgg: util.MapStr{
"field": line.Metric.Field2,
},
}
}
if line.Metric.IsDerivative {
//add which metric keys to extract
aggs[line.Metric.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": line.Metric.ID,
},
}
if line.Metric.Field2 != "" {
aggs[line.Metric.ID+"_deriv_field2"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": line.Metric.ID + "_field2",
},
}
}
}
}
}
bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
clusterID := global.MustLookupString(elastic.GlobalSystemElasticsearchID)
intervalField, err := getDateHistogramIntervalField(clusterID, bucketSizeStr)
if err != nil {
log.Error(err)
panic(err)
}
query["size"] = 0
query["aggs"] = util.MapStr{
"dates": util.MapStr{
"date_histogram": util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs": aggs,
},
}
response, err := elastic.GetClient(clusterID).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
if err != nil {
log.Error(err)
panic(err)
}
var minDate, maxDate int64
if response.StatusCode == 200 {
for _, v := range response.Aggregations {
for _, bucket := range v.Buckets {
v, ok := bucket["key"].(float64)
if !ok {
panic("invalid bucket key")
}
dateTime := (int64(v))
minDate = util.MinInt64(minDate, dateTime)
maxDate = util.MaxInt64(maxDate, dateTime)
for mk1, mv1 := range metricData {
v1, ok := bucket[mk1]
if ok {
v2, ok := v1.(map[string]interface{})
if ok {
v3, ok := v2["value"].(float64)
if ok {
if strings.HasSuffix(mk1, "_deriv") {
if _, ok := bucket[mk1+"_field2"]; !ok {
v3 = v3 / float64(bucketSize)
}
}
if field2, ok := bucket[mk1+"_field2"]; ok {
if line, ok := metricItemsMap[mk1]; ok {
if field2Map, ok := field2.(map[string]interface{}); ok {
v4 := field2Map["value"].(float64)
if v4 == 0 {
v3 = 0
} else {
v3 = line.Metric.Calc(v3, v4)
}
}
}
}
if v3 < 0 {
continue
}
points := []interface{}{dateTime, v3}
metricData[mk1] = append(mv1, points)
}
}
}
}
}
}
}
result := map[string]*common.MetricItem{}
for _, metricItem := range metricItems {
for _, line := range metricItem.Lines {
line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
line.Data = metricData[line.Metric.GetDataKey()]
}
result[metricItem.Key] = metricItem
}
return result
}
//func (h *APIHandler) executeQuery(query map[string]interface{}, bucketItems *[]common.BucketItem, bucketSize int) map[string]*common.MetricItem {
// response, err := elastic.GetClient(h.Config.Elasticsearch).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
//
//}
func (h *APIHandler) getBucketMetrics(query map[string]interface{}, bucketItems *[]common.BucketItem, bucketSize int) map[string]*common.MetricItem {
//bucketSizeStr := fmt.Sprintf("%vs", bucketSize)
response, err := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID)).SearchWithRawQueryDSL(getAllMetricsIndex(), util.MustToJSONBytes(query))
if err != nil {
log.Error(err)
panic(err)
}
//grpMetricItemsIndex := map[string]int{}
for _, item := range *bucketItems {
//grpMetricItemsIndex[item.Key] = i
agg, ok := response.Aggregations[item.Key]
if ok {
fmt.Println(len(agg.Buckets))
}
}
//grpMetricData := map[string]MetricData{}
//var minDate, maxDate int64
//if response.StatusCode == 200 {
// if nodeAgg, ok := response.Aggregations["group_by_level"]; ok {
// for _, bucket := range nodeAgg.Buckets {
// grpKey := bucket["key"].(string)
// for _, metricItem := range *bucketItems {
// metricItem.MetricItem.AddLine(metricItem.Key, grpKey, "", "group1", metricItem.Field, "max", bucketSizeStr, metricItem.Units, metricItem.FormatType, "0.[00]", "0.[00]", false, false)
// dataKey := metricItem.Key
// if metricItem.IsDerivative {
// dataKey = dataKey + "_deriv"
// }
// if _, ok := grpMetricData[dataKey]; !ok {
// grpMetricData[dataKey] = map[string][][]interface{}{}
// }
// grpMetricData[dataKey][grpKey] = [][]interface{}{}
// }
// if datesAgg, ok := bucket["dates"].(map[string]interface{}); ok {
// if datesBuckets, ok := datesAgg["buckets"].([]interface{}); ok {
// for _, dateBucket := range datesBuckets {
// if bucketMap, ok := dateBucket.(map[string]interface{}); ok {
// v, ok := bucketMap["key"].(float64)
// if !ok {
// panic("invalid bucket key")
// }
// dateTime := (int64(v))
// minDate = util.MinInt64(minDate, dateTime)
// maxDate = util.MaxInt64(maxDate, dateTime)
//
// for mk1, mv1 := range grpMetricData {
// v1, ok := bucketMap[mk1]
// if ok {
// v2, ok := v1.(map[string]interface{})
// if ok {
// v3, ok := v2["value"].(float64)
// if ok {
// if strings.HasSuffix(mk1, "_deriv") {
// v3 = v3 / float64(bucketSize)
// }
// if field2, ok := bucketMap[mk1+"_field2"]; ok {
// if idx, ok := grpMetricItemsIndex[mk1]; ok {
// if field2Map, ok := field2.(map[string]interface{}); ok {
// v3 = grpMetricItems[idx].Calc(v3, field2Map["value"].(float64))
// }
// }
// }
// if v3 < 0 {
// continue
// }
// points := []interface{}{dateTime, v3}
// mv1[grpKey] = append(mv1[grpKey], points)
// }
// }
// }
// }
// }
// }
// }
//
// }
// }
// }
//}
//
//result := map[string]*common.MetricItem{}
//
//for _, metricItem := range grpMetricItems {
// for _, line := range metricItem.MetricItem.Lines {
// line.TimeRange = common.TimeRange{Min: minDate, Max: maxDate}
// dataKey := metricItem.ID
// if metricItem.IsDerivative {
// dataKey = dataKey + "_deriv"
// }
// line.Data = grpMetricData[dataKey][line.ElasticsearchMetric.Label]
// }
// result[metricItem.Key] = metricItem.MetricItem
//}
return nil
}
func ConvertMetricItemsToAggQuery(metricItems []*common.MetricItem) map[string]interface{} {
aggs := map[string]interface{}{}
for _, metricItem := range metricItems {
for _, line := range metricItem.Lines {
aggs[line.Metric.ID] = util.MapStr{
"max": util.MapStr{
"field": line.Metric.Field,
},
}
if line.Metric.IsDerivative {
//add which metric keys to extract
aggs[line.Metric.ID+"_deriv"] = util.MapStr{
"derivative": util.MapStr{
"buckets_path": line.Metric.ID,
},
}
}
}
}
return aggs
}
func ConvertBucketItemsToAggQuery(bucketItems []*common.BucketItem, metricItems []*common.MetricItem) util.MapStr {
aggs := util.MapStr{}
var currentAgg = util.MapStr{}
for _, bucketItem := range bucketItems {
bucketAgg := util.MapStr{}
switch bucketItem.Type {
case "terms":
bucketAgg = util.MapStr{
"terms": bucketItem.Parameters,
}
break
case "date_histogram":
bucketAgg = util.MapStr{
"date_histogram": bucketItem.Parameters,
}
break
case "date_range":
bucketAgg = util.MapStr{
"date_range": bucketItem.Parameters,
}
break
}
//if bucketItem.Buckets!=nil&&len(bucketItem.Buckets)>0{
nestedAggs := ConvertBucketItemsToAggQuery(bucketItem.Buckets, bucketItem.Metrics)
if len(nestedAggs) > 0 {
util.MergeFields(bucketAgg, nestedAggs, true)
}
//}
currentAgg[bucketItem.Key] = bucketAgg
}
if metricItems != nil && len(metricItems) > 0 {
metricAggs := ConvertMetricItemsToAggQuery(metricItems)
util.MergeFields(currentAgg, metricAggs, true)
}
aggs = util.MapStr{
"aggs": currentAgg,
}
return aggs
}
type BucketBase map[string]interface{}
func (receiver BucketBase) GetChildBucket(name string) (map[string]interface{}, bool) {
bks, ok := receiver[name]
if ok {
bks2, ok := bks.(map[string]interface{})
return bks2, ok
}
return nil, false
}
type Bucket struct {
BucketBase //child buckets
KeyAsString string `json:"key_as_string,omitempty"`
Key interface{} `json:"key,omitempty"`
DocCount int64 `json:"doc_count,omitempty"`
DocCountErrorUpperBound int64 `json:"doc_count_error_upper_bound,omitempty"`
SumOtherDocCount int64 `json:"sum_other_doc_count,omitempty"`
Buckets []Bucket `json:"buckets,omitempty"` //buckets at this level
}
type SearchResponse struct {
Took int `json:"took"`
TimedOut bool `json:"timed_out"`
Hits struct {
Total interface{} `json:"total"`
MaxScore float32 `json:"max_score"`
} `json:"hits"`
Aggregations util.MapStr `json:"aggregations,omitempty"`
}
func ParseAggregationBucketResult(bucketSize int, aggsData util.MapStr, groupKey, resultLabelKey, resultValueKey string, resultItemHandle func()) MetricData {
metricData := MetricData{}
for k, v := range aggsData {
if k == groupKey {
//start to collect metric for each bucket
objcs, ok := v.(map[string]interface{})
if ok {
bks, ok := objcs["buckets"].([]interface{})
if ok {
for _, bk := range bks {
//check each bucket, collecting metrics
bkMap, ok := bk.(map[string]interface{})
if ok {
groupKeyValue, ok := bkMap["key"]
if ok {
}
bkHitMap, ok := bkMap[resultLabelKey]
if ok {
//hit the label key, meaning the time range was matched
labelMap, ok := bkHitMap.(map[string]interface{})
if ok {
labelBks, ok := labelMap["buckets"]
if ok {
labelBksMap, ok := labelBks.([]interface{})
if ok {
for _, labelItem := range labelBksMap {
metrics, ok := labelItem.(map[string]interface{})
labelKeyValue, ok := metrics["to"] //TODO config
if !ok {
labelKeyValue, ok = metrics["from"] //TODO config
}
if !ok {
labelKeyValue, ok = metrics["key"] //TODO config
}
metric, ok := metrics[resultValueKey]
if ok {
metricMap, ok := metric.(map[string]interface{})
if ok {
t := "bucket" //metric, bucket
if t == "metric" {
metricValue, ok := metricMap["value"]
if ok {
saveMetric(&metricData, groupKeyValue.(string), labelKeyValue, metricValue, bucketSize)
continue
}
} else {
metricValue, ok := metricMap["buckets"]
if ok {
buckets, ok := metricValue.([]interface{})
if ok {
var result string = "unavailable"
for _, v := range buckets {
x, ok := v.(map[string]interface{})
if ok {
if x["key"] == "red" {
result = "red"
break
}
if x["key"] == "yellow" {
result = "yellow"
} else {
if result != "yellow" {
result = x["key"].(string)
}
}
}
}
v, ok := (metricData)[groupKeyValue.(string)]
if !ok {
v = [][]interface{}{}
}
v2 := []interface{}{}
v2 = append(v2, labelKeyValue)
v2 = append(v2, result)
v = append(v, v2)
(metricData)[groupKeyValue.(string)] = v
}
continue
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
return metricData
}
func ParseAggregationResult(bucketSize int, aggsData util.MapStr, groupKey, metricLabelKey, metricValueKey string) MetricData {
metricData := MetricData{}
//group bucket key: key1, use the buckets under this key as the groups: map[group][]{LabelMetricValue}
//metric label key: key2, use its key as the time dimension
//metric value key: c7qgjrqi4h92sqdaa9b0, use its value as the data point
//groupKey:="key1"
//metricLabelKey:="key2"
//metricValueKey:="c7qi5hii4h935v9bs920"
//fmt.Println(groupKey," => ",metricLabelKey," => ",metricValueKey)
for k, v := range aggsData {
//fmt.Println("k:",k)
//fmt.Println("v:",v)
if k == groupKey {
//fmt.Println("hit group key")
//start to collect metric for each bucket
objcs, ok := v.(map[string]interface{})
if ok {
bks, ok := objcs["buckets"].([]interface{})
if ok {
for _, bk := range bks {
//check each bucket, collecting metrics
//fmt.Println("check bucket:",bk)
bkMap, ok := bk.(map[string]interface{})
if ok {
groupKeyValue, ok := bkMap["key"]
if ok {
//fmt.Println("collecting bucket::",groupKeyValue)
}
bkHitMap, ok := bkMap[metricLabelKey]
if ok {
//hit the label key, meaning the time range was matched
labelMap, ok := bkHitMap.(map[string]interface{})
if ok {
//fmt.Println("bkHitMap",bkHitMap)
labelBks, ok := labelMap["buckets"]
if ok {
labelBksMap, ok := labelBks.([]interface{})
//fmt.Println("get label buckets",ok)
if ok {
//fmt.Println("get label buckets",ok)
for _, labelItem := range labelBksMap {
metrics, ok := labelItem.(map[string]interface{})
//fmt.Println(labelItem)
labelKeyValue, ok := metrics["key"]
if ok {
//fmt.Println("collecting metric label::",int64(labelKeyValue.(float64)))
}
metric, ok := metrics[metricValueKey]
if ok {
metricMap, ok := metric.(map[string]interface{})
if ok {
metricValue, ok := metricMap["value"]
if ok {
//fmt.Println("collecting metric value::",metricValue.(float64))
saveMetric(&metricData, groupKeyValue.(string), labelKeyValue, metricValue, bucketSize)
continue
}
}
}
}
}
}
}
}
}
}
}
}
}
}
//for k,v:=range bucketItems{
// fmt.Println("k:",k)
// fmt.Println("v:",v)
// aggObect:=aggsData[v.Key]
// fmt.Println("",aggObect)
// //fmt.Println(len(aggObect.Buckets))
// //for _,bucket:=range aggObect.Buckets{
// // fmt.Println(bucket.Key)
// // fmt.Println(bucket.GetChildBucket("key2"))
// // //children,ok:=bucket.GetChildBucket()
// // //if ok{
// // //
// // //}
// //}
//}
return metricData
}
func saveMetric(metricData *MetricData, group string, label, value interface{}, bucketSize int) {
if value == nil {
return
}
v3, ok := value.(float64)
if ok {
value = v3 / float64(bucketSize)
}
v, ok := (*metricData)[group]
if !ok {
v = [][]interface{}{}
}
v2 := []interface{}{}
v2 = append(v2, label)
v2 = append(v2, value)
v = append(v, v2)
(*metricData)[group] = v
//fmt.Printf("save:%v, %v=%v\n",group,label,value)
}
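
A small sketch of the rate conversion saveMetric performs (made-up group, label, and value): numeric values are divided by the bucket size before being appended as a data point:

data := MetricData{}
saveMetric(&data, "cluster-a", int64(1643385600000), float64(600), 60)
// data["cluster-a"] now holds one point: [1643385600000, 10] (600 / 60)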
func parseHealthMetricData(buckets []elastic.BucketBase) ([]interface{}, error) {
metricData := []interface{}{}
var minDate, maxDate int64
for _, bucket := range buckets {
v, ok := bucket["key"].(float64)
if !ok {
log.Error("invalid bucket key")
return nil, fmt.Errorf("invalid bucket key")
}
dateTime := int64(v)
minDate = util.MinInt64(minDate, dateTime)
maxDate = util.MaxInt64(maxDate, dateTime)
totalCount := bucket["doc_count"].(float64)
if grpStatus, ok := bucket["group_status"].(map[string]interface{}); ok {
if statusBks, ok := grpStatus["buckets"].([]interface{}); ok {
for _, statusBk := range statusBks {
if bkMap, ok := statusBk.(map[string]interface{}); ok {
statusKey := bkMap["key"].(string)
count := bkMap["doc_count"].(float64)
metricData = append(metricData, map[string]interface{}{
"x": dateTime,
"y": count / totalCount * 100,
"g": statusKey,
})
}
}
}
}
}
return metricData, nil
}


@ -0,0 +1,92 @@
package v1
import (
"fmt"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"net/http"
"testing"
"time"
)
func TestGetMetricParams(t *testing.T) {
handler:=APIHandler{}
req:=http.Request{}
bucketSize, min, max, err:=handler.getMetricRangeAndBucketSize(&req,60,15)
fmt.Println(bucketSize)
fmt.Println(util.FormatUnixTimestamp(min/1000))//2022-01-27 15:28:57
fmt.Println(util.FormatUnixTimestamp(max/1000))//2022-01-27 15:28:57
fmt.Println(time.Now())//2022-01-27 15:28:57
fmt.Println(bucketSize, min, max, err)
}
func TestConvertBucketItemsToAggQueryParams(t *testing.T) {
bucketItem:=common.BucketItem{}
bucketItem.Key="key1"
bucketItem.Type=common.TermsBucket
bucketItem.Parameters=map[string]interface{}{}
bucketItem.Parameters["field"]="metadata.labels.cluster_id"
bucketItem.Parameters["size"]=2
nestBucket:=common.BucketItem{}
nestBucket.Key="key2"
nestBucket.Type=common.DateHistogramBucket
nestBucket.Parameters=map[string]interface{}{}
nestBucket.Parameters["field"]="timestamp"
nestBucket.Parameters["calendar_interval"]="1d"
nestBucket.Parameters["time_zone"]="+08:00"
leafBucket:=common.NewBucketItem(common.TermsBucket,util.MapStr{
"size":5,
"field":"payload.elasticsearch.cluster_health.status",
})
leafBucket.Key="key3"
metricItems:=[]*common.MetricItem{}
var bucketSizeStr ="10s"
metricItem:=newMetricItem("cluster_summary", 2, "cluster")
metricItem.Key="key4"
metricItem.AddLine("Indexing","Total Indexing","Number of documents being indexed for primary and replica shards.","group1",
"payload.elasticsearch.index_stats.total.indexing.index_total","max",bucketSizeStr,"doc/s","num","0,0.[00]","0,0.[00]",false,true)
metricItem.AddLine("Search","Total Search","Number of search requests being executed across primary and replica shards. A single search can run against multiple shards!","group1",
"payload.elasticsearch.index_stats.total.search.query_total","max",bucketSizeStr,"query/s","num","0,0.[00]","0,0.[00]",false,true)
metricItems=append(metricItems,metricItem)
nestBucket.AddNestBucket(leafBucket)
nestBucket.Metrics=metricItems
bucketItem.Buckets=[]*common.BucketItem{}
bucketItem.Buckets=append(bucketItem.Buckets,&nestBucket)
aggs:=ConvertBucketItemsToAggQuery([]*common.BucketItem{&bucketItem},nil)
fmt.Println(util.MustToJSON(aggs))
response:="{ \"took\": 37, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 10000, \"relation\": \"gte\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"key1\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7pqhptj69a0sg3rn05g\", \"doc_count\": 80482, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-28T00:00:00.000+08:00\", \"key\": 1643299200000, \"doc_count\": 14310, \"c7qi5hii4h935v9bs91g\": { \"value\": 15680 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 2985 } }, { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 66172, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 }, \"c7qi5hii4h935v9bs91g_deriv\": { \"value\": 90526 }, \"c7qi5hii4h935v9bs920_deriv\": { \"value\": 17219 } } ] } }, { \"key\": \"c7qi42ai4h92sksk979g\", \"doc_count\": 660, \"key2\": { \"buckets\": [ { \"key_as_string\": \"2022-01-29T00:00:00.000+08:00\", \"key\": 1643385600000, \"doc_count\": 660, \"c7qi5hii4h935v9bs91g\": { \"value\": 106206 }, \"key3\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [] }, \"c7qi5hii4h935v9bs920\": { \"value\": 20204 } } ] } } ] } } }"
res:=SearchResponse{}
util.FromJSONBytes([]byte(response),&res)
fmt.Println(response)
groupKey:="key1"
metricLabelKey:="key2"
metricValueKey:="c7qi5hii4h935v9bs920"
data:=ParseAggregationResult(int(10),res.Aggregations,groupKey,metricLabelKey,metricValueKey)
fmt.Println(data)
}
func TestConvertBucketItems(t *testing.T) {
response:="{ \"took\": 8, \"timed_out\": false, \"_shards\": { \"total\": 1, \"successful\": 1, \"skipped\": 0, \"failed\": 0 }, \"hits\": { \"total\": { \"value\": 81, \"relation\": \"eq\" }, \"max_score\": null, \"hits\": [] }, \"aggregations\": { \"c7v2gm3i7638vvo4pv80\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"c7uv7p3i76360kgdmpb0\", \"doc_count\": 81, \"c7v2gm3i7638vvo4pv8g\": { \"buckets\": [ { \"key_as_string\": \"2022-02-05T00:00:00.000+08:00\", \"key\": 1643990400000, \"doc_count\": 81, \"c7v2gm3i7638vvo4pv90\": { \"doc_count_error_upper_bound\": 0, \"sum_other_doc_count\": 0, \"buckets\": [ { \"key\": \"yellow\", \"doc_count\": 81 } ] } } ] } } ] } } }"
res:=SearchResponse{}
util.FromJSONBytes([]byte(response),&res)
data:=ParseAggregationBucketResult(int(10),res.Aggregations,"c7v2gm3i7638vvo4pv80","c7v2gm3i7638vvo4pv8g","c7v2gm3i7638vvo4pv90", func() {
})
fmt.Println(data)
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,539 @@
package v1
import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
"strings"
)
const (
ThreadPoolGetGroupKey = "thread_pool_get"
ThreadPoolSearchGroupKey = "thread_pool_search"
ThreadPoolFlushGroupKey = "thread_pool_flush"
ThreadPoolRefreshGroupKey = "thread_pool_refresh"
ThreadPoolWriteGroupKey = "thread_pool_write"
ThreadPoolForceMergeGroupKey = "thread_pool_force_merge"
ThreadPoolIndexGroupKey = "thread_pool_index"
ThreadPoolBulkGroupKey = "thread_pool_bulk"
)
func (h *APIHandler) getThreadPoolMetrics(clusterID string, bucketSize int, min, max int64, nodeName string, top int) map[string]*common.MetricItem{
bucketSizeStr:=fmt.Sprintf("%vs",bucketSize)
var must = []util.MapStr{
{
"term":util.MapStr{
"metadata.labels.cluster_id":util.MapStr{
"value": clusterID,
},
},
},
{
"term": util.MapStr{
"metadata.category": util.MapStr{
"value": "elasticsearch",
},
},
},
{
"term": util.MapStr{
"metadata.name": util.MapStr{
"value": "node_stats",
},
},
},
}
var (
nodeNames []string
err error
)
if nodeName != "" {
nodeNames = strings.Split(nodeName, ",")
top = len(nodeNames)
}else{
nodeNames, err = h.getTopNodeName(clusterID, top, 15)
if err != nil {
log.Error(err)
}
}
if len(nodeNames) > 0 {
must = append(must, util.MapStr{
"terms": util.MapStr{
"metadata.labels.transport_address": nodeNames,
},
})
}
query:=map[string]interface{}{}
query["query"]=util.MapStr{
"bool": util.MapStr{
"must": must,
"filter": []util.MapStr{
{
"range": util.MapStr{
"timestamp": util.MapStr{
"gte": min,
"lte": max,
},
},
},
},
},
}
searchThreadsMetric := newMetricItem("search_threads", 1, ThreadPoolSearchGroupKey)
searchThreadsMetric.AddAxi("Search Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems := []GroupMetricItem{
{
Key: "search_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.search.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: searchThreadsMetric,
FormatType: "num",
Units: "",
},
}
searchQueueMetric := newMetricItem("search_queue", 1, ThreadPoolSearchGroupKey)
searchQueueMetric.AddAxi("Search Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.search.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: searchQueueMetric,
FormatType: "num",
Units: "",
})
searchActiveMetric := newMetricItem("search_active", 1, ThreadPoolSearchGroupKey)
searchActiveMetric.AddAxi("Search Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_active",
Field: "payload.elasticsearch.node_stats.thread_pool.search.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: searchActiveMetric,
FormatType: "num",
Units: "",
})
searchRejectedMetric := newMetricItem("search_rejected", 1, ThreadPoolSearchGroupKey)
searchRejectedMetric.AddAxi("Search Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "search_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.search.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: searchRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
getThreadsMetric := newMetricItem("get_threads", 1, ThreadPoolGetGroupKey)
getThreadsMetric.AddAxi("Get Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.get.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: getThreadsMetric,
FormatType: "num",
Units: "",
})
getQueueMetric := newMetricItem("get_queue", 1, ThreadPoolGetGroupKey)
getQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.get.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: getQueueMetric,
FormatType: "num",
Units: "",
})
getActiveMetric := newMetricItem("get_active", 1, ThreadPoolGetGroupKey)
getActiveMetric.AddAxi("Get Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_active",
Field: "payload.elasticsearch.node_stats.thread_pool.get.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: getActiveMetric,
FormatType: "num",
Units: "",
})
getRejectedMetric := newMetricItem("get_rejected", 1, ThreadPoolGetGroupKey)
getRejectedMetric.AddAxi("Get Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "get_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.get.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: getRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
flushThreadsMetric := newMetricItem("flush_threads", 1, ThreadPoolFlushGroupKey)
flushThreadsMetric.AddAxi("Flush Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: flushThreadsMetric,
FormatType: "num",
Units: "",
})
flushQueueMetric := newMetricItem("flush_queue", 1, ThreadPoolFlushGroupKey)
flushQueueMetric.AddAxi("Get Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: flushQueueMetric,
FormatType: "num",
Units: "",
})
flushActiveMetric := newMetricItem("flush_active", 1, ThreadPoolFlushGroupKey)
flushActiveMetric.AddAxi("Flush Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_active",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: flushActiveMetric,
FormatType: "num",
Units: "",
})
flushRejectedMetric := newMetricItem("flush_rejected", 1, ThreadPoolFlushGroupKey)
flushRejectedMetric.AddAxi("Flush Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "flush_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.flush.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: flushRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
majorVersion := elastic.GetMetadata(clusterID).GetMajorVersion()
ver := elastic.GetClient(clusterID).GetVersion()
if (ver.Distribution == "" || ver.Distribution == elastic.Elasticsearch) && majorVersion < 6{
indexThreadsMetric := newMetricItem("index_threads", 1, ThreadPoolIndexGroupKey)
indexThreadsMetric.AddAxi("Index Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.index.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexThreadsMetric,
FormatType: "num",
Units: "",
})
indexQueueMetric := newMetricItem("index_queue", 1, ThreadPoolIndexGroupKey)
indexQueueMetric.AddAxi("Index Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.index.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexQueueMetric,
FormatType: "num",
Units: "",
})
indexActiveMetric := newMetricItem("index_active", 1, ThreadPoolIndexGroupKey)
indexActiveMetric.AddAxi("Index Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_active",
Field: "payload.elasticsearch.node_stats.thread_pool.index.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: indexActiveMetric,
FormatType: "num",
Units: "",
})
indexRejectedMetric := newMetricItem("index_rejected", 1, ThreadPoolIndexGroupKey)
indexRejectedMetric.AddAxi("Index Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "index_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.index.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: indexRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
bulkThreadsMetric := newMetricItem("bulk_threads", 1, ThreadPoolBulkGroupKey)
bulkThreadsMetric.AddAxi("Bulk Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: bulkThreadsMetric,
FormatType: "num",
Units: "",
})
bulkQueueMetric := newMetricItem("bulk_queue", 1, ThreadPoolBulkGroupKey)
bulkQueueMetric.AddAxi("Bulk Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: bulkQueueMetric,
FormatType: "num",
Units: "",
})
bulkActiveMetric := newMetricItem("bulk_active", 1, ThreadPoolBulkGroupKey)
bulkActiveMetric.AddAxi("Bulk Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_active",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: bulkActiveMetric,
FormatType: "num",
Units: "",
})
bulkRejectedMetric := newMetricItem("bulk_rejected", 1, ThreadPoolBulkGroupKey)
bulkRejectedMetric.AddAxi("Bulk Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "bulk_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.bulk.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: bulkRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
}else {
writeThreadsMetric := newMetricItem("write_threads", 1, ThreadPoolWriteGroupKey)
writeThreadsMetric.AddAxi("Write Threads Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.write.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: writeThreadsMetric,
FormatType: "num",
Units: "",
})
writeQueueMetric := newMetricItem("write_queue", 1, ThreadPoolWriteGroupKey)
writeQueueMetric.AddAxi("Write Queue Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.write.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: writeQueueMetric,
FormatType: "num",
Units: "",
})
writeActiveMetric := newMetricItem("write_active", 1, ThreadPoolWriteGroupKey)
writeActiveMetric.AddAxi("Write Active Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_active",
Field: "payload.elasticsearch.node_stats.thread_pool.write.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: writeActiveMetric,
FormatType: "num",
Units: "",
})
writeRejectedMetric := newMetricItem("write_rejected", 1, ThreadPoolWriteGroupKey)
writeRejectedMetric.AddAxi("Write Rejected Count", "group1", common.PositionLeft, "num", "0.[0]", "0.[0]", 5, true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "write_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.write.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: writeRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
}
refreshThreadsMetric := newMetricItem("refresh_threads", 1, ThreadPoolRefreshGroupKey)
refreshThreadsMetric.AddAxi("Refresh Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: refreshThreadsMetric,
FormatType: "num",
Units: "",
})
refreshQueueMetric := newMetricItem("refresh_queue", 1, ThreadPoolRefreshGroupKey)
refreshQueueMetric.AddAxi("Refresh Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: refreshQueueMetric,
FormatType: "num",
Units: "",
})
refreshActiveMetric := newMetricItem("refresh_active", 1, ThreadPoolRefreshGroupKey)
refreshActiveMetric.AddAxi("Refresh Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_active",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: refreshActiveMetric,
FormatType: "num",
Units: "",
})
refreshRejectedMetric := newMetricItem("refresh_rejected", 1, ThreadPoolRefreshGroupKey)
refreshRejectedMetric.AddAxi("Refresh Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "refresh_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.refresh.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: refreshRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
forceMergeThreadsMetric := newMetricItem("force_merge_threads", 1, ThreadPoolForceMergeGroupKey)
forceMergeThreadsMetric.AddAxi("Force Merge Threads Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_threads",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.threads",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: forceMergeThreadsMetric,
FormatType: "num",
Units: "",
})
forceMergeQueueMetric := newMetricItem("force_merge_queue", 1, ThreadPoolForceMergeGroupKey)
forceMergeQueueMetric.AddAxi("Force Merge Queue Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_queue",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.queue",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: forceMergeQueueMetric,
FormatType: "num",
Units: "",
})
forceMergeActiveMetric := newMetricItem("force_merge_active", 1, ThreadPoolForceMergeGroupKey)
forceMergeActiveMetric.AddAxi("Force Merge Active Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_active",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.active",
ID: util.GetUUID(),
IsDerivative: false,
MetricItem: forceMergeActiveMetric,
FormatType: "num",
Units: "",
})
forceMergeRejectedMetric := newMetricItem("force_merge_rejected", 1, ThreadPoolForceMergeGroupKey)
forceMergeRejectedMetric.AddAxi("Force Merge Rejected Count","group1",common.PositionLeft,"num","0.[0]","0.[0]",5,true)
queueMetricItems = append(queueMetricItems, GroupMetricItem{
Key: "force_merge_rejected",
Field: "payload.elasticsearch.node_stats.thread_pool.force_merge.rejected",
ID: util.GetUUID(),
IsDerivative: true,
MetricItem: forceMergeRejectedMetric,
FormatType: "num",
Units: "rejected/s",
})
//build max and derivative aggregations for each thread pool metric
aggs:=map[string]interface{}{}
for _,metricItem:=range queueMetricItems{
aggs[metricItem.ID]=util.MapStr{
"max":util.MapStr{
"field": metricItem.Field,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID + "_field2"]=util.MapStr{
"max":util.MapStr{
"field": metricItem.Field2,
},
}
}
if metricItem.IsDerivative{
aggs[metricItem.ID+"_deriv"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID,
},
}
if metricItem.Field2 != "" {
aggs[metricItem.ID + "_field2_deriv"]=util.MapStr{
"derivative":util.MapStr{
"buckets_path": metricItem.ID + "_field2",
},
}
}
}
}
intervalField, err := getDateHistogramIntervalField(global.MustLookupString(elastic.GlobalSystemElasticsearchID), bucketSizeStr)
if err != nil {
log.Error(err)
panic(err)
}
query["size"]=0
query["aggs"]= util.MapStr{
"group_by_level": util.MapStr{
"terms": util.MapStr{
"field": "metadata.labels.transport_address",
"size": top,
},
"aggs": util.MapStr{
"dates": util.MapStr{
"date_histogram":util.MapStr{
"field": "timestamp",
intervalField: bucketSizeStr,
},
"aggs":aggs,
},
},
},
}
return h.getMetrics(query, queueMetricItems, bucketSize)
}

500
modules/elastic/api/view.go Normal file
View File

@ -0,0 +1,500 @@
package api
import (
"fmt"
log "github.com/cihub/seelog"
"github.com/segmentio/encoding/json"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/radix"
"infini.sh/framework/core/util"
"net/http"
"strconv"
"strings"
"time"
)
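// HandleCreateViewAction creates a new index-pattern view for the target cluster, stores it
// in the system Elasticsearch, and returns the generated id together with the saved attributes.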
func (h *APIHandler) HandleCreateViewAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, _, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
var viewReq = &elastic.ViewRequest{}
err = h.DecodeJSON(req, viewReq)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
id := util.GetUUID()
viewReq.Attributes.UpdatedAt = time.Now()
viewReq.Attributes.ClusterID = targetClusterID
_, err = esClient.Index(orm.GetIndexName(viewReq.Attributes), "", id, viewReq.Attributes, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
resBody = map[string]interface{}{
"id": id,
"type": "index-pattern",
"updated_at": viewReq.Attributes.UpdatedAt,
"attributes": viewReq.Attributes,
"namespaces": []string{"default"},
}
h.WriteJSON(w, resBody, http.StatusOK)
}
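// HandleGetViewListAction lists the saved views of the target cluster, optionally filtered by a
// title match, and returns them in a Kibana-style saved_objects payload.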
func (h *APIHandler) HandleGetViewListAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
strSize := h.GetParameterOrDefault(req, "per_page", "10000")
size, _ := strconv.Atoi(strSize)
search := h.GetParameterOrDefault(req, "search", "")
if search != "" {
search = fmt.Sprintf(`,{"match":{"title":"%s"}}`, search)
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
queryDSL := []byte(fmt.Sprintf(`{"_source":["title","viewName", "updated_at"],"size": %d, "query":{"bool":{"must":[{"match":{"cluster_id":"%s"}}%s]}}}`, size, targetClusterID, search))
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetIndexName(elastic.View{}), queryDSL)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
var total = len(searchRes.Hits.Hits)
if totalVal, ok := searchRes.Hits.Total.(map[string]interface{}); ok {
total = int(totalVal["value"].(float64))
}
resBody = map[string]interface{}{
"per_page": size,
"total": total,
}
var savedObjects = make([]map[string]interface{}, 0, len(searchRes.Hits.Hits))
for _, hit := range searchRes.Hits.Hits {
var savedObject = map[string]interface{}{
"id": hit.ID,
"attributes": map[string]interface{}{
"title": hit.Source["title"],
"viewName": hit.Source["viewName"],
},
"score": 0,
"type": "index-pattern",
"namespaces": []string{"default"},
"updated_at": hit.Source["updated_at"],
}
savedObjects = append(savedObjects, savedObject)
}
resBody["saved_objects"] = savedObjects
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) HandleDeleteViewAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
viewID := ps.ByName("view_id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
_, err := esClient.Delete(orm.GetIndexName(elastic.View{}), "", viewID, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, resBody, http.StatusOK)
}
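// HandleResolveIndexAction resolves a wildcard expression against the cluster's aliases and
// indices. Cross-cluster patterns (containing ":") are resolved via a terms aggregation on
// _index; all results are filtered by the user's index-level privileges.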
func (h *APIHandler) HandleResolveIndexAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
wild := ps.ByName("wild")
//wild = strings.ReplaceAll(wild, "*", "")
exists, client, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
allowedIndices, hasAllPrivilege := h.GetAllowedIndices(req, targetClusterID)
if !hasAllPrivilege && len(allowedIndices) == 0 {
h.WriteJSON(w, elastic.AliasAndIndicesResponse{
Aliases: []elastic.AAIR_Alias{},
Indices: []elastic.AAIR_Indices{},
}, http.StatusOK)
return
}
//ccs
if strings.Contains(wild, ":") {
q := util.MapStr{
"size": 0,
"aggs": util.MapStr{
"indices": util.MapStr{
"terms": util.MapStr{
"field": "_index",
"size": 200,
},
},
},
}
searchRes, err := client.SearchWithRawQueryDSL(wild, util.MustToJSONBytes(q))
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
indices := []elastic.AAIR_Indices{}
parts := strings.SplitN(wild, ":", 2)
if parts[1] == "" {
wild = "*"
}
var filterPattern *radix.Pattern
if !hasAllPrivilege {
filterPattern = radix.Compile(allowedIndices...)
}
inputPattern := radix.Compile(wild)
if agg, ok := searchRes.Aggregations["indices"]; ok {
for _, bk := range agg.Buckets {
if k, ok := bk["key"].(string); ok {
if !hasAllPrivilege && !filterPattern.Match(k) {
continue
}
if inputPattern.Match(k) {
indices = append(indices, elastic.AAIR_Indices{
Name: k,
Attributes: []string{"open"},
})
}
}
}
}
h.WriteJSON(w, elastic.AliasAndIndicesResponse{
Aliases: []elastic.AAIR_Alias{},
Indices: indices,
}, http.StatusOK)
return
}
res, err := client.GetAliasesAndIndices()
if err != nil || res == nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if wild == "" {
wild = "*"
}
var filterPattern *radix.Pattern
if !hasAllPrivilege {
filterPattern = radix.Compile(allowedIndices...)
}
inputPattern := radix.Compile(wild)
var (
aliases = []elastic.AAIR_Alias{}
indices = []elastic.AAIR_Indices{}
)
for _, alias := range res.Aliases {
if !hasAllPrivilege && !filterPattern.Match(alias.Name) {
continue
}
if inputPattern.Match(alias.Name) {
aliases = append(aliases, alias)
}
}
for _, index := range res.Indices {
if !hasAllPrivilege && !filterPattern.Match(index.Name) {
continue
}
if inputPattern.Match(index.Name) {
indices = append(indices, index)
}
}
res.Indices = indices
res.Aliases = aliases
h.WriteJSON(w, res, http.StatusOK)
}
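// HandleBulkGetViewAction returns saved view objects by id and, for plain index names,
// synthesizes view-like objects on the fly from the index field capabilities.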
func (h *APIHandler) HandleBulkGetViewAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
var reqIDs = []struct {
ID string `json:"id"`
Type string `json:"type"`
}{}
err := h.DecodeJSON(req, &reqIDs)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
var strIDs []string
var indexNames []string
for _, reqID := range reqIDs {
if reqID.Type == "view" {
strIDs = append(strIDs, fmt.Sprintf(`"%s"`, reqID.ID))
} else if reqID.Type == "index" {
indexNames = append(indexNames, reqID.ID)
}
}
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
esTargetClient := elastic.GetClient(targetClusterID)
queryDSL := []byte(fmt.Sprintf(`{"query": {"bool": {"must": [{"terms": {"_id": [%s]}},
{"match": {"cluster_id": "%s"}}]}}}`, strings.Join(strIDs, ","), targetClusterID))
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetIndexName(elastic.View{}), queryDSL)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
var savedObjects = make([]map[string]interface{}, 0, len(searchRes.Hits.Hits))
for _, hit := range searchRes.Hits.Hits {
var savedObject = map[string]interface{}{
"id": hit.ID,
"attributes": map[string]interface{}{
"title": hit.Source["title"],
"fields": hit.Source["fields"],
"viewName": hit.Source["viewName"],
"timeFieldName": hit.Source["timeFieldName"],
"fieldFormatMap": hit.Source["fieldFormatMap"],
},
"score": 0,
"type": "view",
"namespaces": []string{"default"},
"migrationVersion": map[string]interface{}{"index-pattern": "7.6.0"},
"updated_at": hit.Source["updated_at"],
}
savedObjects = append(savedObjects, savedObject)
}
//index mock
for _, indexName := range indexNames {
fields, err := elastic.GetFieldCaps(esTargetClient, indexName, []string{"_source", "_id", "_type", "_index"})
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
bufFields, _ := json.Marshal(fields)
var savedObject = map[string]interface{}{
"id": indexName, //fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s-%s", targetClusterID,indexName)))),
"attributes": map[string]interface{}{
"title": indexName,
"fields": string(bufFields),
"viewName": indexName,
"timeFieldName": "",
"fieldFormatMap": "",
},
"score": 0,
"type": "index",
"namespaces": []string{"default"},
"migrationVersion": map[string]interface{}{"index-pattern": "7.6.0"},
"updated_at": time.Now(),
}
savedObjects = append(savedObjects, savedObject)
}
resBody["saved_objects"] = savedObjects
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) HandleUpdateViewAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
exists, _, err := h.GetClusterClient(targetClusterID)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if !exists {
resBody["error"] = fmt.Sprintf("cluster [%s] not found", targetClusterID)
log.Error(resBody["error"])
h.WriteJSON(w, resBody, http.StatusNotFound)
return
}
var viewReq = &elastic.ViewRequest{}
err = h.DecodeJSON(req, viewReq)
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if viewReq.Attributes.Title == "" {
resBody["error"] = "miss title"
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
id := ps.ByName("view_id")
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
viewReq.Attributes.UpdatedAt = time.Now()
viewReq.Attributes.ClusterID = targetClusterID
_, err = esClient.Index(orm.GetIndexName(viewReq.Attributes), "", id, viewReq.Attributes, "wait_for")
if err != nil {
log.Error(err)
resBody["error"] = err
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
h.WriteJSON(w, viewReq.Attributes, http.StatusOK)
}
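// HandleGetFieldCapsAction fetches the field capabilities for the given pattern and applies the
// optional keyword/aggregatable/type/es_type/size filters before returning the field list.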
func (h *APIHandler) HandleGetFieldCapsAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
resBody := map[string]interface{}{}
targetClusterID := ps.ByName("id")
pattern := h.GetParameterOrDefault(req, "pattern", "*")
keyword := h.GetParameterOrDefault(req, "keyword", "")
aggregatable := h.GetParameterOrDefault(req, "aggregatable", "")
size := h.GetIntOrDefault(req, "size", 0)
typ := h.GetParameterOrDefault(req, "type", "")
esType := h.GetParameterOrDefault(req, "es_type", "")
metaFields := req.URL.Query()["meta_fields"]
esClient := elastic.GetClient(targetClusterID)
kbnFields, err := elastic.GetFieldCaps(esClient, pattern, metaFields)
if err != nil {
log.Error(err)
resBody["error"] = err.Error()
h.WriteJSON(w, resBody, http.StatusInternalServerError)
return
}
if keyword != "" || aggregatable != "" || typ != "" || esType != "" || size > 0 {
var filteredFields []elastic.ElasticField
var count = 0
for _, field := range kbnFields {
if keyword != "" && !strings.Contains(field.Name, keyword) {
continue
}
if aggregatable == "true" && !field.Aggregatable {
continue
}
if typ != "" && field.Type != typ {
continue
}
if esType != "" && field.ESTypes[0] != esType {
continue
}
count++
if size > 0 && count > size {
break
}
filteredFields = append(filteredFields, field)
}
kbnFields = filteredFields
}
resBody["fields"] = kbnFields
h.WriteJSON(w, resBody, http.StatusOK)
}
func (h *APIHandler) HandleGetViewAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("view_id")
obj := elastic.View{}
obj.ID = id
exists, err := orm.Get(&obj)
if !exists || err != nil {
h.WriteJSON(w, util.MapStr{
"_id": id,
"found": false,
}, http.StatusNotFound)
return
}
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
h.WriteGetOKJSON(w, id, obj)
}
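// SetDefaultLayout updates the default layout id of an existing view.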
func (h *APIHandler) SetDefaultLayout(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var viewReq = &elastic.View{}
err := h.DecodeJSON(req, viewReq)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
id := ps.MustGetParameter("view_id")
viewObj := elastic.View{}
viewObj.ID = id
exists, err := orm.Get(&viewObj)
if !exists || err != nil {
h.WriteJSON(w, util.MapStr{
"_id": id,
"result": "not_found",
}, http.StatusNotFound)
return
}
viewObj.DefaultLayoutID = viewReq.DefaultLayoutID
ctx := &orm.Context{
Refresh: "wait_for",
}
err = orm.Update(ctx, &viewObj)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
h.WriteJSON(w, util.MapStr{
"success": true,
}, 200)
}

View File

@ -0,0 +1,206 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package api
import (
"fmt"
"golang.org/x/crypto/bcrypt"
rbac "infini.sh/console/core/security"
"infini.sh/console/modules/security/realm"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/util"
"net/http"
)
const userInSession = "user_session:"
// const SSOProvider = "sso"
const NativeProvider = "native"
//const LDAPProvider = "ldap"
func (h APIHandler) Logout(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
reqUser, err := rbac.FromUserContext(r.Context())
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
rbac.DeleteUserToken(reqUser.UserId)
h.WriteOKJSON(w, util.MapStr{
"status": "ok",
})
}
func (h APIHandler) Profile(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
reqUser, err := rbac.FromUserContext(r.Context())
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
if reqUser.Provider == NativeProvider {
user, err := h.User.Get(reqUser.UserId)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
if user.Nickname == "" {
user.Nickname = user.Username
}
u := util.MapStr{
"user_id": user.ID,
"name": user.Username,
"email": user.Email,
"nick_name": user.Nickname,
"phone": user.Phone,
}
h.WriteOKJSON(w, api.FoundResponse(reqUser.UserId, u))
} else {
//TODO fetch external profile
u := util.MapStr{
"user_id": reqUser.UserId,
"name": reqUser.Username,
"email": "", //TOOD, save user profile come from SSO
"nick_name": reqUser.Username, //TODO
"phone": "", //TODO
}
h.WriteOKJSON(w, api.FoundResponse(reqUser.UserId, u))
}
}
func (h APIHandler) UpdatePassword(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
reqUser, err := rbac.FromUserContext(r.Context())
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
var req struct {
OldPassword string `json:"old_password"`
NewPassword string `json:"new_password"`
}
err = h.DecodeJSON(r, &req)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
user, err := h.User.Get(reqUser.UserId)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
err = bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(req.OldPassword))
if err == bcrypt.ErrMismatchedHashAndPassword {
h.ErrorInternalServer(w, "old password is not correct")
return
}
hash, err := bcrypt.GenerateFromPassword([]byte(req.NewPassword), bcrypt.DefaultCost)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
user.Password = string(hash)
err = h.User.Update(&user)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
h.WriteOKJSON(w, api.UpdateResponse(reqUser.UserId))
return
}
func (h APIHandler) UpdateProfile(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
reqUser, err := rbac.FromUserContext(r.Context())
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
var req struct {
Name string `json:"name"`
Phone string `json:"phone"`
Email string `json:"email"`
}
err = h.DecodeJSON(r, &req)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
user, err := h.User.Get(reqUser.UserId)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
user.Username = req.Name
user.Email = req.Email
user.Phone = req.Phone
err = h.User.Update(&user)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
h.WriteOKJSON(w, api.UpdateResponse(reqUser.UserId))
return
}
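// Login authenticates the user against the configured realms, authorizes the account,
// and returns a newly generated access token on success.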
func (h APIHandler) Login(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var req struct {
Username string `json:"username"`
Password string `json:"password"`
}
err := h.DecodeJSON(r, &req)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
//validate the user's credentials against the configured realms
ok, user, err := realm.Authenticate(req.Username, req.Password)
if err != nil {
h.WriteError(w, err.Error(), 500)
return
}
if !ok {
h.WriteError(w, "invalid username or password", 403)
return
}
if user == nil {
h.ErrorInternalServer(w, fmt.Sprintf("failed to authenticate user: %v", req.Username))
return
}
//check permissions
ok, err = realm.Authorize(user)
if err != nil || !ok {
h.ErrorInternalServer(w, fmt.Sprintf("failed to authorize user: %v", req.Username))
return
}
//fetch user profile
//TODO
if user.Nickname == "" {
user.Nickname = user.Username
}
//generate access token
token, err := rbac.GenerateAccessToken(user)
if err != nil {
h.ErrorInternalServer(w, fmt.Sprintf("failed to authorize user: %v", req.Username))
return
}
//api.SetSession(w, r, userInSession+req.Username, req.Username)
h.WriteOKJSON(w, token)
}

View File

@ -0,0 +1,47 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package api
import (
"infini.sh/console/core"
rbac "infini.sh/console/core/security"
"infini.sh/console/core/security/enum"
"infini.sh/framework/core/api"
)
type APIHandler struct {
core.Handler
rbac.Adapter
}
const adapterType = "native"
var apiHandler = APIHandler{Adapter: rbac.GetAdapter(adapterType)} //TODO handle hard coded
func Init() {
api.HandleAPIMethod(api.GET, "/permission/:type", apiHandler.RequireLogin(apiHandler.ListPermission))
api.HandleAPIMethod(api.POST, "/role/:type", apiHandler.RequirePermission(apiHandler.CreateRole, enum.RoleAllPermission...))
api.HandleAPIMethod(api.GET, "/role/:id", apiHandler.RequirePermission(apiHandler.GetRole, enum.RoleReadPermission...))
api.HandleAPIMethod(api.DELETE, "/role/:id", apiHandler.RequirePermission(apiHandler.DeleteRole, enum.RoleAllPermission...))
api.HandleAPIMethod(api.PUT, "/role/:id", apiHandler.RequirePermission(apiHandler.UpdateRole, enum.RoleAllPermission...))
api.HandleAPIMethod(api.GET, "/role/_search", apiHandler.RequirePermission(apiHandler.SearchRole, enum.RoleReadPermission...))
api.HandleAPIMethod(api.POST, "/user", apiHandler.RequirePermission(apiHandler.CreateUser, enum.UserAllPermission...))
api.HandleAPIMethod(api.GET, "/user/:id", apiHandler.RequirePermission(apiHandler.GetUser, enum.UserReadPermission...))
api.HandleAPIMethod(api.DELETE, "/user/:id", apiHandler.RequirePermission(apiHandler.DeleteUser, enum.UserAllPermission...))
api.HandleAPIMethod(api.PUT, "/user/:id", apiHandler.RequirePermission(apiHandler.UpdateUser, enum.UserAllPermission...))
api.HandleAPIMethod(api.GET, "/user/_search", apiHandler.RequirePermission(apiHandler.SearchUser, enum.UserReadPermission...))
api.HandleAPIMethod(api.PUT, "/user/:id/password", apiHandler.RequirePermission(apiHandler.UpdateUserPassword, enum.UserAllPermission...))
api.HandleAPIMethod(api.POST, "/account/login", apiHandler.Login)
api.HandleAPIMethod(api.POST, "/account/logout", apiHandler.Logout)
api.HandleAPIMethod(api.DELETE, "/account/logout", apiHandler.Logout)
api.HandleAPIMethod(api.GET, "/account/profile", apiHandler.RequireLogin(apiHandler.Profile))
api.HandleAPIMethod(api.PUT, "/account/password", apiHandler.RequireLogin(apiHandler.UpdatePassword))
}

View File

@ -0,0 +1,28 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package api
import (
log "github.com/cihub/seelog"
rbac "infini.sh/console/core/security"
httprouter "infini.sh/framework/core/api/router"
"net/http"
)
func (h APIHandler) ListPermission(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
typ := ps.MustGetParameter("type")
err := rbac.IsAllowRoleType(typ)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
var permissions interface{}
if typ == rbac.Elasticsearch {
permissions = rbac.GetPermissions(typ)
}
h.WriteOKJSON(w, permissions)
return
}

View File

@ -0,0 +1,186 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package api
import (
log "github.com/cihub/seelog"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/util"
"net/http"
"time"
)
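// CreateRole creates a role of the given type after validating the type and ensuring the
// role name is not already taken, then caches the new role in RoleMap.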
func (h APIHandler) CreateRole(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
roleType := ps.MustGetParameter("type")
//localUser, err := rbac.FromUserContext(r.Context())
//if err != nil {
// log.Error(err.Error())
// h.ErrorInternalServer(w, err.Error())
// return
//}
err := rbac.IsAllowRoleType(roleType)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
role := &rbac.Role{
Type: roleType,
}
err = h.DecodeJSON(r, role)
if err != nil {
h.Error400(w, err.Error())
return
}
if _, ok := rbac.RoleMap[role.Name]; ok {
h.ErrorInternalServer(w, "role name already exists")
return
}
now := time.Now()
role.Created = &now
role.Updated = role.Created
role.Type = roleType
var id string
id, err = h.Adapter.Role.Create(role)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
rbac.RoleMap[role.Name] = *role
_ = h.WriteOKJSON(w, api.CreateResponse(id))
return
}
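// SearchRole searches stored roles and merges the built-in roles into the result set
// before returning the combined hit list.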
func (h APIHandler) SearchRole(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var (
keyword = h.GetParameterOrDefault(r, "keyword", "")
from = h.GetIntOrDefault(r, "from", 0)
size = h.GetIntOrDefault(r, "size", 20)
)
res, err := h.Adapter.Role.Search(keyword, from, size)
if err != nil {
log.Error(err)
h.ErrorInternalServer(w, err.Error())
return
}
response := elastic.SearchResponse{}
util.FromJSONBytes(res.Raw, &response)
hits := response.Hits.Hits
list := make([]elastic.IndexDocument, 0)
total := response.GetTotal()
var index string
for _, v := range hits {
index = v.Index
}
for k, v := range rbac.BuiltinRoles {
mval := map[string]interface{}{}
vbytes := util.MustToJSONBytes(v)
util.MustFromJSONBytes(vbytes, &mval)
list = append(list, elastic.IndexDocument{
ID: k,
Index: index,
Type: "_doc",
Source: mval,
})
total++
}
list = append(list, hits...)
response.Hits.Hits = list
response.Hits.Total = total
h.WriteOKJSON(w, response)
return
}
func (h APIHandler) GetRole(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
role, err := h.Adapter.Role.Get(id)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
h.WriteOKJSON(w, api.Response{Hit: role})
return
}
func (h APIHandler) DeleteRole(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
//localUser, err := biz.FromUserContext(r.Context())
//if err != nil {
// log.Error(err.Error())
// h.ErrorInternalServer(w, err.Error())
// return
//}
oldRole, err := h.Role.Get(id)
if err != nil {
h.ErrorInternalServer(w, err.Error())
return
}
err = h.Adapter.Role.Delete(id)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
delete(rbac.RoleMap, oldRole.Name)
_ = h.WriteOKJSON(w, api.DeleteResponse(id))
return
}
func (h APIHandler) UpdateRole(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
//localUser, err := biz.FromUserContext(r.Context())
//if err != nil {
// log.Error(err.Error())
// h.ErrorInternalServer(w, err.Error())
// return
//}
role := &rbac.Role{}
err := h.DecodeJSON(r, role)
if err != nil {
h.Error400(w, err.Error())
return
}
role.ID = id
oldRole, err := h.Role.Get(id)
if err != nil {
log.Error(err)
h.ErrorInternalServer(w, err.Error())
return
}
if role.Name != oldRole.Name {
h.ErrorInternalServer(w, "Changing role name is not allowed")
return
}
now := time.Now()
role.Type = oldRole.Type
role.Updated = &now
role.Created = oldRole.Created
err = h.Role.Update(role)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
delete(rbac.RoleMap, oldRole.Name)
rbac.RoleMap[role.Name] = *role
_ = h.WriteOKJSON(w, api.UpdateResponse(id))
return
}

View File

@ -0,0 +1,260 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package api
import (
"bytes"
"errors"
"github.com/buger/jsonparser"
log "github.com/cihub/seelog"
"golang.org/x/crypto/bcrypt"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic"
"net/http"
"sort"
"time"
)
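// CreateUser creates a new user with a randomly generated initial password (returned in the
// response) after checking that the username is not already taken.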
func (h APIHandler) CreateUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var user rbac.User
err := h.DecodeJSON(r, &user)
if err != nil {
h.Error400(w, err.Error())
return
}
if user.Username == "" {
h.Error400(w, "username is required")
return
}
//localUser, err := biz.FromUserContext(r.Context())
//if err != nil {
// log.Error(err.Error())
// h.ErrorInternalServer(w, err.Error())
// return
//}
if h.userNameExists(w, user.Username) {
return
}
randStr := util.GenerateRandomString(8)
hash, err := bcrypt.GenerateFromPassword([]byte(randStr), bcrypt.DefaultCost)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
user.Password = string(hash)
now := time.Now()
user.Created = &now
user.Updated = &now
id, err := h.User.Create(&user)
user.ID = id
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
_ = h.WriteOKJSON(w, util.MapStr{
"_id": id,
"password": randStr,
"result": "created",
})
return
}
func (h APIHandler) userNameExists(w http.ResponseWriter, name string) bool {
u, err := h.User.GetBy("name", name)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return true
}
if u != nil {
h.ErrorInternalServer(w, "user name already exists")
return true
}
return false
}
func (h APIHandler) GetUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
user, err := h.User.Get(id)
if errors.Is(err, elastic.ErrNotFound) {
h.WriteJSON(w, api.NotFoundResponse(id), http.StatusNotFound)
return
}
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
h.WriteOKJSON(w, api.FoundResponse(id, user))
return
}
func (h APIHandler) UpdateUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
var user rbac.User
err := h.DecodeJSON(r, &user)
if err != nil {
_ = log.Error(err.Error())
h.Error400(w, err.Error())
return
}
//localUser, err := biz.FromUserContext(r.Context())
//if err != nil {
// log.Error(err.Error())
// h.ErrorInternalServer(w, err.Error())
// return
//}
oldUser, err := h.User.Get(id)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
if user.Username != oldUser.Username && h.userNameExists(w, user.Username) {
return
}
now := time.Now()
user.Updated = &now
user.Created = oldUser.Created
user.ID = id
err = h.User.Update(&user)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
//force the user to log in again if their roles changed
sort.Slice(user.Roles, func(i, j int) bool {
return user.Roles[i].ID < user.Roles[j].ID
})
sort.Slice(oldUser.Roles, func(i, j int) bool {
return oldUser.Roles[i].ID < oldUser.Roles[j].ID
})
changeLog, _ := util.DiffTwoObject(user.Roles, oldUser.Roles)
if len(changeLog) > 0 {
rbac.DeleteUserToken(id)
}
_ = h.WriteOKJSON(w, api.UpdateResponse(id))
return
}
func (h APIHandler) DeleteUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
user, err := rbac.FromUserContext(r.Context())
if err != nil {
log.Error("failed to get user from context, err: %v", err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if user != nil && user.UserId == id {
h.WriteError(w, "can not delete yourself", http.StatusInternalServerError)
return
}
err = h.User.Delete(id)
if errors.Is(err, elastic.ErrNotFound) {
h.WriteJSON(w, api.NotFoundResponse(id), http.StatusNotFound)
return
}
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
rbac.DeleteUserToken(id)
_ = h.WriteOKJSON(w, api.DeleteResponse(id))
return
}
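// SearchUser searches users by keyword and strips the password field from every hit
// before writing the raw search response back to the client.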
func (h APIHandler) SearchUser(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
var (
keyword = h.GetParameterOrDefault(r, "keyword", "")
from = h.GetIntOrDefault(r, "from", 0)
size = h.GetIntOrDefault(r, "size", 20)
)
res, err := h.User.Search(keyword, from, size)
if err != nil {
log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
//remove password field
hitsBuf := bytes.Buffer{}
hitsBuf.Write([]byte("["))
jsonparser.ArrayEach(res.Raw, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
value = jsonparser.Delete(value, "_source", "password")
hitsBuf.Write(value)
hitsBuf.Write([]byte(","))
}, "hits", "hits")
buf := hitsBuf.Bytes()
if buf[len(buf)-1] == ',' {
buf[len(buf)-1] = ']'
} else {
hitsBuf.Write([]byte("]"))
}
res.Raw, err = jsonparser.Set(res.Raw, hitsBuf.Bytes(), "hits", "hits")
if err != nil {
log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
h.Write(w, res.Raw)
return
}
func (h APIHandler) UpdateUserPassword(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
var req = struct {
Password string `json:"password"`
}{}
err := h.DecodeJSON(r, &req)
if err != nil {
_ = log.Error(err.Error())
h.Error400(w, err.Error())
return
}
//localUser, err := biz.FromUserContext(r.Context())
//if err != nil {
// log.Error(err.Error())
// h.ErrorInternalServer(w, err.Error())
// return
//}
user, err := h.User.Get(id)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
hash, err := bcrypt.GenerateFromPassword([]byte(req.Password), bcrypt.DefaultCost)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
user.Password = string(hash)
//t:=time.Now()
//user.Updated =&t
err = h.User.Update(&user)
if err != nil {
_ = log.Error(err.Error())
h.ErrorInternalServer(w, err.Error())
return
}
//invalidate the old token so the user has to log in again with the new password
rbac.DeleteUserToken(id)
_ = h.WriteOKJSON(w, api.UpdateResponse(id))
return
}

View File

@ -0,0 +1,32 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package config
import (
ldap2 "infini.sh/console/modules/security/realm/authc/ldap"
)
type Config struct {
Enabled bool `config:"enabled"`
Authentication AuthenticationConfig `config:"authc"`
OAuthConfig OAuthConfig `config:"oauth"`
}
type RealmConfig struct {
Enabled bool `config:"enabled"`
Order int `config:"order"`
}
type RealmsConfig struct {
Native RealmConfig `config:"native"`
//ldap,oauth, active_directory, pki, file, saml, kerberos, oidc, jwt
OAuth map[string]OAuthConfig `config:"oauth"`
LDAP map[string]ldap2.LDAPConfig `config:"ldap"`
}
type AuthenticationConfig struct {
Realms RealmsConfig `config:"realms"`
}

View File

@ -0,0 +1,20 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package config
type OAuthConfig struct {
Enabled bool `config:"enabled"`
ClientID string `config:"client_id"`
ClientSecret string `config:"client_secret"`
DefaultRoles []string `config:"default_roles"`
RoleMapping map[string][]string `config:"role_mapping"`
AuthorizeUrl string `config:"authorize_url"`
TokenUrl string `config:"token_url"`
RedirectUrl string `config:"redirect_url"`
Scopes []string `config:"scopes"`
SuccessPage string `config:"success_page"`
FailedPage string `config:"failed_page"`
}

View File

@ -0,0 +1,277 @@
/* Copyright © INFINI Ltd. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
"context"
"fmt"
log "github.com/cihub/seelog"
"infini.sh/console/core"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/credential"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/task"
"infini.sh/framework/core/util"
"net/http"
"strconv"
)
type APIHandler struct {
core.Handler
}
func (h *APIHandler) createCredential(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
cred := credential.Credential{}
err := h.DecodeJSON(req, &cred)
if err != nil {
log.Error(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = cred.Validate()
if err != nil {
log.Error(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = cred.Encode()
if err != nil {
log.Error(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ctx := orm.Context{
Refresh: "wait_for",
}
cred.ID = util.GetUUID()
err = orm.Create(&ctx, &cred)
if err != nil {
log.Error(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
h.WriteCreatedOKJSON(w, cred.ID)
}
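// updateCredential updates an existing credential; the secret is re-encoded only when the
// credential type or the basic-auth password actually changed.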
func (h *APIHandler) updateCredential(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
obj := credential.Credential{}
obj.ID = id
exists, err := orm.Get(&obj)
if !exists || err != nil {
h.WriteJSON(w, util.MapStr{
"_id": id,
"result": "not_found",
}, http.StatusNotFound)
return
}
newObj := credential.Credential{}
err = h.DecodeJSON(req, &newObj)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
err = newObj.Validate()
if err != nil {
log.Error(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
encodeChanged := false
if obj.Type != newObj.Type {
encodeChanged = true
} else {
switch newObj.Type {
case credential.BasicAuth:
var oldPwd string
if oldParams, ok := obj.Payload[newObj.Type].(map[string]interface{}); ok {
if pwd, ok := oldParams["password"].(string); ok {
oldPwd = pwd
} else {
http.Error(w, fmt.Sprintf("invalid password of credential [%s]", obj.ID), http.StatusInternalServerError)
return
}
}
if params, ok := newObj.Payload[newObj.Type].(map[string]interface{}); ok {
if pwd, ok := params["password"].(string); ok && pwd != oldPwd {
obj.Payload = newObj.Payload
encodeChanged = true
} else {
if oldParams, ok := obj.Payload[obj.Type].(map[string]interface{}); ok {
oldParams["username"] = params["username"]
}
}
}
default:
h.WriteError(w, fmt.Sprintf("unsupport credential type [%s]", newObj.Type), http.StatusInternalServerError)
return
}
}
obj.Name = newObj.Name
obj.Type = newObj.Type
obj.Tags = newObj.Tags
if encodeChanged {
err = obj.Encode()
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
}
ctx := &orm.Context{
Refresh: "wait_for",
}
obj.Invalid = false
err = orm.Update(ctx, &obj)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
task.RunWithinGroup("credential_callback", func(ctx context.Context) error {
credential.TriggerChangeEvent(&obj)
return nil
})
h.WriteUpdatedOKJSON(w, id)
}
func (h *APIHandler) deleteCredential(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
obj := credential.Credential{}
obj.ID = id
exists, err := orm.Get(&obj)
if !exists || err != nil {
h.WriteJSON(w, util.MapStr{
"_id": id,
"result": "not_found",
}, http.StatusNotFound)
return
}
//check dependency
toDelete, err := canDelete(&obj)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
if !toDelete {
h.WriteError(w, "This credential is in use and cannot be deleted", http.StatusInternalServerError)
return
}
ctx := &orm.Context{
Refresh: "wait_for",
}
err = orm.Delete(ctx, &obj)
if err != nil {
h.WriteError(w, err.Error(), http.StatusInternalServerError)
log.Error(err)
return
}
h.WriteDeletedOKJSON(w, id)
}
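// canDelete reports whether the credential can be removed, i.e. no Elasticsearch cluster
// configuration still references it via credential_id.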
func canDelete(cred *credential.Credential) (bool, error) {
if cred == nil {
return false, fmt.Errorf("parameter cred can not be nil")
}
q := orm.Query{
Conds: orm.And(orm.Eq("credential_id", cred.ID)),
}
err, result := orm.Search(elastic.ElasticsearchConfig{}, &q)
if err != nil {
return false, fmt.Errorf("query elasticsearch config error: %w", err)
}
return result.Total == 0, nil
}
func (h *APIHandler) searchCredential(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var (
keyword = h.GetParameterOrDefault(req, "keyword", "")
strSize = h.GetParameterOrDefault(req, "size", "20")
strFrom = h.GetParameterOrDefault(req, "from", "0")
mustQ []interface{}
)
if keyword != "" {
mustQ = append(mustQ, util.MapStr{
"query_string": util.MapStr{
"default_field": "*",
"query": keyword,
},
})
}
size, _ := strconv.Atoi(strSize)
if size <= 0 {
size = 20
}
from, _ := strconv.Atoi(strFrom)
if from < 0 {
from = 0
}
queryDSL := util.MapStr{
"size": size,
"from": from,
"sort": []util.MapStr{
{
"created": util.MapStr{
"order": "desc",
},
},
},
}
if len(mustQ) > 0 {
queryDSL["query"] = util.MapStr{
"bool": util.MapStr{
"must": mustQ,
},
}
}
q := orm.Query{}
q.RawQuery = util.MustToJSONBytes(queryDSL)
err, res := orm.Search(&credential.Credential{}, &q)
if err != nil {
log.Error(err)
h.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
searchRes := elastic.SearchResponse{}
util.MustFromJSONBytes(res.Raw, &searchRes)
if len(searchRes.Hits.Hits) > 0 {
for _, hit := range searchRes.Hits.Hits {
delete(hit.Source, "encrypt")
util.MapStr(hit.Source).Delete("payload.basic_auth.password")
}
}
h.WriteJSON(w, searchRes, http.StatusOK)
}
func (h *APIHandler) getCredential(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
id := ps.MustGetParameter("id")
obj := credential.Credential{}
obj.ID = id
exists, err := orm.Get(&obj)
if !exists || err != nil {
h.WriteJSON(w, util.MapStr{
"_id": id,
"result": "not_found",
}, http.StatusNotFound)
return
}
util.MapStr(obj.Payload).Delete("basic_auth.password")
h.WriteGetOKJSON(w, id, obj)
}

View File

@ -0,0 +1,58 @@
/* Copyright © INFINI Ltd. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package api
import (
log "github.com/cihub/seelog"
"infini.sh/console/core/security/enum"
"infini.sh/framework/core/api"
"infini.sh/framework/core/credential"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/model"
"infini.sh/framework/core/util"
"infini.sh/framework/modules/elastic/common"
)
func Init() {
handler := APIHandler{}
api.HandleAPIMethod(api.POST, "/credential", handler.RequirePermission(handler.createCredential, enum.PermissionCredentialWrite))
api.HandleAPIMethod(api.PUT, "/credential/:id", handler.RequirePermission(handler.updateCredential, enum.PermissionCredentialWrite))
api.HandleAPIMethod(api.DELETE, "/credential/:id", handler.RequirePermission(handler.deleteCredential, enum.PermissionCredentialWrite))
api.HandleAPIMethod(api.GET, "/credential/_search", handler.RequirePermission(handler.searchCredential, enum.PermissionCredentialRead))
api.HandleAPIMethod(api.GET, "/credential/:id", handler.RequirePermission(handler.getCredential, enum.PermissionCredentialRead))
credential.RegisterChangeEvent(func(cred *credential.Credential) {
var keys []string
elastic.WalkConfigs(func(key, value interface{}) bool {
if v, ok := value.(*elastic.ElasticsearchConfig); ok {
if v.CredentialID == cred.ID {
if k, ok := key.(string); ok {
keys = append(keys, k)
}
}
}
return true
})
for _, key := range keys {
conf := elastic.GetConfig(key)
if conf.CredentialID == cred.ID {
obj, err := cred.Decode()
if err != nil {
log.Error(err)
continue
}
if v, ok := obj.(model.BasicAuth); ok {
newConf := *conf
newConf.BasicAuth = &v
_, err = common.InitElasticInstance(newConf)
if err != nil {
log.Error(err)
}
log.Tracef("updated cluster config: %s", util.MustToJSON(newConf))
}
}
}
})
}

View File

@ -0,0 +1,83 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package security
import (
rbac "infini.sh/console/core/security"
authapi "infini.sh/console/modules/security/api"
"infini.sh/console/modules/security/config"
credapi "infini.sh/console/modules/security/credential/api"
"infini.sh/console/modules/security/realm"
"infini.sh/console/modules/security/realm/authc/oauth"
"infini.sh/framework/core/credential"
"infini.sh/framework/core/env"
"infini.sh/framework/core/global"
"infini.sh/framework/core/orm"
)
type Module struct {
cfg *config.Config
}
func (module *Module) Name() string {
return "security"
}
func (module *Module) Setup() {
module.cfg = &config.Config{
Enabled: false,
Authentication: config.AuthenticationConfig{
Realms: config.RealmsConfig{
Native: config.RealmConfig{
Enabled: true,
},
},
},
OAuthConfig: config.OAuthConfig{
SuccessPage: "/#/user/sso/success",
FailedPage: "/#/user/sso/failed",
},
}
ok, err := env.ParseConfig("security", &module.cfg)
if ok && err != nil && global.Env().SystemConfig.Configs.PanicOnConfigError {
panic(err)
}
if !module.cfg.Enabled {
return
}
credapi.Init()
if module.cfg.OAuthConfig.Enabled {
oauth.Init(module.cfg.OAuthConfig)
}
authapi.Init()
}
func InitSchema() {
orm.RegisterSchemaWithIndexName(rbac.Role{}, "rbac-role")
orm.RegisterSchemaWithIndexName(rbac.User{}, "rbac-user")
orm.RegisterSchemaWithIndexName(credential.Credential{}, "credential")
}
func (module *Module) Start() error {
if !module.cfg.Enabled {
return nil
}
InitSchema()
realm.Init(module.cfg)
return nil
}
func (module *Module) Stop() error {
return nil
}

View File

@ -0,0 +1,5 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package ldap

View File

@ -0,0 +1,151 @@
package ldap
import (
"context"
"errors"
log "github.com/cihub/seelog"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
"infini.sh/framework/lib/guardian/auth"
"infini.sh/framework/lib/guardian/auth/strategies/basic"
"infini.sh/framework/lib/guardian/auth/strategies/ldap"
"time"
)
type LDAPConfig struct {
Enabled bool `config:"enabled"`
Tls bool `config:"tls"`
Host string `config:"host"`
Port int `config:"port"`
BindDn string `config:"bind_dn"`
BindPassword string `config:"bind_password"`
BaseDn string `config:"base_dn"`
UserFilter string `config:"user_filter"`
UidAttribute string `config:"uid_attribute"`
GroupAttribute string `config:"group_attribute"`
RoleMapping struct {
Group map[string][]string `config:"group"`
Uid map[string][]string `config:"uid"`
} `config:"role_mapping"`
}
func (r *LDAPRealm) mapLDAPRoles(authInfo auth.Info) []string {
var ret []string
if global.Env().IsDebug {
log.Tracef("mapping LDAP authInfo: %v", authInfo)
}
//check uid
uid := authInfo.GetID()
if uid == "" {
uid = authInfo.GetUserName()
}
if global.Env().IsDebug {
log.Tracef("ldap config: %v", util.MustToJSON(r.config))
}
if roles, ok := r.config.RoleMapping.Uid[uid]; ok {
ret = append(ret, roles...)
} else {
if global.Env().IsDebug {
log.Tracef("ldap uid mapping config: %v", r.config.RoleMapping.Uid)
}
log.Debugf("LDAP uid: %v, user: %v", uid, authInfo)
}
//map group
for _, roleName := range authInfo.GetGroups() {
newRoles, ok := r.config.RoleMapping.Group[roleName]
if ok {
ret = append(ret, newRoles...)
} else {
if global.Env().IsDebug {
log.Tracef("ldap group mapping config: %v", r.config.RoleMapping.Group)
}
log.Debugf("LDAP group: %v, roleName: %v, match: %v", uid, roleName, newRoles)
}
}
return ret
}
func New(cfg2 LDAPConfig) *LDAPRealm {
var realm = &LDAPRealm{
config: cfg2,
ldapCfg: ldap.Config{
Port: cfg2.Port,
Host: cfg2.Host,
TLS: nil,
BindDN: cfg2.BindDn,
BindPassword: cfg2.BindPassword,
Attributes: nil,
BaseDN: cfg2.BaseDn,
UserFilter: cfg2.UserFilter,
GroupFilter: "",
UIDAttribute: cfg2.UidAttribute,
GroupAttribute: cfg2.GroupAttribute,
},
}
realm.ldapFunc = ldap.GetAuthenticateFunc(&realm.ldapCfg)
return realm
}
const providerName = "ldap"
type LDAPRealm struct {
config LDAPConfig
ldapCfg ldap.Config
ldapFunc basic.AuthenticateFunc
}
func (r *LDAPRealm) GetType() string {
return providerName
}
func (r *LDAPRealm) Authenticate(username, password string) (bool, *rbac.User, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
authInfo, err := r.ldapFunc(ctx, nil, []byte(username), []byte(password))
if err != nil {
return false, nil, err
}
u := &rbac.User{
AuthProvider: providerName,
Username: authInfo.GetUserName(),
Nickname: authInfo.GetUserName(),
Email: "",
}
u.Payload = &authInfo
u.ID = authInfo.GetUserName()
return true, u, err
}
func (r *LDAPRealm) Authorize(user *rbac.User) (bool, error) {
authInfo := user.Payload.(*auth.Info)
if authInfo != nil {
roles := r.mapLDAPRoles(*authInfo)
for _, roleName := range roles {
user.Roles = append(user.Roles, rbac.UserRole{
ID: roleName,
Name: roleName,
})
}
} else {
log.Warnf("LDAP %v auth Info is nil", user.Username)
}
var _, privilege = user.GetPermissions()
if len(privilege) == 0 {
log.Debug("no privilege assigned to user:", user)
return false, errors.New("no privilege assigned to this user:" + user.Username)
}
return true, nil
}
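
For reference, a minimal usage sketch of this realm outside the module wiring. Everything below — host, bind DN, filters, the "devops" group and the role names it maps to — is a placeholder, not a value shipped in this commit:

package main

import (
	"fmt"

	consoleldap "infini.sh/console/modules/security/realm/authc/ldap"
)

func main() {
	cfg := consoleldap.LDAPConfig{
		Enabled:        true,
		Host:           "ldap.example.com", // hypothetical directory server
		Port:           389,
		BindDn:         "cn=admin,dc=example,dc=com",
		BindPassword:   "changeme",
		BaseDn:         "dc=example,dc=com",
		UserFilter:     "(uid=%s)",
		UidAttribute:   "uid",
		GroupAttribute: "cn",
	}
	// Map the LDAP group "devops" onto console roles; uid-level mappings work the same way.
	cfg.RoleMapping.Group = map[string][]string{"devops": {"Administrator"}}

	realm := consoleldap.New(cfg)
	ok, user, err := realm.Authenticate("alice", "secret")
	if !ok || err != nil {
		fmt.Println("authentication failed:", err)
		return
	}
	// Authorize resolves the mapped roles into user.Roles and fails when nothing maps.
	if ok, err = realm.Authorize(user); ok {
		fmt.Println("roles:", user.Roles)
	} else {
		fmt.Println("authorization failed:", err)
	}
}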

View File

@ -0,0 +1,71 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package native
import (
"errors"
"fmt"
log "github.com/cihub/seelog"
"golang.org/x/crypto/bcrypt"
rbac "infini.sh/console/core/security"
)
var handler rbac.Adapter
func init() {
handler = rbac.Adapter{
User: &User{},
Role: &Role{},
}
rbac.RegisterAdapter(providerName, handler)
}
const providerName = "native"
type NativeRealm struct {
// Implement any required fields
}
func (r *NativeRealm) GetType() string {
return providerName
}
func (r *NativeRealm) Authenticate(username, password string) (bool, *rbac.User, error) {
// Implement the authentication logic
// Retrieve the user profile upon successful authentication
// Return the authentication result, user profile, and any potential error
user, err := handler.User.GetBy("name", username)
if err != nil {
return false, user, err
}
if user == nil {
return false, nil, fmt.Errorf("user account [%s] not found", username)
}
err = bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
if err == bcrypt.ErrMismatchedHashAndPassword {
err = errors.New("incorrect password")
}
if err == nil {
user.AuthProvider = providerName
return true, user, nil
}
return false, nil, err
}
func (r *NativeRealm) Authorize(user *rbac.User) (bool, error) {
var _, privilege = user.GetPermissions()
if len(privilege) == 0 {
log.Error("no privilege assigned to user:", user)
return false, errors.New("no privilege assigned to this user:" + user.Username)
}
return true, nil
}
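
Authenticate verifies the stored hash with bcrypt.CompareHashAndPassword, so whatever code creates native users has to persist a bcrypt hash, never the plain password. A self-contained sketch (cost and password are arbitrary):

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash once at user-creation time; this value is what belongs on the stored user record.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hash))
	// Verification mirrors what NativeRealm.Authenticate does on login.
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte("s3cret")) == nil) // true
}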

View File

@ -0,0 +1,129 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package native
import (
_ "embed"
"github.com/mitchellh/mapstructure"
"infini.sh/framework/core/elastic"
"path"
"strings"
log "github.com/cihub/seelog"
"github.com/segmentio/encoding/json"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/api/routetree"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
)
type ElasticsearchAPIMetadata struct {
Name string `json:"name"`
Methods []string `json:"methods"`
Path string `json:"path"`
}
type ElasticsearchAPIMetadataList []ElasticsearchAPIMetadata
func (list ElasticsearchAPIMetadataList) GetNames() []string {
var names []string
for _, md := range list {
if !util.StringInArray(names, md.Name) {
names = append(names, md.Name)
}
}
return names
}
//go:embed permission.json
var permissionFile []byte
func loadJsonConfig() map[string]ElasticsearchAPIMetadataList {
externalConfig := path.Join(global.Env().GetConfigDir(), "permission.json")
if util.FileExists(externalConfig) {
log.Infof("loading permission file from %s", externalConfig)
bytes, err := util.FileGetContent(externalConfig)
if err != nil {
log.Errorf("load permission file failed, use embedded config, err: %v", err)
} else {
permissionFile = bytes
}
}
apis := map[string]ElasticsearchAPIMetadataList{}
err := json.Unmarshal(permissionFile, &apis)
if err != nil {
log.Error("json config unmarshal err " + err.Error())
return nil
}
return apis
}
func Init() {
//load local files
apis := loadJsonConfig()
if apis != nil {
var esAPIRouter = routetree.New()
for _, list := range apis {
for _, md := range list {
//skip wildcard *
if strings.HasSuffix(md.Path, "*") {
continue
}
for _, method := range md.Methods {
esAPIRouter.Handle(method, md.Path, md.Name)
}
}
}
rbac.RegisterAPIPermissionRouter("elasticsearch", esAPIRouter)
}
permissions := map[string]interface{}{
"index_privileges": apis["indices"].GetNames(),
}
delete(apis, "indices")
otherApis := map[string][]string{}
for key, list := range apis {
otherApis[key] = list.GetNames()
}
permissions["cluster_privileges"] = otherApis
rbac.RegisterPermission(rbac.Elasticsearch, permissions)
//load role from store
loadRemoteRolePermission()
}
func loadRemoteRolePermission() {
log.Trace("start loading roles from adapter")
rbac.RoleMap = make(map[string]rbac.Role)
for k, role := range rbac.BuiltinRoles {
rbac.RoleMap[k] = role
}
log.Debug("load security permissions,", rbac.RoleMap, rbac.BuiltinRoles)
res, err := handler.Role.Search("", 0, 1000)
if err != nil {
log.Error(err)
return
}
response := elastic.SearchResponse{}
util.FromJSONBytes(res.Raw, &response)
for _, v := range response.Hits.Hits {
var role rbac.Role
delete(v.Source, "created")
delete(v.Source, "updated")
err = mapstructure.Decode(v.Source, &role)
if err != nil {
log.Error(err)
return
}
rbac.RoleMap[role.Name] = role
}
}

View File

@ -0,0 +1,487 @@
{
"cat": [
{"name": "cat.*", "methods": ["get"],
"path": "_cat/*"
},
{"name": "cat.indices", "methods": ["get"],
"path": "_cat/indices"
},
{"name": "cat.indices", "methods": ["get"],
"path": "_cat/indices/:target"
},
{"name": "cat.help", "methods": ["get"],
"path": "_cat/help"
},
{"name": "cat.repositories", "methods": ["get"],
"path": "_cat/repositories"
},
{"name": "cat.pending_tasks", "methods": ["get"],
"path": "_cat/pending_tasks"
},
{"name": "cat.tasks", "methods": ["get"],
"path": "_cat/tasks"
},
{"name": "cat.allocation", "methods": ["get"],
"path": "_cat/allocation"
},
{"name": "cat.count", "methods": ["get"],
"path": "_cat/count"
},
{"name": "cat.shards", "methods": ["get"],
"path": "_cat/shards"
},
{"name": "cat.shards", "methods": ["get"],
"path": "_cat/shards/:target"
},
{"name": "cat.aliases", "methods": ["get"],
"path": "_cat/aliases"
},
{"name": "cat.aliases", "methods": ["get"],
"path": "_cat/aliases/:name"
},
{"name": "cat.nodeattrs", "methods": ["get"],
"path": "_cat/nodeattrs"
},
{"name": "cat.templates", "methods": ["get"],
"path": "_cat/templates"
},
{"name": "cat.thread_pool", "methods": ["get"],
"path": "_cat/thread_pool"
},
{"name": "cat.health", "methods": ["get"],
"path": "_cat/health"
},
{"name": "cat.recovery", "methods": ["get"],
"path": "_cat/recovery"
},
{"name": "cat.fielddata", "methods": ["get"],
"path": "_cat/fielddata"
},
{"name": "cat.nodes", "methods": ["get"],
"path": "_cat/nodes"
},
{"name": "cat.plugins", "methods": ["get"],
"path": "_cat/plugins"
},
{"name": "cat.segments", "methods": ["get"],
"path": "_cat/segments"
},
{"name": "cat.snapshots", "methods": ["get"],
"path": "_cat/snapshots"
},
{"name": "cat.master", "methods": ["get"],
"path": "_cat/master"
}
],
"cluster": [
{"name": "cluster.*", "methods": ["*"],
"path": "_cluster/*"
},
{"name": "cluster.info", "methods": ["get"],
"path": "/"
},
{"name": "cluster.health", "methods": ["get"],
"path": "_cluster/health"
},
{"name": "cluster.get_settings", "methods":["get"],
"path": "_cluster/settings"
},
{"name": "cluster.pending_tasks", "methods": ["get"],
"path": "_cluster/pending_tasks"
},
{"name": "cluster.stats", "methods": ["get"],
"path": "_cluster/stats"
},
{"name": "cluster.remote_info", "methods": ["get"],
"path": "_remote/info"
},
{"name": "cluster.allocation_explain", "methods": ["get"],
"path": "_cluster/allocation/explain"
},
{"name": "cluster.put_settings", "methods": ["put"],
"path": "_cluster/settings"
},
{"name": "cluster.reroute", "methods": ["post"],
"path": "_cluster/reroute"
},
{"name": "cluster.count", "methods": ["get"],
"path": "_count"
},
{"name": "cluster.state", "methods": ["get"],
"path": "_cluster/state"
},
{"name": "cluster.bulk", "methods": ["put", "post"],
"path": "_bulk"
},
{"name": "cluster.mget", "methods": ["get", "post"],
"path": "_mget"
},
{"name": "cluster.ping", "methods": ["head"],
"path": "/"
},
{"name": "cluster.msearch", "methods": ["get", "post"],
"path": "_msearch"
},
{"name": "cluster.msearch_template", "methods": ["get", "post"],
"path": "_msearch/template"
},
{"name": "cluster.mtermvectors", "methods": ["get", "post"],
"path": "_mtermvectors"
},
{"name": "cluster.rank_eval", "methods": ["get", "post"],
"path": "_rank_eval"
},
{"name": "cluster.search", "methods": ["get", "post"],
"path": "_search"
},
{"name": "cluster.search_shards", "methods": ["get", "post"],
"path": "_search_shards"
},
{"name": "cluster.exists_alias", "methods": ["head"],
"path": "_alias/:alias"
},
{"name": "cluster.get_alias", "methods": ["get"],
"path": "_alias/:alias"
}
],
"indices": [
{"name": "indices.*", "methods": ["*"],
"path": "/*"
},
{"name": "indices.exists_alias", "methods": ["head"],
"path": "/:index_name/_alias/:alias"
},
{"name": "indices.get_alias", "methods": ["get"],
"path": "/:index_name/_alias/:alias"
},
{"name": "indices.recovery", "methods": ["get"],
"path": "/:index_name/_recovery"
},
{"name": "indices.delete", "methods": ["delete"],
"path": "/:index_name"
},
{"name": "indices.put", "methods": ["put"],
"path": "/:index_name"
},
{"name": "indices.clear_cache", "methods": ["post"],
"path": "/:index_name/_cache/clear"
},
{"name": "indices.update_by_query", "methods": ["post"],
"path": "/:index_name/_update_by_query"
},
{"name": "indices.shrink", "methods": ["post"],
"path": "/:index_name/_shrink"
},
{"name": "indices.forcemerge", "methods": ["post"],
"path": "/:index_name/_forcemerge"
},
{"name": "indices.put_alias", "methods": ["put"],
"path": "/:index_name/_alias/:alias"
},
{"name": "indices.create", "methods": ["post"],
"path": "/:index_name"
},
{"name": "indices.split", "methods": ["post"],
"path": "/:index_name/_split"
},
{"name": "indices.flush", "methods": ["post"],
"path": "/:index_name/_flush"
},
{"name": "indices.get_mapping", "methods": ["get"],
"path": "/:index_name/_mapping"
},
{"name": "indices.upgrade", "methods": ["post"],
"path": "/:index_name/_upgrade"
},
{"name": "indices.validate_query", "methods": ["get", "post"],
"path": "/:index_name/_validate/query"
},
{"name": "indices.analyze", "methods": ["post"],
"path": "/:index_name/analyze"
},
{"name": "indices.exists", "methods": ["head"],
"path": "/:index_name"
},
{"name": "indices.close", "methods": ["post"],
"path": "/:index_name/_close"
},
{"name": "indices.get_field_mapping", "methods": ["get"],
"path": "/:index_name/_mapping/:fields"
},
{"name": "indices.delete_alias", "methods": ["delete"],
"path": "/:index_name/_alias/:alias"
},
{"name": "indices.refresh", "methods": ["get", "post"],
"path": "/:index_name/_refresh"
},
{"name": "indices.segments", "methods": ["get"],
"path": "/:index_name/_segments"
},
{"name": "indices.termvectors", "methods": ["get"],
"path": "/:index_name/_termvectors"
},
{"name": "indices.flush_synced", "methods": ["get", "post"],
"path": "/:index_name/_flush/synced"
},
{"name": "indices.put_mapping", "methods": ["put"],
"path": "/:index_name/_mapping"
},
{"name": "indices.get", "methods": ["get"],
"path": "/:index_name"
},
{"name": "indices.get_settings", "methods": ["get"],
"path": "/:index_name/_settings"
},
{"name": "indices.open", "methods": ["post"],
"path": "/:index_name/_open"
},
{"name": "indices.put_settings", "methods": ["put"],
"path": "/:index_name/_settings"
},
{"name": "indices.stats", "methods": ["get"],
"path": "/:index_name/_stats"
},
{"name": "indices.delete_by_query", "methods": ["post"],
"path": "/:index_name/_delete_by_query"
},
{"name": "indices.rollover", "methods": ["post"],
"path": "/:index_name/_rollover"
},
{"name": "indices.count", "methods": ["get"],
"path": "/:index_name/_count"
},
{"name": "indices.shard_stores", "methods": ["get"],
"path": "/:index_name/_shard_stores"
},
{"name": "indices.bulk", "methods": ["post"],
"path": "/:index_name/_bulk"
},
{"name": "indices.mget", "methods": ["get", "post"],
"path": "/:index_name/_mget"
},
{"name": "indices.msearch", "methods": ["get", "post"],
"path": "/:index_name/_msearch"
},
{"name": "indices.msearch_template", "methods": ["get", "post"],
"path": "/:index_name/_msearch/template"
},
{"name": "indices.mtermvectors", "methods": ["get"],
"path": "/:index_name/_mtermvectors"
},
{"name": "indices.rank_eval", "methods": ["get"],
"path": "/:index_name/_rank_eval"
},
{"name": "indices.search", "methods": ["get", "post"],
"path": "/:index_name/_search"
},
{"name": "indices.search_shards", "methods": ["get", "post"],
"path": "/:index_name/_search_shards"
},
{"name": "indices.field_caps", "methods":["get", "post"],
"path": "/:index_name/_field_caps"
},
{"name": "indices.exists_template", "methods":["get"],
"path": "/_template/:name"
},
{"name": "indices.field_usage_stats", "methods":["get"],
"path": "/:index_name/_field_usage_stats"
},
{"name": "doc.*", "methods": ["*"],
"path": "/:index_name/:doctype"
},
{"name": "doc.update", "methods": ["put"],
"path": "/:index_name/:doctype/:doc_id"
},
{"name": "doc.update", "methods": ["post"],
"path": "/:index_name/_update/:doc_id"
},
{"name": "doc.create", "methods": ["post"],
"path": "/:index_name/:doctype"
},
{"name": "doc.create", "methods": ["post", "put"],
"path": "/:index_name/_create/:doc_id"
},
{"name": "doc.delete", "methods": ["delete"],
"path": "/:index_name/:doctype/:doc_id"
},
{"name": "doc.get", "methods": ["get"],
"path": "/:index_name/:doctype/:doc_id"
},
{"name": "doc.get", "methods": ["get"],
"path": "/:index_name/_source/:doc_id"
},
{"name": "doc.exists", "methods": ["head"],
"path": "/:index_name/:doctype/:doc_id"
},
{"name": "doc.exists_source", "methods": ["head"],
"path": "/:index_name/_source/:doc_id"
},
{"name": "doc.explain", "methods": ["get"],
"path": "/:index_name/_explain/:doc_id"
}
],
"ingest": [
{"name": "ingest.*", "methods": ["*"],
"path": "/_ingest/*"
},
{"name": "ingest.delete_pipeline", "methods": ["delete"],
"path": "/_ingest/pipeline"
},
{"name": "ingest.put_pipeline", "methods": ["put"],
"path": "/_ingest/pipeline"
},
{"name": "ingest.simulate", "methods": ["get", "post"],
"path": "/_ingest/pipeline/_simulate"
},
{"name": "ingest.put_pipeline", "methods": ["get"],
"path": "/_ingest/pipeline"
},
{"name": "ingest.processor_grok", "methods": ["get"],
"path": "/_ingest/processor/grok"
}
],
"nodes": [
{"name": "nodes.*", "methods": ["*"],
"path": "/_nodes/*"
},
{"name": "nodes.info", "methods": ["get"],
"path": "/_nodes"
},
{"name": "nodes.info", "methods": ["get"],
"path": "/_nodes/:node_id"
},
{"name": "nodes.stats", "methods": ["get"],
"path": "/_nodes/stats"
},
{"name": "nodes.reload_secure_settings", "methods": ["post"],
"path": "/_nodes/reload_secure_settings"
},
{"name": "nodes.usage", "methods": ["get"],
"path": "/_nodes/usage"
},
{"name": "nodes.hot_threads", "methods": ["get"],
"path": "/_nodes/hot_threads"
}
],
"reindex": [
{"name": "reindex.*", "methods": ["*"],
"path": "/_reindex/*"
},
{"name": "reindex", "methods": ["post"],
"path": "/_reindex"
},
{"name": "reindex.rethrottle", "methods": ["post"],
"path": "/_reindex/:rid/_rethrottle"
}
],
"render_search_template": [
{"name": "render_search_template.*", "methods": ["*"],
"path": "/_render/template"
},
{"name": "render_search_template", "methods": ["post", "get"],
"path": "/_render/template"
},
{"name": "render_search_template_by_id", "methods": ["post", "get"],
"path": "/_render/template/:tid"
}
],
"scripts": [
{"name": "scripts.*", "methods": ["*"],
"path": "/_scripts/:sid"
},
{"name": "scripts.get", "methods": ["get"],
"path": "/_scripts/:sid"
},
{"name": "scripts.put", "methods": ["put"],
"path": "/_scripts/:sid"
},
{"name": "scripts.delete", "methods": ["delete"],
"path": "/_scripts/:sid"
},
{"name": "scripts.painless_execute", "methods": ["get", "post"],
"path": "_scripts/painless/_execute"
}
],
"scroll": [
{"name": "scroll.*", "methods": ["*"],
"path": "/_search/scroll*"
},
{"name": "scroll.search", "methods": ["get", "post"],
"path": "/_search/scroll"
},
{"name": "scroll.delete", "methods": ["delete"],
"path": "/_search/scroll/:scroll_id"
},
{"name": "scroll.get", "methods": ["get"],
"path": "/_search/scroll/:scroll_id"
},
{"name": "scroll.create", "methods": ["post"],
"path": "/_search/scroll/:scroll_id"
}
],
"snapshot": [
{"name": "snapshot.*", "methods": ["*"],
"path": "/_snapshot/*"
},
{"name": "snapshot.get_repository", "methods": ["get"],
"path": "/_snapshot/:repo_name"
},
{"name": "snapshot.create_repository", "methods": ["post"],
"path": "/_snapshot/:repo_name"
},
{"name": "snapshot.create", "methods": ["post"],
"path": "/_snapshot/:repo_name/:snapshot_name"
},
{"name": "snapshot.restore", "methods": ["post"],
"path": "/_snapshot/:repo_name/:snapshot_name/_restore"
},
{"name": "snapshot.status", "methods": ["get"],
"path": "/_snapshot/_status"
},
{"name": "snapshot.delete", "methods": ["delete"],
"path": "/_snapshot/:repo_name/:snapshot_name"
},
{"name": "snapshot.delete_repository", "methods": ["delete"],
"path": "/_snapshot/:repo_name"
},
{"name": "snapshot.verify_repository", "methods": ["post"],
"path": "/_snapshot/:repo_name/_verify"
},
{"name": "snapshot.get", "methods": ["get"],
"path": "/_snapshot/:repo_name/:snapshot_name"
}
],
"tasks": [
{"name": "tasks.*", "methods": ["*"],
"path": "/_tasks/*"
},
{"name": "tasks.list", "methods": ["get"],
"path": "/_tasks"
},
{"name": "tasks.cancel", "methods": ["post"],
"path": "/_tasks/:task_id/_cancel"
},
{"name": "tasks.get", "methods": ["get"],
"path": "/_tasks/:task_id"
}
],
"sql": [
{"name": "sql.*", "methods": ["*"], "path": "/_sql/*"},
{"name": "sql.clear", "methods": ["post"], "path": "/_sql/close"},
{"name": "sql.get_async", "methods": ["get"], "path": "/_sql/async/:search_id"},
{"name": "sql.delete_async", "methods": ["delete"], "path": "/_sql/async/delete/:search_id"},
{"name": "sql.get_async_status", "methods": ["get"], "path": "/_sql/async/status/:search_id"},
{"name": "sql.search", "methods": ["get", "post"], "path": "/_sql"},
{"name": "sql.search", "methods": ["post"], "path": "/_plugins/_sql"},
{"name": "sql.translate", "methods": ["get", "post"], "path": "/_sql/translate"}
]
}

View File

@ -0,0 +1,76 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package native
import (
"fmt"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"strings"
)
type Role struct {
}
func (dal *Role) Get(id string) (rbac.Role, error) {
r, ok := rbac.BuiltinRoles[id]
if ok {
return r, nil
}
role := rbac.Role{}
role.ID = id
_, err := orm.Get(&role)
return role, err
}
func (dal *Role) GetBy(field string, value interface{}) (rbac.Role, error) {
role := rbac.Role{}
err, result := orm.GetBy(field, value, &role)
if result.Total > 0 {
if len(result.Result) > 0 {
bytes := util.MustToJSONBytes(result.Result[0])
err := util.FromJSONBytes(bytes, &role)
if err != nil {
panic(err)
}
return role, nil
}
}
return role, err
}
func (dal *Role) Update(role *rbac.Role) error {
return orm.Save(nil, role)
}
func (dal *Role) Create(role *rbac.Role) (string, error) {
role.ID = util.GetUUID()
return role.ID, orm.Save(nil, role)
}
func (dal *Role) Delete(id string) error {
role := rbac.Role{}
role.ID = id
return orm.Delete(nil, role)
}
func (dal *Role) Search(keyword string, from, size int) (orm.Result, error) {
query := orm.Query{}
queryDSL := `{"query":{"bool":{"must":[%s]}}, "from": %d,"size": %d}`
mustBuilder := &strings.Builder{}
if keyword != "" {
mustBuilder.WriteString(fmt.Sprintf(`{"query_string":{"default_field":"*","query": "%s"}}`, keyword))
}
queryDSL = fmt.Sprintf(queryDSL, mustBuilder.String(), from, size)
query.RawQuery = []byte(queryDSL)
err, result := orm.Search(rbac.Role{}, &query)
return result, err
}

View File

@ -0,0 +1,72 @@
/* Copyright © INFINI Ltd. All rights reserved.
* web: https://infinilabs.com
* mail: hello#infini.ltd */
package native
import (
"fmt"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
"strings"
)
type User struct {
}
func (dal *User) Get(id string) (rbac.User, error) {
user := rbac.User{}
user.ID = id
_, err := orm.Get(&user)
return user, err
}
func (dal *User) GetBy(field string, value interface{}) (*rbac.User, error) {
user := &rbac.User{}
err, result := orm.GetBy(field, value, rbac.User{})
if err != nil {
return nil, err
}
if len(result.Result) == 0 {
return nil, nil
}
userBytes, err := util.ToJSONBytes(result.Result[0])
if err != nil {
return nil, err
}
util.FromJSONBytes(userBytes, &user)
return user, err
}
func (dal *User) Update(user *rbac.User) error {
return orm.Update(nil, user)
}
func (dal *User) Create(user *rbac.User) (string, error) {
user.ID = util.GetUUID()
return user.ID, orm.Save(nil, user)
}
func (dal *User) Delete(id string) error {
user := rbac.User{}
user.ID = id
return orm.Delete(nil, user)
}
func (dal *User) Search(keyword string, from, size int) (orm.Result, error) {
query := orm.Query{}
queryDSL := `{"query":{"bool":{"must":[%s]}}, "from": %d,"size": %d}`
mustBuilder := &strings.Builder{}
if keyword != "" {
mustBuilder.WriteString(fmt.Sprintf(`{"query_string":{"default_field":"*","query": "%s"}}`, keyword))
}
queryDSL = fmt.Sprintf(queryDSL, mustBuilder.String(), from, size)
query.RawQuery = []byte(queryDSL)
err, result := orm.Search(rbac.User{}, &query)
return result, err
}
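
Because the native package's init (earlier in this commit) registers these DALs as the "native" adapter, other modules reach them through rbac.GetAdapter rather than importing the package directly — the OAuth API handler does exactly that. A hedged lookup sketch; the account name is a placeholder:

package main

import (
	"fmt"

	rbac "infini.sh/console/core/security"
	_ "infini.sh/console/modules/security/realm/authc/native" // blank import runs init() and registers the adapter
)

func main() {
	adapter := rbac.GetAdapter("native")
	user, err := adapter.User.GetBy("name", "admin") // hypothetical account
	if err != nil || user == nil {
		fmt.Println("user not found:", err)
		return
	}
	fmt.Println("found:", user.Username)
}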

View File

@ -0,0 +1,19 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package oauth
import (
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/api"
)
type APIHandler struct {
api.Handler
rbac.Adapter
}
const adapterType = "native"
var apiHandler = APIHandler{Adapter: rbac.GetAdapter(adapterType)} //TODO handle hard coded

View File

@ -0,0 +1,41 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package oauth
import (
"golang.org/x/oauth2"
rbac "infini.sh/console/core/security"
"infini.sh/console/modules/security/config"
"infini.sh/framework/core/api"
)
var (
oAuthConfig config.OAuthConfig
defaultOAuthRoles []rbac.UserRole
oauthCfg oauth2.Config
)
// func New(cfg config.OAuthConfig) *OAuthRealm {
func Init(cfg config.OAuthConfig) {
//init oauth
if cfg.Enabled {
api.HandleUIMethod(api.GET, "/sso/login/", apiHandler.AuthHandler)
api.HandleUIMethod(api.GET, "/sso/callback/", apiHandler.CallbackHandler)
oAuthConfig = cfg
oauthCfg = oauth2.Config{
ClientID: cfg.ClientID,
ClientSecret: cfg.ClientSecret,
Endpoint: oauth2.Endpoint{
AuthURL: cfg.AuthorizeUrl,
TokenURL: cfg.TokenUrl,
},
RedirectURL: cfg.RedirectUrl,
Scopes: cfg.Scopes,
}
}
}
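
A hedged sketch of wiring this realm by hand; in the shipped flow Module.Setup parses these values from the security configuration, so the GitHub endpoints, client credentials, redirect URL and role name below are placeholders only:

package main

import (
	"infini.sh/console/modules/security/config"
	"infini.sh/console/modules/security/realm/authc/oauth"
)

func main() {
	cfg := config.OAuthConfig{
		Enabled:      true,
		ClientID:     "<github-client-id>",
		ClientSecret: "<github-client-secret>",
		AuthorizeUrl: "https://github.com/login/oauth/authorize",
		TokenUrl:     "https://github.com/login/oauth/access_token",
		RedirectUrl:  "https://console.example.com/sso/callback/",
		DefaultRoles: []string{"ReadonlyUI"}, // hypothetical role name
		SuccessPage:  "/#/user/sso/success",
		FailedPage:   "/#/user/sso/failed",
	}
	// Registers the /sso/login/ and /sso/callback/ UI routes and caches the oauth2.Config.
	oauth.Init(cfg)
}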

View File

@ -0,0 +1,228 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package oauth
import (
"encoding/base64"
log "github.com/cihub/seelog"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
rbac "infini.sh/console/core/security"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/util"
"math/rand"
"net/http"
"strings"
"time"
)
func (h APIHandler) getDefaultRoles() []rbac.UserRole {
if len(oAuthConfig.DefaultRoles) == 0 {
return nil
}
if len(defaultOAuthRoles) > 0 {
return defaultOAuthRoles
}
roles := h.getRolesByRoleIDs(oAuthConfig.DefaultRoles)
if len(roles) > 0 {
defaultOAuthRoles = roles
}
return roles
}
func (h APIHandler) getRolesByRoleIDs(roles []string) []rbac.UserRole {
out := []rbac.UserRole{}
for _, v := range roles {
role, err := h.Adapter.Role.Get(v)
if err != nil {
if !strings.Contains(err.Error(), "record not found") {
panic(err)
}
//try name
role, err = h.Adapter.Role.GetBy("name", v)
if err != nil {
continue
}
}
out = append(out, rbac.UserRole{ID: role.ID, Name: role.Name})
}
return out
}
const oauthSession string = "oauth-session"
func (h APIHandler) AuthHandler(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
b := make([]byte, 16)
rand.Read(b)
state := base64.URLEncoding.EncodeToString(b)
session, err := api.GetSessionStore(r, oauthSession)
session.Values["state"] = state
session.Values["redirect_url"] = h.Get(r, "redirect_url", "")
err = session.Save(r, w)
if err != nil {
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
url := oauthCfg.AuthCodeURL(state)
http.Redirect(w, r, url, 302)
}
func joinError(url string, err error) string {
if err != nil {
return url + "?err=" + util.UrlEncode(err.Error())
}
return url
}
func (h APIHandler) CallbackHandler(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
session, err := api.GetSessionStore(r, oauthSession)
if err != nil {
log.Error(w, "failed to sso, aborted")
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
if r.URL.Query().Get("state") != session.Values["state"] {
log.Error("failed to sso, no state match; possible csrf OR cookies not enabled")
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
tkn, err := oauthCfg.Exchange(oauth2.NoContext, r.URL.Query().Get("code"))
if err != nil {
log.Error("failed to sso, there was an issue getting your token")
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
if !tkn.Valid() {
log.Error("failed to sso, retreived invalid token")
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
//only for github, TODO
client := github.NewClient(oauthCfg.Client(oauth2.NoContext, tkn))
user, res, err := client.Users.Get(oauth2.NoContext, "")
if err != nil {
if res != nil {
log.Error("failed to sso, error getting name:", err, res.String())
}
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
if user != nil {
roles := []rbac.UserRole{}
var id, name, email string
if user.Login != nil && *user.Login != "" {
id = *user.Login
}
if user.Name != nil && *user.Name != "" {
name = *user.Name
}
if user.Email != nil && *user.Email != "" {
email = *user.Email
}
if id == "" {
log.Error("failed to sso, user id can't be nil")
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
if name == "" {
name = id
}
//get by roleMapping
roles = h.getRoleMapping(user)
if len(roles) > 0 {
u := &rbac.User{
AuthProvider: "github",
Username: id,
Nickname: name,
Email: email,
Roles: roles,
}
u.ID = id
//generate access token
data, err := rbac.GenerateAccessToken(u)
if err != nil {
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
return
}
token := rbac.Token{ExpireIn: time.Now().Unix() + 86400}
rbac.SetUserToken(u.ID, token)
//data["status"] = "ok"
url := oAuthConfig.SuccessPage + "?payload=" + util.UrlEncode(util.MustToJSON(data))
http.Redirect(w, r, url, 302)
return
}
}
http.Redirect(w, r, joinError(oAuthConfig.FailedPage, err), 302)
}
func (h APIHandler) getRoleMapping(user *github.User) []rbac.UserRole {
roles := []rbac.UserRole{}
if user != nil {
if len(oAuthConfig.RoleMapping) > 0 {
r, ok := oAuthConfig.RoleMapping[*user.Login]
if ok {
roles = h.getRolesByRoleIDs(r)
}
}
}
if len(roles) == 0 {
return h.getDefaultRoles()
}
return roles
}
const providerName = "oauth"
type OAuthRealm struct {
// Implement any required fields
}
//func (r *OAuthRealm) GetType() string{
// return providerName
//}
//func (r *OAuthRealm) Authenticate(username, password string) (bool, *rbac.User, error) {
//
// //if user == nil {
// // return false,nil, fmt.Errorf("user account [%s] not found", username)
// //}
//
// return false,nil, err
//}
//
//func (r *OAuthRealm) Authorize(user *rbac.User) (bool, error) {
// var _, privilege = user.GetPermissions()
//
// if len(privilege) == 0 {
// log.Error("no privilege assigned to user:", user)
// return false, errors.New("no privilege assigned to this user:" + user.Name)
// }
//
// return true,nil
//}

View File

@ -0,0 +1,65 @@
package main
import (
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"fmt"
"net/http"
"net/url"
"github.com/crewjam/saml"
"github.com/crewjam/saml/samlsp"
)
var metadataurl = "https://sso.infini.ltd/metadata" //Metadata of the IDP
var sessioncert = "./sessioncert" //Key pair used for creating a signed session
var sessionkey = "./sessionkey"
var serverkey = "./serverkey" //Server TLS
var servercert = "./servercert"
var serverurl = "https://localhost" // base url of this service
var entityId = serverurl //Entity ID uniquely identifies your service for IDP (does not have to be server url)
var listenAddr = "0.0.0.0:443"
func hello(w http.ResponseWriter, r *http.Request) {
s := samlsp.SessionFromContext(r.Context())
if s == nil {
return
}
sa, ok := s.(samlsp.SessionWithAttributes)
if !ok {
return
}
fmt.Fprintf(w, "Token contents, %+v!", sa.GetAttributes())
}
func main() {
keyPair, err := tls.LoadX509KeyPair(sessioncert, sessionkey)
panicIfError(err)
keyPair.Leaf, err = x509.ParseCertificate(keyPair.Certificate[0])
panicIfError(err)
//idpMetadataURL, err := url.Parse(metadataurl)
panicIfError(err)
rootURL, err := url.Parse(serverurl)
panicIfError(err)
samlSP, _ := samlsp.New(samlsp.Options{
URL: *rootURL,
Key: keyPair.PrivateKey.(*rsa.PrivateKey),
Certificate: keyPair.Leaf,
IDPMetadata: &saml.EntityDescriptor{
//EntityID:
}, // you can also have Metadata XML instead of URL
EntityID: entityId,
})
app := http.HandlerFunc(hello)
http.Handle("/hello", samlSP.RequireAccount(app))
http.Handle("/saml/", samlSP)
panicIfError(http.ListenAndServeTLS(listenAddr, servercert, serverkey, nil))
}
func panicIfError(err error) {
if err != nil {
panic(err)
}
}

View File

@ -0,0 +1,10 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package authz
func Authorize() (map[string]interface{}, error) {
return nil, nil
}

View File

@ -0,0 +1,96 @@
/* Copyright © INFINI LTD. All rights reserved.
* Web: https://infinilabs.com
* Email: hello#infini.ltd */
package realm
import (
log "github.com/cihub/seelog"
rbac "infini.sh/console/core/security"
"infini.sh/console/modules/security/config"
ldap2 "infini.sh/console/modules/security/realm/authc/ldap"
"infini.sh/console/modules/security/realm/authc/native"
"infini.sh/framework/core/errors"
"infini.sh/framework/core/global"
"infini.sh/framework/core/util"
)
var realms = []rbac.SecurityRealm{}
func Init(config *config.Config) {
if !config.Enabled {
return
}
if config.Authentication.Realms.Native.Enabled {
native.Init()
nativeRealm := native.NativeRealm{}
realms = append(realms, &nativeRealm) //TODO sort by order
}
//if len(config.Authentication.Realms.OAuth) > 0 {
// for _, v := range config.Authentication.Realms.OAuth {
// {
// realm:=oauth.New(v)
// realms=append(realms,realm) //TODO sort by order
// }
// }
//}
if global.Env().IsDebug {
log.Tracef("config: %v", util.MustToJSON(config))
}
if len(config.Authentication.Realms.LDAP) > 0 {
for _, v := range config.Authentication.Realms.LDAP {
if v.Enabled {
realm := ldap2.New(v)
realms = append(realms, realm) //TODO sort by order
}
}
}
}
func Authenticate(username, password string) (bool, *rbac.User, error) {
for i, realm := range realms {
ok, user, err := realm.Authenticate(username, password)
log.Debugf("authenticate result: %v, user: %v, err: %v, realm: %v", ok, user, err, i)
if ok && user != nil && err == nil {
return true, user, nil
}
}
if global.Env().IsDebug {
log.Errorf("failed to authenticate user: %v", username)
}
return false, nil, errors.Errorf("failed to authenticate user: %v", username)
}
func Authorize(user *rbac.User) (bool, error) {
for i, realm := range realms {
//skip if not the same auth provider, TODO: support cross-provider authorization
if user.AuthProvider != realm.GetType() {
continue
}
ok, err := realm.Authorize(user)
log.Debugf("authorize result: %v, user: %v, err: %v, realm: %v", ok, user, err, i)
if ok && err == nil {
//return on any success, TODO, maybe merge all roles and privileges from all realms
return true, nil
}
}
roles, privilege := user.GetPermissions()
if len(roles) == 0 && len(privilege) == 0 {
if global.Env().IsDebug {
log.Errorf("failed to authorize user: %v", user.Username)
}
return false, errors.New("no roles or privileges")
}
return false, errors.Errorf("failed to authorize user: %v", user.Username)
}
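
Taken together, the two exported functions above form the whole login path for the handlers that call them. A minimal flow sketch — it assumes realm.Init already ran during module start, and the credentials are placeholders:

package main

import (
	"fmt"

	"infini.sh/console/modules/security/realm"
)

func login(username, password string) error {
	ok, user, err := realm.Authenticate(username, password)
	if err != nil || !ok {
		return fmt.Errorf("authentication failed: %w", err)
	}
	// Authorization is answered by the realm matching the user's AuthProvider.
	if ok, err = realm.Authorize(user); err != nil || !ok {
		return fmt.Errorf("authorization failed: %w", err)
	}
	fmt.Println("login ok for", user.Username, "roles:", user.Roles)
	return nil
}

func main() {
	_ = login("alice", "secret") // hypothetical credentials
}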

View File

@ -5,17 +5,17 @@
package alerting
import (
"infini.sh/console/core"
"infini.sh/console/core/security/enum"
"infini.sh/framework/core/api"
"infini.sh/framework/core/api/rbac/enum"
)
type AlertAPI struct {
api.Handler
core.Handler
}
func (alert *AlertAPI) Init() {
api.HandleAPIMethod(api.GET, "/alerting/rule/:rule_id", alert.RequirePermission(alert.getRule,enum.PermissionAlertRuleRead))
api.HandleAPIMethod(api.GET, "/alerting/rule/:rule_id", alert.RequirePermission(alert.getRule, enum.PermissionAlertRuleRead))
api.HandleAPIMethod(api.POST, "/alerting/rule", alert.RequirePermission(alert.createRule, enum.PermissionAlertRuleWrite))
api.HandleAPIMethod(api.POST, "/alerting/rule/test", alert.RequireLogin(alert.sendTestMessage))
api.HandleAPIMethod(api.DELETE, "/alerting/rule/:rule_id", alert.RequirePermission(alert.deleteRule, enum.PermissionAlertRuleWrite))
@ -47,13 +47,11 @@ func (alert *AlertAPI) Init() {
api.HandleAPIMethod(api.GET, "/alerting/message/_search", alert.RequirePermission(alert.searchAlertMessage, enum.PermissionAlertMessageRead))
api.HandleAPIMethod(api.POST, "/alerting/message/_ignore", alert.RequirePermission(alert.ignoreAlertMessage, enum.PermissionAlertMessageWrite))
api.HandleAPIMethod(api.GET, "/alerting/message/_stats", alert.RequirePermission(alert.getAlertMessageStats, enum.PermissionAlertMessageRead))
api.HandleAPIMethod(api.GET, "/alerting/message/_stats", alert.RequirePermission(alert.getAlertMessageStats, enum.PermissionAlertMessageRead))
api.HandleAPIMethod(api.GET, "/alerting/message/:message_id", alert.RequirePermission(alert.getAlertMessage, enum.PermissionAlertMessageRead))
api.HandleAPIMethod(api.GET, "/alerting/message/:message_id/notification", alert.getMessageNotificationInfo)
//just for test
//api.HandleAPIMethod(api.GET, "/alerting/rule/test", alert.testRule)
}

View File

@ -9,11 +9,12 @@ import (
"fmt"
log "github.com/cihub/seelog"
"github.com/r3labs/diff/v2"
"infini.sh/console/core/security"
"infini.sh/console/model/alerting"
"infini.sh/console/model/insight"
"infini.sh/console/modules/elastic/api"
alerting2 "infini.sh/console/service/alerting"
_ "infini.sh/console/service/alerting/elasticsearch"
"infini.sh/framework/core/api/rbac"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/event"
@ -24,7 +25,6 @@ import (
"infini.sh/framework/core/task"
"infini.sh/framework/core/util"
elastic2 "infini.sh/framework/modules/elastic"
"infini.sh/framework/modules/elastic/api"
"infini.sh/framework/modules/elastic/common"
"net/http"
"strings"
@ -32,7 +32,7 @@ import (
)
func (alertAPI *AlertAPI) createRule(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
rules := []alerting.Rule{}
rules := []alerting.Rule{}
err := alertAPI.DecodeJSON(req, &rules)
if err != nil {
log.Error(err)
@ -41,7 +41,7 @@ func (alertAPI *AlertAPI) createRule(w http.ResponseWriter, req *http.Request, p
}, http.StatusInternalServerError)
return
}
user, err := rbac.FromUserContext(req.Context())
user, err := security.FromUserContext(req.Context())
if err != nil {
log.Error(err)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
@ -69,7 +69,7 @@ func (alertAPI *AlertAPI) createRule(w http.ResponseWriter, req *http.Request, p
ids = append(ids, rule.ID)
rule.Created = time.Now()
rule.Updated = time.Now()
if rule.Schedule.Interval == ""{
if rule.Schedule.Interval == "" {
rule.Schedule.Interval = "1m"
}
//filter empty metric group
@ -93,19 +93,19 @@ func (alertAPI *AlertAPI) createRule(w http.ResponseWriter, req *http.Request, p
}, http.StatusInternalServerError)
return
}
saveAlertActivity("alerting_rule_change", "create", util.MapStr{
"cluster_id": rule.Resource.ID,
"rule_id": rule.ID,
saveAlertActivity("alerting_rule_change", "create", util.MapStr{
"cluster_id": rule.Resource.ID,
"rule_id": rule.ID,
"cluster_name": rule.Resource.Name,
"rule_name": rule.Name,
},nil, &rule)
"rule_name": rule.Name,
}, nil, &rule)
eng := alerting2.GetEngine(rule.Resource.Type)
if rule.Enabled {
ruleTask := task.ScheduleTask{
ID: rule.ID,
Interval: rule.Schedule.Interval,
ID: rule.ID,
Interval: rule.Schedule.Interval,
Description: rule.Metrics.Expression,
Task: eng.GenerateTask(rule),
Task: eng.GenerateTask(rule),
}
task.RegisterScheduleTask(ruleTask)
task.StartTask(ruleTask.ID)
@ -115,7 +115,7 @@ func (alertAPI *AlertAPI) createRule(w http.ResponseWriter, req *http.Request, p
alertAPI.WriteJSON(w, util.MapStr{
"result": "created",
"ids": ids,
"ids": ids,
}, http.StatusOK)
}
func (alertAPI *AlertAPI) getRule(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -125,7 +125,7 @@ func (alertAPI *AlertAPI) getRule(w http.ResponseWriter, req *http.Request, ps h
_, err := orm.Get(&obj)
if err != nil {
if errors.Is(err, elastic2.ErrNotFound){
if errors.Is(err, elastic2.ErrNotFound) {
alertAPI.WriteJSON(w, util.MapStr{
"_id": id,
"found": false,
@ -166,7 +166,7 @@ func (alertAPI *AlertAPI) getRuleDetail(w http.ResponseWriter, req *http.Request
exists, err := orm.Get(&obj)
if !exists || err != nil {
if errors.Is(err, elastic2.ErrNotFound){
if errors.Is(err, elastic2.ErrNotFound) {
alertAPI.WriteJSON(w, util.MapStr{
"_id": id,
"found": false,
@ -182,7 +182,7 @@ func (alertAPI *AlertAPI) getRuleDetail(w http.ResponseWriter, req *http.Request
expression, _ := cond.GenerateConditionExpression()
obj.Conditions.Items[i].Expression = strings.ReplaceAll(expression, "result", metricExpression)
}
alertNumbers, err := alertAPI.getRuleAlertMessageNumbers([]string{obj.ID})
alertNumbers, err := alertAPI.getRuleAlertMessageNumbers([]string{obj.ID})
if err != nil {
log.Error(err)
alertAPI.WriteJSON(w, util.MapStr{
@ -222,12 +222,12 @@ func (alertAPI *AlertAPI) getRuleDetail(w http.ResponseWriter, req *http.Request
}
q := &orm.Query{
WildcardIndex: true,
RawQuery: util.MustToJSONBytes(queryDSL),
RawQuery: util.MustToJSONBytes(queryDSL),
}
err, result := orm.Search(alerting.AlertMessage{}, q)
if err != nil {
log.Error(err)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
var state interface{} = "N/A"
@ -259,7 +259,7 @@ func (alertAPI *AlertAPI) getRuleDetail(w http.ResponseWriter, req *http.Request
err, result = orm.Search(alerting.Channel{}, q)
if err != nil {
log.Error(err)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
chm := map[string]alerting.Channel{}
@ -296,25 +296,25 @@ func (alertAPI *AlertAPI) getRuleDetail(w http.ResponseWriter, req *http.Request
}
detailObj := util.MapStr{
"rule_name": obj.Name,
"resource_name": obj.Resource.Name,
"resource_id": obj.Resource.ID,
"resource_objects": obj.Resource.Objects,
"resource_time_field": obj.Resource.TimeField,
"resource_raw_filter": obj.Resource.RawFilter,
"metrics": obj.Metrics,
"bucket_size": obj.Metrics.BucketSize, //统计周期
"updated": obj.Updated,
"conditions": obj.Conditions,
"message_count": alertNumbers[obj.ID], //所有关联告警消息数(包括已恢复的)
"state": state,
"enabled": obj.Enabled,
"created": obj.Created,
"creator": obj.Creator,
"tags": obj.Tags,
"alerting_message": alertingMessageItem,
"expression": obj.Metrics.Expression,
"notification_config": obj.NotificationConfig,
"rule_name": obj.Name,
"resource_name": obj.Resource.Name,
"resource_id": obj.Resource.ID,
"resource_objects": obj.Resource.Objects,
"resource_time_field": obj.Resource.TimeField,
"resource_raw_filter": obj.Resource.RawFilter,
"metrics": obj.Metrics,
"bucket_size": obj.Metrics.BucketSize, //统计周期
"updated": obj.Updated,
"conditions": obj.Conditions,
"message_count": alertNumbers[obj.ID], //所有关联告警消息数(包括已恢复的)
"state": state,
"enabled": obj.Enabled,
"created": obj.Created,
"creator": obj.Creator,
"tags": obj.Tags,
"alerting_message": alertingMessageItem,
"expression": obj.Metrics.Expression,
"notification_config": obj.NotificationConfig,
"recovery_notification_config": obj.RecoveryNotificationConfig,
}
@ -322,7 +322,7 @@ func (alertAPI *AlertAPI) getRuleDetail(w http.ResponseWriter, req *http.Request
}
func saveActivity(activityInfo *event.Activity){
func saveActivity(activityInfo *event.Activity) {
queueConfig := queue.GetOrInitConfig("platform##activities")
if queueConfig.Labels == nil {
queueConfig.ReplaceLabels(util.MapStr{
@ -336,7 +336,7 @@ func saveActivity(activityInfo *event.Activity){
Timestamp: time.Now(),
Metadata: event.EventMetadata{
Category: "elasticsearch",
Name: "activity",
Name: "activity",
},
Fields: util.MapStr{
"activity": activityInfo,
@ -346,16 +346,16 @@ func saveActivity(activityInfo *event.Activity){
}
}
func saveAlertActivity(name, typ string, labels map[string]interface{}, changelog diff.Changelog, oldState interface{}){
func saveAlertActivity(name, typ string, labels map[string]interface{}, changelog diff.Changelog, oldState interface{}) {
activityInfo := &event.Activity{
ID: util.GetUUID(),
ID: util.GetUUID(),
Timestamp: time.Now(),
Metadata: event.ActivityMetadata{
Category: "elasticsearch",
Group: "platform",
Name: name,
Type: typ,
Labels: labels,
Group: "platform",
Name: name,
Type: typ,
Labels: labels,
},
Changelog: changelog,
Fields: util.MapStr{
@ -382,8 +382,7 @@ func (alertAPI *AlertAPI) updateRule(w http.ResponseWriter, req *http.Request, p
id = oldRule.ID
create := oldRule.Created
rule := &alerting.Rule{
}
rule := &alerting.Rule{}
err = alertAPI.DecodeJSON(req, rule)
if err != nil {
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
@ -421,12 +420,12 @@ func (alertAPI *AlertAPI) updateRule(w http.ResponseWriter, req *http.Request, p
log.Error(err)
return
}
saveAlertActivity("alerting_rule_change", "update", util.MapStr{
"cluster_id": rule.Resource.ID,
"rule_id": rule.ID,
"rule_name": rule.Name,
saveAlertActivity("alerting_rule_change", "update", util.MapStr{
"cluster_id": rule.Resource.ID,
"rule_id": rule.ID,
"rule_name": rule.Name,
"cluster_name": rule.Resource.Name,
},changeLog, oldRule)
}, changeLog, oldRule)
if rule.Enabled {
exists, err = checkResourceExists(rule)
@ -449,7 +448,7 @@ func (alertAPI *AlertAPI) updateRule(w http.ResponseWriter, req *http.Request, p
}
task.RegisterScheduleTask(ruleTask)
task.StartTask(ruleTask.ID)
}else{
} else {
task.DeleteTask(id)
}
@ -459,10 +458,10 @@ func (alertAPI *AlertAPI) updateRule(w http.ResponseWriter, req *http.Request, p
}, 200)
}
func clearKV(ruleID string){
func clearKV(ruleID string) {
_ = kv.DeleteKey(alerting2.KVLastNotificationTime, []byte(ruleID))
_ = kv.DeleteKey(alerting2.KVLastEscalationTime, []byte(ruleID))
_ = kv.DeleteKey(alerting2.KVLastMessageState,[]byte(ruleID))
_ = kv.DeleteKey(alerting2.KVLastMessageState, []byte(ruleID))
}
func (alertAPI *AlertAPI) deleteRule(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -487,12 +486,12 @@ func (alertAPI *AlertAPI) deleteRule(w http.ResponseWriter, req *http.Request, p
log.Error(err)
return
}
saveAlertActivity("alerting_rule_change", "delete", util.MapStr{
"cluster_id": obj.Resource.ID,
"rule_id": obj.ID,
saveAlertActivity("alerting_rule_change", "delete", util.MapStr{
"cluster_id": obj.Resource.ID,
"rule_id": obj.ID,
"cluster_name": obj.Resource.Name,
"rule_name": obj.Name,
},nil, &obj)
"rule_name": obj.Name,
}, nil, &obj)
task.DeleteTask(obj.ID)
clearKV(obj.ID)
@ -541,12 +540,12 @@ func (alertAPI *AlertAPI) batchDeleteRule(w http.ResponseWriter, req *http.Reque
}
var newIDs []string
for _, rule := range rules {
saveAlertActivity("alerting_rule_change", "delete", util.MapStr{
"cluster_id": rule.Resource.ID,
"rule_id": rule.ID,
saveAlertActivity("alerting_rule_change", "delete", util.MapStr{
"cluster_id": rule.Resource.ID,
"rule_id": rule.ID,
"cluster_name": rule.Resource.Name,
"rule_name": rule.Name,
},nil, &rule)
"rule_name": rule.Name,
}, nil, &rule)
task.DeleteTask(rule.ID)
clearKV(rule.ID)
newIDs = append(newIDs, rule.ID)
@ -587,16 +586,14 @@ func (alertAPI *AlertAPI) batchDeleteRule(w http.ResponseWriter, req *http.Reque
func (alertAPI *AlertAPI) searchRule(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var (
keyword = alertAPI.GetParameterOrDefault(req, "keyword", "")
from = alertAPI.GetIntOrDefault(req, "from", 0)
size = alertAPI.GetIntOrDefault(req, "size", 20)
from = alertAPI.GetIntOrDefault(req, "from", 0)
size = alertAPI.GetIntOrDefault(req, "size", 20)
)
mustQuery := []util.MapStr{
}
mustQuery := []util.MapStr{}
clusterFilter, hasAllPrivilege := alertAPI.GetClusterFilter(req, "resource.resource_id")
if !hasAllPrivilege && clusterFilter == nil {
alertAPI.WriteJSON(w, elastic.SearchResponse{
}, http.StatusOK)
alertAPI.WriteJSON(w, elastic.SearchResponse{}, http.StatusOK)
return
}
if !hasAllPrivilege {
@ -663,7 +660,7 @@ func (alertAPI *AlertAPI) searchRule(w http.ResponseWriter, req *http.Request, p
alertAPI.WriteJSON(w, searchRes, http.StatusOK)
}
func (alertAPI *AlertAPI) getRuleAlertMessageNumbers(ruleIDs []string) ( map[string]interface{},error) {
func (alertAPI *AlertAPI) getRuleAlertMessageNumbers(ruleIDs []string) (map[string]interface{}, error) {
esClient := elastic.GetClient(global.MustLookupString(elastic.GlobalSystemElasticsearchID))
queryDsl := util.MapStr{
@ -693,7 +690,7 @@ func (alertAPI *AlertAPI) getRuleAlertMessageNumbers(ruleIDs []string) ( map[str
},
}
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.AlertMessage{}), util.MustToJSONBytes(queryDsl) )
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.AlertMessage{}), util.MustToJSONBytes(queryDsl))
if err != nil {
return nil, err
}
@ -736,10 +733,10 @@ func (alertAPI *AlertAPI) fetchAlertInfos(w http.ResponseWriter, req *http.Reque
},
}
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl) )
searchRes, err := esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl))
if err != nil {
log.Error(err)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
if len(searchRes.Hits.Hits) == 0 {
@ -751,7 +748,7 @@ func (alertAPI *AlertAPI) fetchAlertInfos(w http.ResponseWriter, req *http.Reque
for _, hit := range searchRes.Hits.Hits {
if ruleID, ok := hit.Source["rule_id"].(string); ok {
latestAlertInfos[ruleID] = util.MapStr{
"status": hit.Source["state"],
"status": hit.Source["state"],
}
}
}
@ -786,10 +783,10 @@ func (alertAPI *AlertAPI) fetchAlertInfos(w http.ResponseWriter, req *http.Reque
},
},
}
searchRes, err = esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl) )
searchRes, err = esClient.SearchWithRawQueryDSL(orm.GetWildcardIndexName(alerting.Alert{}), util.MustToJSONBytes(queryDsl))
if err != nil {
log.Error(err)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
return
}
for _, hit := range searchRes.Hits.Hits {
@ -862,7 +859,7 @@ func disableRule(obj *alerting.Rule) {
func (alertAPI *AlertAPI) sendTestMessage(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
typ := alertAPI.GetParameterOrDefault(req, "type", "notification")
rule := alerting.Rule{}
rule := alerting.Rule{}
err := alertAPI.DecodeJSON(req, &rule)
if err != nil {
log.Error(err)
@ -875,7 +872,7 @@ func (alertAPI *AlertAPI) sendTestMessage(w http.ResponseWriter, req *http.Reque
rule.ID = util.GetUUID()
}
eng := alerting2.GetEngine(rule.Resource.Type)
actionResults, err := eng.Test(&rule, typ)
actionResults, err := eng.Test(&rule, typ)
if err != nil {
log.Error(err)
alertAPI.WriteJSON(w, util.MapStr{
@ -942,14 +939,13 @@ func (alertAPI *AlertAPI) getPreviewMetricData(w http.ResponseWriter, req *http.
bkSize = duration.Seconds()
}
bucketSize, min, max, err := api.GetMetricRangeAndBucketSize(minStr, maxStr, int(bkSize), 15)
filterParam := &alerting.FilterParam{
Start: min,
End: max,
Start: min,
End: max,
BucketSize: fmt.Sprintf("%ds", bucketSize),
}
metricItem, _, err := getRuleMetricData(rule, filterParam)
metricItem, _, err := getRuleMetricData(rule, filterParam)
if err != nil {
log.Error(err)
alertAPI.WriteJSON(w, util.MapStr{
@ -963,7 +959,7 @@ func (alertAPI *AlertAPI) getPreviewMetricData(w http.ResponseWriter, req *http.
}
func (alertAPI *AlertAPI) getMetricData(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
rule := &alerting.Rule{
rule := &alerting.Rule{
ID: ps.ByName("rule_id"),
}
exists, err := orm.Get(rule)
@ -980,11 +976,11 @@ func (alertAPI *AlertAPI) getMetricData(w http.ResponseWriter, req *http.Request
)
bucketSize, min, max, err := api.GetMetricRangeAndBucketSize(minStr, maxStr, 60, 15)
filterParam := &alerting.FilterParam{
Start: min,
End: max,
Start: min,
End: max,
BucketSize: fmt.Sprintf("%ds", bucketSize),
}
metricItem, queryResult, err := getRuleMetricData(rule, filterParam)
metricItem, queryResult, err := getRuleMetricData(rule, filterParam)
if err != nil {
log.Error(err)
alertAPI.WriteJSON(w, util.MapStr{
@ -992,21 +988,21 @@ func (alertAPI *AlertAPI) getMetricData(w http.ResponseWriter, req *http.Request
}, http.StatusInternalServerError)
return
}
resBody := util.MapStr{
"metric": metricItem,
resBody := util.MapStr{
"metric": metricItem,
"bucket_label": rule.Metrics.BucketLabel,
}
if alertAPI.GetParameter(req, "debug") == "1" {
resBody["query"] = queryResult.Query
}
alertAPI.WriteJSON(w,resBody, http.StatusOK)
alertAPI.WriteJSON(w, resBody, http.StatusOK)
}
func getRuleMetricData( rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.AlertMetricItem, *alerting.QueryResult, error) {
func getRuleMetricData(rule *alerting.Rule, filterParam *alerting.FilterParam) (*alerting.AlertMetricItem, *alerting.QueryResult, error) {
eng := alerting2.GetEngine(rule.Resource.Type)
metricData, queryResult, err := eng.GetTargetMetricData(rule, true, filterParam)
if err != nil {
return nil,queryResult, err
return nil, queryResult, err
}
formatType := "num"
@ -1102,7 +1098,7 @@ func getRuleMetricData( rule *alerting.Rule, filterParam *alerting.FilterParam)
}
metricItem.BucketGroups = append(metricItem.BucketGroups, md.GroupValues)
metricItem.Lines = append(metricItem.Lines, &common.MetricLine{
Data: targetData,
Data: targetData,
BucketSize: filterParam.BucketSize,
Metric: common.MetricSummary{
Label: label,
@ -1112,7 +1108,7 @@ func getRuleMetricData( rule *alerting.Rule, filterParam *alerting.FilterParam)
},
})
}
return &metricItem,queryResult, nil
return &metricItem, queryResult, nil
}
func (alertAPI *AlertAPI) batchEnableRule(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -1213,7 +1209,7 @@ func (alertAPI *AlertAPI) batchDisableRule(w http.ResponseWriter, req *http.Requ
func (alertAPI *AlertAPI) searchFieldValues(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
var keyword = alertAPI.GetParameterOrDefault(req, "keyword", "")
var field = alertAPI.GetParameterOrDefault(req, "field", "category")
items , err := searchListItems(field, keyword, 20)
items, err := searchListItems(field, keyword, 20)
if err != nil {
log.Error(err)
alertAPI.WriteError(w, err.Error(), http.StatusInternalServerError)
@ -1222,7 +1218,7 @@ func (alertAPI *AlertAPI) searchFieldValues(w http.ResponseWriter, req *http.Req
alertAPI.WriteJSON(w, items, http.StatusOK)
}
func searchListItems(field, keyword string, size int) ([]string, error){
func searchListItems(field, keyword string, size int) ([]string, error) {
query := util.MapStr{
"size": 0,
"aggs": util.MapStr{
@ -1234,8 +1230,8 @@ func searchListItems(field, keyword string, size int) ([]string, error){
},
},
}
if v := strings.TrimSpace(keyword); v != ""{
query["query"]= util.MapStr{
if v := strings.TrimSpace(keyword); v != "" {
query["query"] = util.MapStr{
"query_string": util.MapStr{
"default_field": field,
"query": fmt.Sprintf("*%s*", v),
@ -1257,7 +1253,7 @@ func searchListItems(field, keyword string, size int) ([]string, error){
items := []string{}
for _, bk := range searchRes.Aggregations["items"].Buckets {
if v, ok := bk["key"].(string); ok {
if strings.Contains(v, keyword){
if strings.Contains(v, keyword) {
items = append(items, v)
}
}
@ -1265,7 +1261,7 @@ func searchListItems(field, keyword string, size int) ([]string, error){
return items, nil
}
func getRulesByID(ruleIDs []string) ([]alerting.Rule, error){
func getRulesByID(ruleIDs []string) ([]alerting.Rule, error) {
if len(ruleIDs) == 0 {
return nil, nil
}
@ -1292,4 +1288,4 @@ func getRulesByID(ruleIDs []string) ([]alerting.Rule, error){
rules = append(rules, rule)
}
return rules, nil
}
}

View File

@ -5,12 +5,13 @@
package data
import (
"infini.sh/console/core"
"infini.sh/console/core/security/enum"
"infini.sh/framework/core/api"
"infini.sh/framework/core/api/rbac/enum"
)
type DataAPI struct {
api.Handler
core.Handler
}
func InitAPI() {
@ -18,4 +19,4 @@ func InitAPI() {
api.HandleAPIMethod(api.POST, "/data/export", dataApi.RequirePermission(dataApi.exportData, enum.PermissionAlertChannelRead, enum.PermissionAlertRuleRead))
api.HandleAPIMethod(api.POST, "/data/import", dataApi.RequirePermission(dataApi.importData, enum.PermissionAlertChannelWrite, enum.PermissionAlertRuleWrite))
}
}
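The /data/export and /data/import routes above are registered through RequirePermission. Below is a self-contained sketch of that wrapping pattern, assuming a hypothetical hasPermission check and illustrative permission names — not the console's actual RBAC implementation.

```go
package main

import (
	"fmt"
	"net/http"
)

type handlerFunc func(w http.ResponseWriter, r *http.Request)

// requirePermission returns a handler that only calls next when every listed
// permission passes the (hypothetical) check for the current request.
func requirePermission(next handlerFunc, permissions ...string) handlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		for _, p := range permissions {
			if !hasPermission(r, p) {
				http.Error(w, "permission denied: "+p, http.StatusForbidden)
				return
			}
		}
		next(w, r)
	}
}

// hasPermission is a stand-in; the real check would consult the caller's roles.
func hasPermission(r *http.Request, permission string) bool {
	return r.Header.Get("X-Role") == "admin"
}

func exportData(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "exported")
}

func main() {
	http.HandleFunc("/data/export", requirePermission(exportData, "alert.channel:read", "alert.rule:read"))
	// http.ListenAndServe(":8080", nil) // omitted in this sketch
}
```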


@ -17,6 +17,7 @@ import (
type EmailAPI struct {
api.Handler
}
func InitAPI() {
email := EmailAPI{}
api.HandleAPIMethod(api.POST, "/email/server/_test", email.testEmailServer)
@ -72,4 +73,4 @@ func InitEmailServer() error {
return common.RefreshEmailServer()
}
return nil
}
}


@ -4,9 +4,9 @@ import (
"fmt"
log "github.com/cihub/seelog"
"infini.sh/console/common"
"infini.sh/console/core/security"
"infini.sh/console/model"
"infini.sh/console/service"
"infini.sh/framework/core/api/rbac"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/event"
@ -51,7 +51,7 @@ func (handler APIHandler) ElasticsearchOverviewAction(w http.ResponseWriter, req
queryDsl["query"] = clusterFilter
}
user, auditLogErr := rbac.FromUserContext(req.Context())
user, auditLogErr := security.FromUserContext(req.Context())
if auditLogErr == nil && handler.GetHeader(req, "Referer", "") != "" {
auditLog, _ := model.NewAuditLogBuilderWithDefault().WithOperator(user.Username).
WithLogTypeAccess().WithResourceTypeClusterManagement().

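The rbac-to-security change above keeps the same audit-log flow: read the authenticated user from the request context and only record an access entry when a Referer header is present. A minimal stand-alone sketch of that flow follows; the context key, User type, and the printed "audit" line are stand-ins for security.FromUserContext and the audit-log builder, not the console's real APIs.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type ctxKey string

const userKey ctxKey = "user"

type User struct {
	Username string
}

// fromUserContext mimics pulling the authenticated user out of the request context.
func fromUserContext(ctx context.Context) (*User, error) {
	if u, ok := ctx.Value(userKey).(*User); ok {
		return u, nil
	}
	return nil, errors.New("user not found in context")
}

func overviewHandler(w http.ResponseWriter, req *http.Request) {
	if user, err := fromUserContext(req.Context()); err == nil && req.Header.Get("Referer") != "" {
		// The real code builds an audit entry with log type and resource type;
		// here we only record the operator name.
		fmt.Printf("audit: access by %s\n", user.Username)
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/elasticsearch/overview", nil)
	req = req.WithContext(context.WithValue(req.Context(), userKey, &User{Username: "admin"}))
	req.Header.Set("Referer", "http://localhost/")
	overviewHandler(httptest.NewRecorder(), req)
}
```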

@ -1,6 +1,7 @@
package index_management
import (
"infini.sh/console/core"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/global"
"net/http"
@ -10,7 +11,6 @@ import (
"infini.sh/console/config"
model2 "infini.sh/console/model"
"infini.sh/framework/core/api"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/orm"
"infini.sh/framework/core/util"
@ -18,7 +18,7 @@ import (
type APIHandler struct {
Config *config.AppConfig
api.Handler
core.Handler
}
func (handler APIHandler) GetDictListAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
@ -117,4 +117,4 @@ func (handler APIHandler) UpdateDictItemAction(w http.ResponseWriter, req *http.
resp["payload"] = dict
handler.WriteJSON(w, resp, http.StatusOK)
}
}


@ -3,9 +3,9 @@ package index_management
import (
log "github.com/cihub/seelog"
"infini.sh/console/common"
"infini.sh/console/core/security"
"infini.sh/console/model"
"infini.sh/console/service"
"infini.sh/framework/core/api/rbac"
httprouter "infini.sh/framework/core/api/router"
"infini.sh/framework/core/elastic"
"infini.sh/framework/core/radix"
@ -43,7 +43,7 @@ func (handler APIHandler) HandleGetMappingsAction(w http.ResponseWriter, req *ht
func (handler APIHandler) HandleCatIndicesAction(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
targetClusterID := ps.ByName("id")
user, auditLogErr := rbac.FromUserContext(req.Context())
user, auditLogErr := security.FromUserContext(req.Context())
if auditLogErr == nil && handler.GetHeader(req, "Referer", "") != "" {
auditLog, _ := model.NewAuditLogBuilderWithDefault().WithOperator(user.Username).
WithLogTypeAccess().WithResourceTypeClusterManagement().
@ -139,7 +139,7 @@ func (handler APIHandler) HandleCreateIndexAction(w http.ResponseWriter, req *ht
targetClusterID := ps.ByName("id")
client := elastic.GetClient(targetClusterID)
indexName := ps.ByName("index")
claims, auditLogErr := rbac.ValidateLogin(req.Header.Get("Authorization"))
claims, auditLogErr := security.ValidateLogin(req.Header.Get("Authorization"))
if auditLogErr == nil && handler.GetHeader(req, "Referer", "") != "" {
auditLog, _ := model.NewAuditLogBuilderWithDefault().WithOperator(claims.Username).
WithLogTypeOperation().WithResourceTypeClusterManagement().

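HandleCreateIndexAction above validates the caller from the Authorization header before building the operation audit entry. A rough stand-alone sketch of that flow; validateLogin here is a stand-in that only checks the Bearer prefix, whereas the real security.ValidateLogin verifies a signed token and returns full claims.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

type Claims struct {
	Username string
}

// validateLogin is a stand-in: it extracts the bearer token but performs no
// real verification; the actual implementation checks signature and expiry.
func validateLogin(authorizationHeader string) (*Claims, error) {
	token, ok := strings.CutPrefix(authorizationHeader, "Bearer ")
	if !ok || token == "" {
		return nil, errors.New("missing or malformed bearer token")
	}
	return &Claims{Username: "admin"}, nil
}

func main() {
	claims, err := validateLogin("Bearer example-token")
	if err != nil {
		fmt.Println("login rejected:", err)
		return
	}
	fmt.Println("operator for audit log:", claims.Username)
}
```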

@ -2,6 +2,7 @@ package api
import (
"infini.sh/console/config"
"infini.sh/console/core/security/enum"
"infini.sh/console/plugin/api/alerting"
"infini.sh/console/plugin/api/data"
"infini.sh/console/plugin/api/email"
@ -12,7 +13,6 @@ import (
"infini.sh/console/plugin/api/notification"
"infini.sh/console/plugin/api/platform"
"infini.sh/framework/core/api"
"infini.sh/framework/core/api/rbac/enum"
"path"
)


@ -4,10 +4,13 @@
package insight
import "infini.sh/framework/core/api"
import (
"infini.sh/console/core"
"infini.sh/framework/core/api"
)
type InsightAPI struct {
api.Handler
core.Handler
}
func InitAPI() {

Some files were not shown because too many files have changed in this diff.