Merge pull request 'code restructure' (#20) from slurm_zj into master

This commit is contained in:
devad 2022-10-25 14:54:38 +08:00
commit cc9eaf9cae
159 changed files with 11426 additions and 7169 deletions

.gitignore vendored
View File

@ -27,8 +27,7 @@ buf.lock
go.sum
# config file
aksk.conf
config.yaml
configs/tenanter.yaml
log/
/go_build_gitlink_org_cn_JCCE_PCM

View File

@ -1,64 +0,0 @@
# This files contains all configuration options for analysis running.
# For more details, please refer to: https://golangci-lint.run/usage/configuration/
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 5m
# which dirs to skip: issues from them won't be reported;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but default dirs are skipped independently
# from this option's value (see skip-dirs-use-default).
# "/" will be replaced by current OS file path separator to properly work
# on Windows.
skip-dirs:
- hack/tools/preferredimports # This code is directly lifted from the Kubernetes codebase, skip checking
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs-use-default: true
# One of 'readonly' and 'vendor'.
# - readonly: the go command is disallowed from the implicit automatic updating of go.mod described above.
# Instead, it fails when any changes to go.mod are needed. This setting is most useful to check
# that go.mod does not need updates, such as in a continuous integration and testing system.
# - vendor: the go command assumes that the vendor directory holds the correct copies of dependencies and ignores
# the dependency descriptions in go.mod.
modules-download-mode: readonly
linters:
enable:
# linters maintained by golang.org
- gofmt
- goimports
- govet
# linters default enabled by golangci-lint .
- deadcode
- errcheck
- gosimple
- ineffassign
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
# other linters supported by golangci-lint.
- gocyclo
- gosec
- whitespace
- revive
linters-settings:
goimports:
local-prefixes: github.com/karmada-io/karmada
gocyclo:
# minimal cyclomatic complexity to report
min-complexity: 15
issues:
# The list of ids of default excludes to include or disable. By default it's empty.
include:
# disable excluding of issues about comments from revive
# see https://golangci-lint.run/usage/configuration/#command-line-options for more info
- EXC0012
- EXC0013
- EXC0014

View File

@ -2,9 +2,7 @@
## protobuf build process
```shell
buf mod update
buf build
buf generate
sh gen.sh
```
The protobuf-generated files are under lan_trans
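gen.sh itself is not shown in this diff; presumably it wraps the buf commands that the old instructions listed. A minimal sketch of an equivalent script, assuming buf v1 and the buf.gen.yaml below:

```shell
#!/bin/sh
set -e
buf mod update   # refresh the pinned dependencies in buf.lock
buf build        # type-check the protos under idl/
buf generate     # run the plugins declared in buf.gen.yaml into gen/
```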

View File

@ -0,0 +1,17 @@
version: v1
plugins:
- name: go
out: gen
opt:
- paths=source_relative
- name: go-grpc
out: gen
opt:
- paths=source_relative
- name: grpc-gateway
out: gen
opt:
- paths=source_relative
- grpc_api_configuration=idl/pod.yaml
- name: openapiv2
out: gen/openapiv2
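The grpc_api_configuration option tells protoc-gen-grpc-gateway to read its HTTP bindings from idl/pod.yaml instead of in-proto google.api.http options. For reference, an equivalent standalone protoc invocation would look roughly like this (illustrative; assumes protoc-gen-go, protoc-gen-go-grpc and protoc-gen-grpc-gateway are on PATH):

```shell
protoc -I . \
  --go_out=gen --go_opt=paths=source_relative \
  --go-grpc_out=gen --go-grpc_opt=paths=source_relative \
  --grpc-gateway_out=gen \
  --grpc-gateway_opt=paths=source_relative \
  --grpc-gateway_opt=grpc_api_configuration=idl/pod.yaml \
  idl/pod.proto
```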

View File

@ -1,12 +1,12 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: idl/pbpod/pod.proto
// source: idl/pod.proto
/*
Package pbpod is a reverse proxy.
Package podpb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package pbpod
package podpb
import (
"context"
@ -269,20 +269,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/CreatePods")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/CreatePods", runtime.WithHTTPPathPattern("/apis/pod/createMulti"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_CreatePods_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_CreatePods_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_CreatePods_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_CreatePods_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -292,20 +294,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/CreatePod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/CreatePod", runtime.WithHTTPPathPattern("/apis/pod/create"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_CreatePod_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_CreatePod_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_CreatePod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_CreatePod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -315,20 +319,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/DeletePod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/DeletePod", runtime.WithHTTPPathPattern("/apis/pod/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_DeletePod_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_DeletePod_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_DeletePod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_DeletePod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -338,20 +344,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/UpdatePod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/UpdatePod", runtime.WithHTTPPathPattern("/apis/pod/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_UpdatePod_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_UpdatePod_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_UpdatePod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_UpdatePod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -361,20 +369,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/ListPodDetail")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/ListPodDetail", runtime.WithHTTPPathPattern("/apis/pod/detail"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_ListPodDetail_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_ListPodDetail_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_ListPodDetail_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_ListPodDetail_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -384,20 +394,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/ListPod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/ListPod", runtime.WithHTTPPathPattern("/apis/pod"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_ListPod_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_ListPod_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_ListPod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_ListPod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -407,20 +419,22 @@ func RegisterPodServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbpod.PodService/ListPodAll")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pod.PodService/ListPodAll", runtime.WithHTTPPathPattern("/apis/pod/all"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_PodService_ListPodAll_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_PodService_ListPodAll_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_ListPodAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_ListPodAll_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -469,19 +483,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/CreatePods")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/CreatePods", runtime.WithHTTPPathPattern("/apis/pod/createMulti"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_CreatePods_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_CreatePods_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_CreatePods_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_CreatePods_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -489,19 +505,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/CreatePod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/CreatePod", runtime.WithHTTPPathPattern("/apis/pod/create"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_CreatePod_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_CreatePod_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_CreatePod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_CreatePod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -509,19 +527,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/DeletePod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/DeletePod", runtime.WithHTTPPathPattern("/apis/pod/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_DeletePod_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_DeletePod_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_DeletePod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_DeletePod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -529,19 +549,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/UpdatePod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/UpdatePod", runtime.WithHTTPPathPattern("/apis/pod/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_UpdatePod_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_UpdatePod_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_UpdatePod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_UpdatePod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -549,19 +571,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/ListPodDetail")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/ListPodDetail", runtime.WithHTTPPathPattern("/apis/pod/detail"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_ListPodDetail_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_ListPodDetail_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_ListPodDetail_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_ListPodDetail_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -569,19 +593,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/ListPod")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/ListPod", runtime.WithHTTPPathPattern("/apis/pod"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_ListPod_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_ListPod_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_ListPod_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_ListPod_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -589,19 +615,21 @@ func RegisterPodServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbpod.PodService/ListPodAll")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pod.PodService/ListPodAll", runtime.WithHTTPPathPattern("/apis/pod/all"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PodService_ListPodAll_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_PodService_ListPodAll_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_PodService_ListPodAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_PodService_ListPodAll_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -615,7 +643,7 @@ var (
pattern_PodService_DeletePod_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "pod", "delete"}, ""))
pattern_PodService_UpdatePod_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "pod", "update"}, ""))
pattern_PodService_ListPodDetail_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "pod", "detail"}, ""))
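A hedged sketch of how these generated handlers are typically mounted in-process; the import paths mirror this repository's layout, and the podpb alias is illustrative:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"

	podpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
	podserver "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
)

func main() {
	mux := runtime.NewServeMux()
	// Serve the REST routes (POST /apis/pod/create, GET /apis/pod, ...)
	// directly against the in-process gRPC implementation.
	if err := podpb.RegisterPodServiceHandlerServer(context.Background(), mux, &podserver.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```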

View File

@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc (unknown)
// source: idl/pbpod/pod.proto
// source: idl/pod.proto
package pbpod
package podpb
import (
context "context"
@ -48,7 +48,7 @@ func NewPodServiceClient(cc grpc.ClientConnInterface) PodServiceClient {
func (c *podServiceClient) CreatePods(ctx context.Context, in *CreatePodsReq, opts ...grpc.CallOption) (*CreatePodsResp, error) {
out := new(CreatePodsResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/CreatePods", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/CreatePods", in, out, opts...)
if err != nil {
return nil, err
}
@ -57,7 +57,7 @@ func (c *podServiceClient) CreatePods(ctx context.Context, in *CreatePodsReq, op
func (c *podServiceClient) CreatePod(ctx context.Context, in *CreatePodReq, opts ...grpc.CallOption) (*CreatePodResp, error) {
out := new(CreatePodResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/CreatePod", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/CreatePod", in, out, opts...)
if err != nil {
return nil, err
}
@ -66,7 +66,7 @@ func (c *podServiceClient) CreatePod(ctx context.Context, in *CreatePodReq, opts
func (c *podServiceClient) DeletePod(ctx context.Context, in *DeletePodReq, opts ...grpc.CallOption) (*DeletePodResp, error) {
out := new(DeletePodResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/DeletePod", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/DeletePod", in, out, opts...)
if err != nil {
return nil, err
}
@ -75,7 +75,7 @@ func (c *podServiceClient) DeletePod(ctx context.Context, in *DeletePodReq, opts
func (c *podServiceClient) UpdatePod(ctx context.Context, in *UpdatePodReq, opts ...grpc.CallOption) (*UpdatePodResp, error) {
out := new(UpdatePodResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/UpdatePod", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/UpdatePod", in, out, opts...)
if err != nil {
return nil, err
}
@ -84,7 +84,7 @@ func (c *podServiceClient) UpdatePod(ctx context.Context, in *UpdatePodReq, opts
func (c *podServiceClient) ListPodDetail(ctx context.Context, in *ListPodDetailReq, opts ...grpc.CallOption) (*ListPodDetailResp, error) {
out := new(ListPodDetailResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/ListPodDetail", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/ListPodDetail", in, out, opts...)
if err != nil {
return nil, err
}
@ -93,7 +93,7 @@ func (c *podServiceClient) ListPodDetail(ctx context.Context, in *ListPodDetailR
func (c *podServiceClient) ListPod(ctx context.Context, in *ListPodReq, opts ...grpc.CallOption) (*ListPodResp, error) {
out := new(ListPodResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/ListPod", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/ListPod", in, out, opts...)
if err != nil {
return nil, err
}
@ -102,7 +102,7 @@ func (c *podServiceClient) ListPod(ctx context.Context, in *ListPodReq, opts ...
func (c *podServiceClient) ListPodAll(ctx context.Context, in *ListPodAllReq, opts ...grpc.CallOption) (*ListPodResp, error) {
out := new(ListPodResp)
err := c.cc.Invoke(ctx, "/pbpod.PodService/ListPodAll", in, out, opts...)
err := c.cc.Invoke(ctx, "/pod.PodService/ListPodAll", in, out, opts...)
if err != nil {
return nil, err
}
@ -178,7 +178,7 @@ func _PodService_CreatePods_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/CreatePods",
FullMethod: "/pod.PodService/CreatePods",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).CreatePods(ctx, req.(*CreatePodsReq))
@ -196,7 +196,7 @@ func _PodService_CreatePod_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/CreatePod",
FullMethod: "/pod.PodService/CreatePod",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).CreatePod(ctx, req.(*CreatePodReq))
@ -214,7 +214,7 @@ func _PodService_DeletePod_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/DeletePod",
FullMethod: "/pod.PodService/DeletePod",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).DeletePod(ctx, req.(*DeletePodReq))
@ -232,7 +232,7 @@ func _PodService_UpdatePod_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/UpdatePod",
FullMethod: "/pod.PodService/UpdatePod",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).UpdatePod(ctx, req.(*UpdatePodReq))
@ -250,7 +250,7 @@ func _PodService_ListPodDetail_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/ListPodDetail",
FullMethod: "/pod.PodService/ListPodDetail",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).ListPodDetail(ctx, req.(*ListPodDetailReq))
@ -268,7 +268,7 @@ func _PodService_ListPod_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/ListPod",
FullMethod: "/pod.PodService/ListPod",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).ListPod(ctx, req.(*ListPodReq))
@ -286,7 +286,7 @@ func _PodService_ListPodAll_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbpod.PodService/ListPodAll",
FullMethod: "/pod.PodService/ListPodAll",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PodServiceServer).ListPodAll(ctx, req.(*ListPodAllReq))
@ -298,7 +298,7 @@ func _PodService_ListPodAll_Handler(srv interface{}, ctx context.Context, dec fu
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var PodService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "pbpod.PodService",
ServiceName: "pod.PodService",
HandlerType: (*PodServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
@ -331,5 +331,5 @@ var PodService_ServiceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "idl/pbpod/pod.proto",
Metadata: "idl/pod.proto",
}
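On the gRPC side, the regenerated stubs are consumed as usual; a minimal client sketch (address and request values are illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	podpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Invokes /pod.PodService/ListPod, the renamed full method shown above.
	resp, err := podpb.NewPodServiceClient(conn).ListPod(ctx, &podpb.ListPodReq{Namespace: "pcm"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d pods", len(resp.Pods))
}
```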

View File

@ -0,0 +1,316 @@
{
"swagger": "2.0",
"info": {
"title": "idl/pod.proto",
"version": "version not set"
},
"tags": [
{
"name": "PodService"
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {},
"definitions": {
"podCloudProvider": {
"type": "string",
"enum": [
"ali",
"tencent",
"huawei",
"k8s",
"harvester"
],
"default": "ali",
"description": "- ali: 0 - 阿里云\n - tencent: 1 - 腾讯云\n - huawei: 2 - 华为云\n - k8s: 3 - K8S\n - harvester: 3 - Harvester",
"title": "云提供商"
},
"podCreatePodReq": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/podCloudProvider"
},
"accountName": {
"type": "string",
"title": "账号名称"
},
"podId": {
"type": "string",
"title": "实例id"
},
"podName": {
"type": "string",
"title": "实例名称"
},
"regionId": {
"type": "integer",
"format": "int32",
"title": "地域,数据中心"
},
"containerImage": {
"type": "string",
"title": "镜像"
},
"containerName": {
"type": "string",
"title": "容器名称"
},
"cpuPod": {
"type": "string",
"title": "v cpu数"
},
"memoryPod": {
"type": "string",
"title": "内存MB"
},
"securityGroupId": {
"type": "string",
"title": "安全组ID 对应腾讯 SecurityGroupIds(腾讯必需)"
},
"subnetId": {
"type": "string",
"title": "子网ID 对应腾讯 SubnetId(腾讯必需)"
},
"vpcId": {
"type": "string",
"title": "VPC ID 对应腾讯 VpcId(腾讯必需)"
},
"namespace": {
"type": "string",
"title": "名空间"
},
"requestSource": {
"type": "string",
"title": "请求源,如果是从pcm sdk 过来的则单独生成tenanters实体"
}
}
},
"podCreatePodResp": {
"type": "object",
"properties": {
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
},
"requestId": {
"type": "string",
"title": "请求id出现问题后提供给云厂商排查问题"
},
"podId": {
"type": "string",
"title": "podId"
},
"podName": {
"type": "string",
"title": "podName"
}
}
},
"podCreatePodsResp": {
"type": "object",
"properties": {
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
},
"requestId": {
"type": "array",
"items": {
"type": "string"
},
"title": "请求id出现问题后提供给云厂商排查问题"
}
}
},
"podDeletePodResp": {
"type": "object",
"properties": {
"finished": {
"type": "boolean",
"title": "删除是否完成"
},
"requestId": {
"type": "string",
"title": "请求id出现问题后提供给云厂商排查问题"
},
"podId": {
"type": "string",
"title": "podId"
},
"podName": {
"type": "string",
"title": "podName"
}
}
},
"podListPodDetailResp": {
"type": "object",
"properties": {
"pods": {
"type": "array",
"items": {
"$ref": "#/definitions/podPodInstance"
},
"title": "Pod集合"
},
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
},
"pageNumber": {
"type": "integer",
"format": "int32",
"title": "分页相关参数,页码"
},
"pageSize": {
"type": "integer",
"format": "int32",
"title": "分页相关参数,每页数量"
},
"nextToken": {
"type": "string",
"title": "分页相关参数下一页的token"
},
"requestId": {
"type": "string",
"title": "请求id出现问题后提供给云厂商排查问题"
}
}
},
"podListPodResp": {
"type": "object",
"properties": {
"pods": {
"type": "array",
"items": {
"$ref": "#/definitions/podPodInstance"
},
"title": "pod list"
}
}
},
"podPodInstance": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/podCloudProvider"
},
"accountName": {
"type": "string",
"title": "账号名称"
},
"pcmId": {
"type": "string",
"title": "pcm id"
},
"podId": {
"type": "string",
"title": "实例id"
},
"podName": {
"type": "string",
"title": "实例名称"
},
"regionId": {
"type": "integer",
"format": "int32",
"title": "地域ID"
},
"regionName": {
"type": "string",
"title": "地域Name"
},
"containerImage": {
"type": "string",
"title": "镜像"
},
"containerName": {
"type": "string",
"title": "容器名称"
},
"cpuPod": {
"type": "string",
"title": "vcpu数"
},
"memoryPod": {
"type": "string",
"title": "内存MB"
},
"securityGroupId": {
"type": "string",
"title": "安全组ID 对应腾讯 SecurityGroupIds(腾讯必需)"
},
"subnetId": {
"type": "string",
"title": "子网ID 对应腾讯 SubnetId(腾讯必需)"
},
"vpcId": {
"type": "string",
"title": "VPC ID 对应腾讯 VpcId(腾讯必需)"
},
"namespace": {
"type": "string",
"title": "名空间"
},
"status": {
"type": "string",
"title": "实例状态"
}
}
},
"podUpdatePodResp": {
"type": "object",
"properties": {
"finished": {
"type": "boolean",
"title": "更新是否完成"
},
"requestId": {
"type": "string",
"title": "请求id出现问题后提供给云厂商排查问题"
},
"podId": {
"type": "string",
"title": "podId"
},
"podName": {
"type": "string",
"title": "podName"
}
}
},
"protobufAny": {
"type": "object",
"properties": {
"@type": {
"type": "string"
}
},
"additionalProperties": {}
},
"rpcStatus": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
}
}
}
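The spec lands under gen/openapiv2 (for this proto, presumably gen/openapiv2/idl/pod.swagger.json). A minimal sketch for serving it so a Swagger UI instance can load it:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// Exposes the generated OpenAPI files, e.g.
	// GET /openapiv2/idl/pod.swagger.json
	fs := http.FileServer(http.Dir("gen/openapiv2"))
	http.Handle("/openapiv2/", http.StripPrefix("/openapiv2/", fs))
	log.Fatal(http.ListenAndServe(":8081", nil))
}
```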

View File

@ -1,14 +1,33 @@
syntax = "proto3";
package pbpod;
package pod;
option go_package = "code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod";
option go_package = "/podpb";
import "idl/pbtenant/tenant.proto";
import "google/api/annotations.proto";
// cloud provider
enum CloudProvider {
// 0 - Alibaba Cloud
ali = 0;
// 1 - Tencent Cloud
tencent = 1;
// 2 - Huawei Cloud
huawei = 2;
// 3 - K8S
k8s = 3;
// 4 - Harvester
harvester = 4;
}
message Region {
// region id
int32 id = 1;
// region name
string name = 2;
}
message PodInstance {
// cloud provider
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// account name
string account_name = 2;
// pcm id
@ -56,7 +75,7 @@ message CreatePodsResp {
message CreatePodReq {
// cloud provider
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// account name
string account_name = 2;
// instance id
@ -98,7 +117,7 @@ message CreatePodResp {
message DeletePodReq {
// cloud provider
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// account name
string account_name = 2;
// pcm id
@ -128,7 +147,7 @@ message DeletePodResp {
message UpdatePodReq {
// cloud provider
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// account name
string account_name = 2;
// pcm ID
@ -170,7 +189,7 @@ message UpdatePodResp {
message ListPodDetailReq {
// cloud provider
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// account name, as configured in config.yaml
string account_name = 2;
// region id, see tenant.proto
@ -207,7 +226,7 @@ message ListPodDetailResp {
message ListPodReq {
// cloud name
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// namespace
string namespace = 2;
// request source; if the request comes from the pcm sdk, a separate tenanters entity is generated
@ -221,13 +240,13 @@ message ListPodResp {
message GetPodRegionReq {
// cloud name
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
}
message GetPodRegionResp {
// region list
repeated pbtenant.Region regions = 1;
repeated Region regions = 1;
}
@ -241,55 +260,23 @@ message ListPodAllReq{}
service PodService {
// create multiple Pods on multiple clouds
rpc CreatePods(CreatePodsReq) returns (CreatePodsResp) {
option (google.api.http) = {
post : "/apis/pod/createMulti"
body : "*"
};
}
rpc CreatePods(CreatePodsReq) returns (CreatePodsResp);
// create a Pod on one cloud
rpc CreatePod(CreatePodReq) returns (CreatePodResp) {
option (google.api.http) = {
post : "/apis/pod/create"
body : "*"
};
}
rpc CreatePod(CreatePodReq) returns (CreatePodResp);
// delete the specified Pod
rpc DeletePod(DeletePodReq) returns (DeletePodResp) {
option (google.api.http) = {
post : "/apis/pod/delete"
body : "*"
};
}
rpc DeletePod(DeletePodReq) returns (DeletePodResp);
// update the specified Pod
rpc UpdatePod(UpdatePodReq) returns (UpdatePodResp) {
option (google.api.http) = {
put : "/apis/pod/update"
body : "*"
};
}
rpc UpdatePod(UpdatePodReq) returns (UpdatePodResp);
// Pod detail list
rpc ListPodDetail(ListPodDetailReq) returns (ListPodDetailResp) {
option (google.api.http) = {
get : "/apis/pod/detail"
};
}
rpc ListPodDetail(ListPodDetailReq) returns (ListPodDetailResp);
// full Pod list
rpc ListPod(ListPodReq) returns (ListPodResp) {
option (google.api.http) = {
get : "/apis/pod"
};
}
rpc ListPod(ListPodReq) returns (ListPodResp);
// list all Pods
rpc ListPodAll(ListPodAllReq) returns (ListPodResp) {
option (google.api.http) = {
get : "/apis/pod/all"
};
}
rpc ListPodAll(ListPodAllReq) returns (ListPodResp);
}

View File

@ -0,0 +1,23 @@
type: google.api.Service
config_version: 3
http:
rules:
- selector: pod.PodService.CreatePods
post: "/apis/pod/createMulti"
body: "*"
- selector: pod.PodService.CreatePod
post: "/apis/pod/create"
body: "*"
- selector: pod.PodService.DeletePod
post: "/apis/pod/delete"
body: "*"
- selector: pod.PodService.UpdatePod
put: "/apis/pod/delete"
body: "*"
- selector: pod.PodService.ListPodDetail
get: "/apis/pod/detail"
- selector: pod.PodService.ListPod
get: "/apis/pod"
- selector: pod.PodService.ListPodAll
get: "/apis/pod/all"

adaptor/pcm_pod/main.go Normal file
View File

@ -0,0 +1,58 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
podserver "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/config"
"code.gitlink.org.cn/JCCE/PCM.git/common/global"
"code.gitlink.org.cn/JCCE/PCM.git/common/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"flag"
"github.com/golang/glog"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"log"
)
func main() {
var configFile string
flag.StringVar(&configFile, "conf", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
if err := tenanter.LoadCloudConfigsFromFile(configFile); err != nil {
if !errors.Is(err, tenanter.ErrLoadTenanterFileEmpty) {
glog.Fatalf("LoadCloudConfigsFromFile error %+v", err)
}
glog.Warningf("LoadCloudConfigsFromFile empty file path %s", configFile)
}
glog.Infof("load tenant from file finished")
// initialize configuration and the database
config.InitConfig()
// create a zap logger instance
logger, err := zap.NewDevelopment()
if err != nil {
log.Fatalf("cannot create logger: %v", err)
}
// logger.Sugar().Fatal saves a few lines of error-checking boilerplate
logger.Sugar().Fatal(server.RunGRPCServer(&server.GRPCConfig{
Name: "pcm_pod",
Addr: global.S.PodInfo.Address,
RegisterFunc: func(g *grpc.Server) {
podpb.RegisterPodServiceServer(g, &podserver.Server{})
},
Logger: logger,
}))
}
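Assuming the module layout above and a populated configs/tenanter.yaml, the service can be started with (illustrative):

```shell
go run ./adaptor/pcm_pod -conf configs/tenanter.yaml
```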

View File

@ -1,10 +1,10 @@
package ali
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server"
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"errors"
"flag"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
@ -17,9 +17,9 @@ import (
// api document: https://help.aliyun.com/api/eci/createcontainergroup.html
func CreateContainerGroup(request *CreateContainerGroupRequest) (response *CreateContainerGroupResponse, err error) {
provider := pbtenant.CloudProvider(request.ProviderId)
provider := pbpod.CloudProvider(request.ProviderId)
var configFile string
flag.StringVar(&configFile, "confAli", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "confAli", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -31,7 +31,7 @@ func CreateContainerGroup(request *CreateContainerGroupRequest) (response *Creat
}
glog.Infof("load tenant from file finished")
tenanters, err := tenanter.GetTenanters(provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(provider))
var regionId int32
var cpuPod string
var memoryPod string
@ -62,7 +62,7 @@ func CreateContainerGroup(request *CreateContainerGroupRequest) (response *Creat
requestPCM := &pbpod.CreatePodReq{
RequestSource: "ali",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
AccountName: tenanters[0].AccountName(),
PodName: request.ContainerGroupName,
RegionId: regionId,

View File

@ -16,10 +16,9 @@
package ali
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server"
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"errors"
"flag"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
@ -31,9 +30,9 @@ import (
// api document: https://help.aliyun.com/api/eci/deletecontainergroup.html
func DeleteContainerGroup(request *DeleteContainerGroupRequest) (response *DeleteContainerGroupResponse, err error) {
provider := pbtenant.CloudProvider(request.ProviderId)
provider := pbpod.CloudProvider(request.ProviderId)
var configFile string
flag.StringVar(&configFile, "confAli", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "confAli", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -63,7 +62,7 @@ func DeleteContainerGroup(request *DeleteContainerGroupRequest) (response *Delet
requestPCM := &pbpod.DeletePodReq{
RequestSource: "ali",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
AccountName: request.AccountName,
PcmId: pcmId,
Namespace: request.Namespace,

View File

@ -1,9 +1,8 @@
package ali
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
@ -12,7 +11,7 @@ import (
// api document: https://help.aliyun.com/api/eci/describecontainergroups.html
func DescribeContainerGroups(request *DescribeContainerGroupsRequest) (response *DescribeContainerGroupsResponse, err error) {
provider := pbtenant.CloudProvider(request.ProviderId)
provider := pbpod.CloudProvider(request.ProviderId)
containerGroups := make([]DescribeContainerGroupsContainerGroup0, 0)
//trans Ali request to PCM request
requestPCM := &pbpod.ListPodReq{
@ -23,7 +22,7 @@ func DescribeContainerGroups(request *DescribeContainerGroupsRequest) (response
resp, err := server.ListPod(nil, requestPCM)
//trans PCM response pod set to Ali ContainerGroup set
//trans PCM response pcm_pod set to Ali ContainerGroup set
for k := range resp.Pods {
podId := resp.Pods[k].PodId
podName := resp.Pods[k].PodName

View File

@ -16,10 +16,9 @@
package ali
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server"
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"errors"
"flag"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
@ -31,9 +30,9 @@ import (
// api document: https://help.aliyun.com/api/eci/updatecontainergroup.html
func UpdateContainerGroup(request *UpdateContainerGroupRequest) (response *UpdateContainerGroupResponse, err error) {
provider := pbtenant.CloudProvider(request.ProviderId)
provider := pbpod.CloudProvider(request.ProviderId)
var configFile string
flag.StringVar(&configFile, "confAli", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "confAli", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -60,7 +59,7 @@ func UpdateContainerGroup(request *UpdateContainerGroupRequest) (response *Updat
requestPCM := &pbpod.UpdatePodReq{
RequestSource: "ali",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
AccountName: request.AccountName,
PcmId: request.PcmId,
PodId: request.ContainerGroupId,

View File

@ -17,10 +17,11 @@ limitations under the License.
// Package v1 contains API types that are common to all versions.
//
// The package contains two categories of types:
// - external (serialized) types that lack their own version (e.g TypeMeta)
// - internal (never-serialized) types that are needed by several different
// api groups, and so live here, to avoid duplication and/or import loops
// (e.g. LabelSelector).
// - external (serialized) types that lack their own version (e.g TypeMeta)
// - internal (never-serialized) types that are needed by several different
// api groups, and so live here, to avoid duplication and/or import loops
// (e.g. LabelSelector).
//
// In the future, we will probably move these categories of objects into
// separate packages.
package v1
@ -208,10 +209,10 @@ type ObjectMeta struct {
// finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
// Once the deletionTimestamp is set, this value may not be unset or be set further into the
// future, although it may be shortened or the resource may be deleted prior to this time.
// For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
// by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
// For example, a user may request that a pcm_pod is deleted in 30 seconds. The Kubelet will react
// by sending a graceful termination signal to the containers in the pcm_pod. After that 30 seconds,
// the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
// remove the pod from the API. In the presence of network partitions, this object may still
// remove the pcm_pod from the API. In the presence of network partitions, this object may still
// exist after this timestamp, until an administrator or automated process can determine the
// resource is fully terminated.
// If not set, graceful deletion of the object has not been requested.
@ -1376,17 +1377,18 @@ type PartialObjectMetadataList struct {
// Condition contains details for one aspect of the current state of this API Resource.
// ---
// This struct is intended for direct use as an array at the field path .status.conditions. For example,
// type FooStatus struct{
// // Represents the observations of a foo's current state.
// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
// // +patchMergeKey=type
// // +patchStrategy=merge
// // +listType=map
// // +listMapKey=type
// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
//
// // other fields
// }
// type FooStatus struct{
// // Represents the observations of a foo's current state.
// // Known .status.conditions.type are: "Available", "Progressing", and "Degraded"
// // +patchMergeKey=type
// // +patchStrategy=merge
// // +listType=map
// // +listMapKey=type
// Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
//
// // other fields
// }
type Condition struct {
// type of condition in CamelCase or in foo.example.com/CamelCase.
// ---

View File

@ -19,9 +19,9 @@ limitations under the License.
package kubernetes
import (
corev1 "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server/kubernetes/client-go/kubernetes/typed/core/v1"
"fmt"
corev1 "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server/kubernetes/client-go/kubernetes/typed/core/v1"
discovery "k8s.io/client-go/discovery"
admissionregistrationv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1"
admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1"

View File

@ -1,12 +1,13 @@
package v1
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server"
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/server/kubernetes/api/core/v1"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"context"
json "encoding/json"
"encoding/json"
"errors"
"flag"
"fmt"
@ -15,12 +16,11 @@ import (
"strconv"
"time"
v1 "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/server/kubernetes/api/core/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
corev1 "k8s.io/client-go/applyconfigurations/core/v1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
)
// PodsGetter has a method to return a PodInterface.
@ -62,7 +62,7 @@ func newPods(c *CoreV1Client, namespace string) *pods {
}
}
// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
// Get takes name of the pcm_pod, and returns the corresponding pcm_pod object, and an error if there is any.
func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) {
result = &v1.Pod{}
err = c.client.Get().
@ -85,12 +85,12 @@ func (c *pods) List(ctx context.Context, pod *v1.Pod, opts metav1.ListOptions) (
requestPCM := &pbpod.ListPodReq{
RequestSource: "huawei",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
Namespace: "pcm",
}
resp, err := server.ListPod(nil, requestPCM)
//trans PCM response pod set to Ali ContainerGroup set
//trans PCM response pcm_pod set to Ali ContainerGroup set
var pods = make([]v1.Pod, len(resp.Pods))
for k := range resp.Pods {
podId := resp.Pods[k].PodId
@ -142,13 +142,13 @@ func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interf
Watch(ctx)
}
// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any.
// Create takes the representation of a pcm_pod and creates it. Returns the server's representation of the pcm_pod, and an error, if there is any.
func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) {
result = &v1.Pod{}
provider := pbtenant.CloudProvider(pod.Spec.ProviderId)
var configFile string
flag.StringVar(&configFile, "conf", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "conf", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -187,7 +187,7 @@ func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOption
requestPCM := &pbpod.CreatePodReq{
RequestSource: "huawei",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
AccountName: tenanters[0].AccountName(),
PodName: pod.Name,
RegionId: regionId,
@ -235,13 +235,13 @@ func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOption
return
}
// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
// Update takes the representation of a pcm_pod and updates it. Returns the server's representation of the pcm_pod, and an error, if there is any.
func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
result = &v1.Pod{}
provider := pbtenant.CloudProvider(pod.Spec.ProviderId)
var configFile string
flag.StringVar(&configFile, "confHuawei", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "confHuawei", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -280,7 +280,7 @@ func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOption
requestPCM := &pbpod.UpdatePodReq{
RequestSource: "huawei",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
PcmId: string(pod.ObjectMeta.UID),
PodId: string(pod.ObjectMeta.UID),
AccountName: tenanters[0].AccountName(),
@ -373,12 +373,12 @@ func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.Update
return
}
// Delete takes name of the pod and deletes it. Returns an error if one occurs.
// Delete takes name of the pcm_pod and deletes it. Returns an error if one occurs.
func (c *pods) Delete(ctx context.Context, namespace string, providerId int32, regionName string, accountName string, pcmId string, opts metav1.DeleteOptions) error {
provider := pbtenant.CloudProvider(providerId)
var configFile string
flag.StringVar(&configFile, "confHuawei", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "confHuawei", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -404,7 +404,7 @@ func (c *pods) Delete(ctx context.Context, namespace string, providerId int32, r
requestPCM := &pbpod.DeletePodReq{
RequestSource: "huawei",
Provider: provider,
Provider: pbpod.CloudProvider(provider),
AccountName: accountName,
PcmId: pcmId,
Namespace: namespace,
@ -436,7 +436,7 @@ func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions,
Error()
}
// Patch applies the patch and returns the patched pod.
// Patch applies the patch and returns the patched pcm_pod.
func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) {
result = &v1.Pod{}
err = c.client.Patch(pt).
@ -451,10 +451,10 @@ func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data
return
}
// Apply takes the given apply declarative configuration, applies it and returns the applied pod.
// Apply takes the given apply declarative configuration, applies it and returns the applied pcm_pod.
func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) {
if pod == nil {
return nil, fmt.Errorf("pod provided to Apply must not be nil")
return nil, fmt.Errorf("pcm_pod provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(pod)
@ -463,7 +463,7 @@ func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opt
}
name := pod.Name
if name == nil {
return nil, fmt.Errorf("pod.Name must be provided to Apply")
return nil, fmt.Errorf("pcm_pod.Name must be provided to Apply")
}
result = &v1.Pod{}
err = c.client.Patch(types.ApplyPatchType).
@ -481,7 +481,7 @@ func (c *pods) Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opt
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error) {
if pod == nil {
return nil, fmt.Errorf("pod provided to Apply must not be nil")
return nil, fmt.Errorf("pcm_pod provided to Apply must not be nil")
}
patchOpts := opts.ToPatchOptions()
data, err := json.Marshal(pod)
@ -491,7 +491,7 @@ func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguratio
name := pod.Name
if name == nil {
return nil, fmt.Errorf("pod.Name must be provided to Apply")
return nil, fmt.Errorf("pcm_pod.Name must be provided to Apply")
}
result = &v1.Pod{}
@ -507,7 +507,7 @@ func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguratio
return
}
// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any.
// GetEphemeralContainers takes name of the pcm_pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any.
func (c *pods) GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) {
result = &v1.EphemeralContainers{}
err = c.client.Get().

View File

@ -35,7 +35,7 @@ type PodExpansion interface {
ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper
}
// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored).
// Bind applies the provided binding to the named pcm_pod in the current namespace (binding.Namespace is ignored).
func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error {
return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error()
}
@ -44,12 +44,12 @@ func (c *pods) Evict(ctx context.Context, eviction *policy.Eviction) error {
return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
}
// Get constructs a request for getting the logs for a pod
// Get constructs a request for getting the logs for a pcm_pod
func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request {
return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec)
}
// ProxyGet returns a response of the pod by calling it through the proxy.
// ProxyGet returns a response of the pcm_pod by calling it through the proxy.
func (c *pods) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper {
request := c.client.Get().
Namespace(c.ns).

View File

@ -0,0 +1,91 @@
package server
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"context"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type Server struct {
podpb.UnimplementedPodServiceServer
}
// GetProdRegions gets the available regions for a product
func (s *Server) GetProdRegions(ctx context.Context, req *podpb.GetPodRegionReq) (*podpb.GetPodRegionResp, error) {
resp, err := GetPodRegion(ctx, req)
if err != nil {
glog.Errorf("CreatePods error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
// CreatePods creates multiple pcm_pods across multiple clouds
func (s *Server) CreatePods(ctx context.Context, req *podpb.CreatePodsReq) (*podpb.CreatePodsResp, error) {
resp, err := CreatePods(ctx, req)
if err != nil {
glog.Errorf("CreatePods error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
// CreatePod creates a pcm_pod on one cloud
func (s *Server) CreatePod(ctx context.Context, req *podpb.CreatePodReq) (*podpb.CreatePodResp, error) {
resp, err := CreatePod(ctx, req)
if err != nil {
glog.Errorf("CreatePod error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
// DeletePod deletes the specified pcm_pod
func (s *Server) DeletePod(ctx context.Context, req *podpb.DeletePodReq) (*podpb.DeletePodResp, error) {
resp, err := DeletePod(ctx, req)
if err != nil {
glog.Errorf("DeletePod error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
// UpdatePod updates the specified pcm_pod
func (s *Server) UpdatePod(ctx context.Context, req *podpb.UpdatePodReq) (*podpb.UpdatePodResp, error) {
resp, err := UpdatePod(ctx, req)
if err != nil {
glog.Errorf("UpdatePod error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
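// ListPodDetail lists pod details for the given provider, region and account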
func (s *Server) ListPodDetail(ctx context.Context, req *podpb.ListPodDetailReq) (*podpb.ListPodDetailResp, error) {
resp, err := ListPodDetail(ctx, req)
if err != nil {
glog.Errorf("ListPodDetail error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
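// ListPod lists pods for the given provider across its available regions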
func (s *Server) ListPod(ctx context.Context, req *podpb.ListPodReq) (*podpb.ListPodResp, error) {
resp, err := ListPod(ctx, req)
if err != nil {
glog.Errorf("ListPod error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}
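// ListPodAll lists pods across all supported cloud providers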
func (s *Server) ListPodAll(ctx context.Context, req *podpb.ListPodAllReq) (*podpb.ListPodResp, error) {
resp, err := ListPodAll(ctx)
if err != nil {
glog.Errorf("ListPodAll error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
}
return resp, nil
}

View File

@ -1,36 +1,35 @@
package server
import (
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/service"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"context"
"flag"
"fmt"
"sync"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pod/service"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"github.com/golang/glog"
"github.com/pkg/errors"
)
// GetPodRegion get the available region for pod
// GetPodRegion get the available region for pcm_pod
func GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq) (resp *pbpod.GetPodRegionResp, err error) {
var (
regionInit tenanter.Region
regions []*pbtenant.Region
regions []*pbpod.Region
)
switch req.GetProvider() {
case pbtenant.CloudProvider_ali:
regionInit, _ = tenanter.NewRegion(req.GetProvider(), 2)
case pbtenant.CloudProvider_tencent:
regionInit, _ = tenanter.NewRegion(req.GetProvider(), 5)
case pbtenant.CloudProvider_huawei:
regionInit, _ = tenanter.NewRegion(req.GetProvider(), 5)
case pbpod.CloudProvider_ali:
regionInit, _ = tenanter.NewRegion(pbtenant.CloudProvider(req.GetProvider()), 2)
case pbpod.CloudProvider_tencent:
regionInit, _ = tenanter.NewRegion(pbtenant.CloudProvider(req.GetProvider()), 5)
case pbpod.CloudProvider_huawei:
regionInit, _ = tenanter.NewRegion(pbtenant.CloudProvider(req.GetProvider()), 5)
}
tenanters, err := tenanter.GetTenanters(req.GetProvider())
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.GetProvider()))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
@ -69,7 +68,7 @@ func CreatePods(ctx context.Context, req *pbpod.CreatePodsReq) (*pbpod.CreatePod
defer wg.Done()
resp, err := CreatePod(ctx, reqPod)
if err != nil || resp == nil {
fmt.Println(errors.Wrap(err, "Batch pod creation error"))
fmt.Println(errors.Wrap(err, "Batch pcm_pod creation error"))
return
}
c <- resp.RequestId
@ -100,12 +99,12 @@ func CreatePod(ctx context.Context, req *pbpod.CreatePodReq) (*pbpod.CreatePodRe
pod poder.Poder
)
tenanters, err := tenanter.GetTenanters(req.Provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.WithMessagef(err, "provider %v regionId %v", req.Provider, req.RegionId)
}
@ -129,7 +128,7 @@ func DeletePod(ctx context.Context, req *pbpod.DeletePodReq) (*pbpod.DeletePodRe
// requests coming from the PCM ADK need to load the config file from the user's local machine
if len(req.RequestSource) > 0 {
var configFile string
flag.StringVar(&configFile, "conf", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "conf", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -142,14 +141,14 @@ func DeletePod(ctx context.Context, req *pbpod.DeletePodReq) (*pbpod.DeletePodRe
glog.Infof("load tenant from file finished")
}
tenanters, err := tenanter.GetTenanters(req.Provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.WithMessagef(err, "provider %v regionId %v", req.Provider, req.RegionId)
return nil, errors.WithMessagef(err, "provider %v regionId %v", pbtenant.CloudProvider(req.Provider), req.RegionId)
}
for _, tenant := range tenanters {
@ -171,7 +170,7 @@ func UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (*pbpod.UpdatePodRe
// requests coming from the PCM ADK need to load the config file from the user's local machine
if len(req.RequestSource) > 0 {
var configFile string
flag.StringVar(&configFile, "conf", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "conf", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -184,12 +183,12 @@ func UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (*pbpod.UpdatePodRe
glog.Infof("load tenant from file finished")
}
tenanters, err := tenanter.GetTenanters(req.Provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.WithMessagef(err, "provider %v regionId %v", req.Provider, req.RegionId)
}
@ -211,12 +210,12 @@ func ListPodDetail(ctx context.Context, req *pbpod.ListPodDetailReq) (*pbpod.Lis
pod poder.Poder
)
tenanters, err := tenanter.GetTenanters(req.Provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.WithMessagef(err, "provider %v regionId %v", req.Provider, req.RegionId)
}
@ -243,7 +242,7 @@ func ListPod(ctx context.Context, req *pbpod.ListPodReq) (*pbpod.ListPodResp, er
// requests coming from the PCM ADK need to load the config file from the user's local machine
if len(req.RequestSource) > 0 {
var configFile string
flag.StringVar(&configFile, "conf", "configs/config.yaml", "config.yaml")
flag.StringVar(&configFile, "conf", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
@ -257,7 +256,7 @@ func ListPod(ctx context.Context, req *pbpod.ListPodReq) (*pbpod.ListPodResp, er
glog.Infof("load tenant from file finished")
}
tenanters, _ = tenanter.GetTenanters(req.Provider)
tenanters, _ = tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
//get the available region for product
reqPodRegion := &pbpod.GetPodRegionReq{Provider: req.GetProvider()}
@ -316,14 +315,14 @@ func ListPodAll(ctx context.Context) (*pbpod.ListPodResp, error) {
pods []*pbpod.PodInstance
)
wg.Add(len(pbtenant.CloudProvider_name))
for k := range pbtenant.CloudProvider_name {
wg.Add(len(pbpod.CloudProvider_name))
for k := range pbpod.CloudProvider_name {
go func(provider int32) {
defer wg.Done()
// when listAll is called for a private K8S cluster, by default only pods under the ListPodDetailReq namespace are queried
if provider == 3 {
resp, err := ListPod(ctx, &pbpod.ListPodReq{Provider: pbtenant.CloudProvider(provider), Namespace: "pcm"})
resp, err := ListPod(ctx, &pbpod.ListPodReq{Provider: pbpod.CloudProvider(provider), Namespace: "pcm"})
if err != nil {
glog.Errorf("List error %v", err)
return
@ -332,7 +331,7 @@ func ListPodAll(ctx context.Context) (*pbpod.ListPodResp, error) {
pods = append(pods, resp.Pods...)
mutex.Unlock()
} else {
resp, err := ListPod(ctx, &pbpod.ListPodReq{Provider: pbtenant.CloudProvider(provider)})
resp, err := ListPod(ctx, &pbpod.ListPodReq{Provider: pbpod.CloudProvider(provider)})
if err != nil {
glog.Errorf("List error %v", err)
return

View File

@ -1,6 +1,7 @@
package poder
import (
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"context"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
alieci "github.com/aliyun/alibaba-cloud-sdk-go/services/eci"
@ -10,8 +11,6 @@ import (
"sync"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"github.com/pkg/errors"
)
@ -25,7 +24,7 @@ type AliEci struct {
func (eci *AliEci) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq) (*pbpod.GetPodRegionResp, error) {
regions := make([]*pbtenant.Region, 0)
regions := make([]*pbpod.Region, 0)
requestRegion := requests.NewCommonRequest()
requestRegion.Method = "POST"
@ -49,7 +48,7 @@ func (eci *AliEci) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq)
regionsJson := respRegion.Get("Regions").GetIndex(i)
regionName, _ := regionsJson.Get("RegionId").String()
regionId, _ := tenanter.GetAliRegionId(regionName)
regionPod := &pbtenant.Region{
regionPod := &pbpod.Region{
Id: regionId,
Name: regionName,
}
@ -110,7 +109,7 @@ func (eci *AliEci) CreatePod(ctx context.Context, req *pbpod.CreatePodReq) (*pbp
return &pbpod.CreatePodResp{
Finished: isFinished,
RequestId: "Create Ali pod request ID:" + resp.RequestId,
RequestId: "Create Ali pcm_pod request ID:" + resp.RequestId,
PodId: resp.ContainerGroupId,
PodName: req.PodName,
}, nil
@ -133,7 +132,7 @@ func (eci *AliEci) DeletePod(ctx context.Context, req *pbpod.DeletePodReq) (*pbp
return &pbpod.DeletePodResp{
Finished: isFinished,
RequestId: "Delete Ali pod request ID:" + resp.RequestId,
RequestId: "Delete Ali pcm_pod request ID:" + resp.RequestId,
PodId: req.PodId,
PodName: req.PodName,
}, nil
@ -164,7 +163,7 @@ func (eci *AliEci) UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (*pbp
return &pbpod.UpdatePodResp{
Finished: isFinished,
RequestId: "Update Ali pod request ID:" + resp.RequestId,
RequestId: "Update Ali pcm_pod request ID:" + resp.RequestId,
PodId: req.PodId,
PodName: req.PodName,
}, nil
@ -182,7 +181,7 @@ func (eci *AliEci) ListPodDetail(ctx context.Context, req *pbpod.ListPodDetailRe
var ecies = make([]*pbpod.PodInstance, len(resp.ContainerGroups))
for k, v := range resp.ContainerGroups {
ecies[k] = &pbpod.PodInstance{
Provider: pbtenant.CloudProvider_ali,
Provider: pbpod.CloudProvider_ali,
AccountName: eci.tenanter.AccountName(),
PcmId: v.ContainerGroupId,
PodId: v.ContainerGroupId,

View File

@ -1,6 +1,7 @@
package poder
import (
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"context"
"fmt"
util "github.com/alibabacloud-go/tea-utils/service"
@ -12,9 +13,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
corev1 "k8s.io/api/core/v1"
huaweicci "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@ -35,10 +33,10 @@ type HuaweiCci struct {
func (cci *HuaweiCci) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq) (*pbpod.GetPodRegionResp, error) {
//todo
var (
regions []*pbtenant.Region
regions []*pbpod.Region
)
huaweiRegionName, _ := tenanter.GetHuaweiRegionName(5)
region := &pbtenant.Region{
region := &pbpod.Region{
Id: 5,
Name: huaweiRegionName,
}
@ -50,7 +48,7 @@ func (cci *HuaweiCci) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionR
return resp, nil
}
//CCI auth through iam
// CCI auth through iam
const (
apiVersion = "client.authentication.k8s.io/v1beta1"
iamEndpoint = "https://iam.myhuaweicloud.com"
@ -87,7 +85,7 @@ func newHuaweiCciClient(region tenanter.Region, tenant tenanter.Tenanter) (Poder
}
if err != nil {
return nil, errors.Wrap(err, "init huawei pod client error")
return nil, errors.Wrap(err, "init huawei pcm_pod client error")
}
return &HuaweiCci{
@ -140,7 +138,7 @@ func (cci *HuaweiCci) CreatePod(ctx context.Context, req *pbpod.CreatePodReq) (*
return &pbpod.CreatePodResp{
Finished: isFinished,
RequestId: "Create huawei pod request ID:" + resp.GenerateName,
RequestId: "Create huawei pcm_pod request ID:" + resp.GenerateName,
PodId: string(resp.Generation),
PodName: resp.Name,
}, nil
@ -160,7 +158,7 @@ func (cci *HuaweiCci) DeletePod(ctx context.Context, req *pbpod.DeletePodReq) (*
return &pbpod.DeletePodResp{
Finished: isFinished,
RequestId: "Delete huawei pod request ID:" + req.PodName,
RequestId: "Delete huawei pcm_pod request ID:" + req.PodName,
PodId: req.PodName,
PodName: req.PodName,
}, nil
@ -184,7 +182,7 @@ func (cci *HuaweiCci) UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (*
}
pod.Spec.Containers[0].Image = req.ContainerImage
resp, err := cci.cli.CoreV1().Pods(req.Namespace).Update(context.TODO(), &pod, metav1.UpdateOptions{})
glog.Info("Huawei update pod resp", resp)
glog.Info("Huawei update pcm_pod resp", resp)
if err != nil {
return nil, errors.Wrap(err, "Huaweiyun UpdatePod error")
}
@ -198,7 +196,7 @@ func (cci *HuaweiCci) UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (*
return &pbpod.UpdatePodResp{
Finished: isFinished,
RequestId: "Update huawei pod request ID:" + resp.GenerateName,
RequestId: "Update huawei pcm_pod request ID:" + resp.GenerateName,
PodId: string(resp.Generation),
PodName: resp.Name,
}, nil
@ -211,11 +209,11 @@ func (cci *HuaweiCci) ListPodDetail(ctx context.Context, req *pbpod.ListPodDetai
if err != nil {
return nil, err
}
glog.Info("Huaweiyun ListDetail pod success", resp.Items)
glog.Info("Huaweiyun ListDetail pcm_pod success", resp.Items)
var pods = make([]*pbpod.PodInstance, len(resp.Items))
for k, v := range resp.Items {
pods[k] = &pbpod.PodInstance{
Provider: pbtenant.CloudProvider_huawei,
Provider: pbpod.CloudProvider_huawei,
AccountName: cci.tenanter.AccountName(),
PcmId: v.Name,
PodId: string(v.GetUID()),

View File

@ -1,9 +1,8 @@
package poder
import (
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"context"
"fmt"
"github.com/golang/glog"
@ -33,10 +32,10 @@ type K8SPoder struct {
func (k K8SPoder) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq) (*pbpod.GetPodRegionResp, error) {
//todo
var (
regions []*pbtenant.Region
regions []*pbpod.Region
)
huaweiRegionName, _ := tenanter.GetK8SRegionName(0)
region := &pbtenant.Region{
region := &pbpod.Region{
Id: 0,
Name: huaweiRegionName,
}
@ -126,7 +125,7 @@ func (k *K8SPoder) CreatePod(ctx context.Context, req *pbpod.CreatePodReq) (*pbp
return &pbpod.CreatePodResp{
Finished: isFinished,
RequestId: "K8S pod Name:" + resp.Name,
RequestId: "K8S pcm_pod Name:" + resp.Name,
PodId: string(resp.UID),
PodName: resp.Name,
}, nil
@ -148,7 +147,7 @@ func (k K8SPoder) DeletePod(ctx context.Context, req *pbpod.DeletePodReq) (*pbpo
return &pbpod.DeletePodResp{
Finished: isFinished,
RequestId: "K8S pod Name:" + req.PodName,
RequestId: "K8S pcm_pod Name:" + req.PodName,
PodId: req.PodName,
PodName: req.PodName,
}, nil
@ -185,7 +184,7 @@ func (k K8SPoder) UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (*pbpo
return &pbpod.UpdatePodResp{
Finished: isFinished,
RequestId: "K8S pod Name:" + req.PodName,
RequestId: "K8S pcm_pod Name:" + req.PodName,
PodId: string(resp.UID),
PodName: req.PodName,
}, nil
@ -201,7 +200,7 @@ func (k K8SPoder) ListPodDetail(ctx context.Context, req *pbpod.ListPodDetailReq
var pods = make([]*pbpod.PodInstance, len(resp.Items))
for k, v := range resp.Items {
pods[k] = &pbpod.PodInstance{
Provider: pbtenant.CloudProvider_k8s,
Provider: pbpod.CloudProvider_k8s,
AccountName: req.AccountName,
PcmId: v.Name,
PodId: string(v.GetUID()),

View File

@ -1,9 +1,8 @@
package poder
import (
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"context"
"github.com/golang/glog"
@ -11,8 +10,8 @@ import (
)
var (
ErrPodListNotSupported = errors.New("cloud not supported pod list")
ErrPoderPanic = errors.New("pod init panic")
ErrPodListNotSupported = errors.New("cloud not supported pcm_pod list")
ErrPoderPanic = errors.New("pcm_pod init panic")
)
type Poder interface {
@ -23,7 +22,7 @@ type Poder interface {
GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq) (*pbpod.GetPodRegionResp, error)
}
func NewPodClient(provider pbtenant.CloudProvider, region tenanter.Region, tenant tenanter.Tenanter) (poder Poder, err error) {
func NewPodClient(provider pbpod.CloudProvider, region tenanter.Region, tenant tenanter.Tenanter) (poder Poder, err error) {
// some SDKs panic internally
defer func() {
if err1 := recover(); err1 != nil {
@ -33,13 +32,13 @@ func NewPodClient(provider pbtenant.CloudProvider, region tenanter.Region, tenan
}()
switch provider {
case pbtenant.CloudProvider_ali:
case pbpod.CloudProvider_ali:
return newAliEciClient(region, tenant)
case pbtenant.CloudProvider_tencent:
case pbpod.CloudProvider_tencent:
return newTencentEksClient(region, tenant)
case pbtenant.CloudProvider_huawei:
case pbpod.CloudProvider_huawei:
return newHuaweiCciClient(region, tenant)
case pbtenant.CloudProvider_k8s:
case pbpod.CloudProvider_k8s:
return newK8SClient(tenant)
//TODO aws
//case pbtenant.CloudProvider_aws:

View File

@ -1,16 +1,14 @@
package poder
import (
pbpod "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_pod/gen/idl"
"context"
"strconv"
"sync"
"github.com/golang/glog"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbpod"
"github.com/pkg/errors"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
@ -27,7 +25,7 @@ type TencentEks struct {
func (eks TencentEks) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionReq) (*pbpod.GetPodRegionResp, error) {
regions := make([]*pbtenant.Region, 0)
regions := make([]*pbpod.Region, 0)
request := tencenteks.NewDescribeEKSContainerInstanceRegionsRequest()
resp, err := eks.cli.DescribeEKSContainerInstanceRegions(request)
@ -37,7 +35,7 @@ func (eks TencentEks) GetPodRegion(ctx context.Context, req *pbpod.GetPodRegionR
for _, eksRegion := range resp.Response.Regions {
regionId, _ := tenanter.GetTencentRegionId(*eksRegion.RegionName)
regionPod := &pbtenant.Region{
regionPod := &pbpod.Region{
Id: regionId,
Name: *eksRegion.RegionName,
}
@ -122,7 +120,7 @@ func (eks TencentEks) CreatePod(ctx context.Context, req *pbpod.CreatePodReq) (*
return &pbpod.CreatePodResp{
Finished: isFinished,
RequestId: "tencent pod create request id:" + *resp.Response.RequestId,
RequestId: "tencent pcm_pod create request id:" + *resp.Response.RequestId,
PodId: *resp.Response.EksCiIds[0],
PodName: req.PodName,
}, nil
@ -145,7 +143,7 @@ func (eks *TencentEks) DeletePod(ctx context.Context, req *pbpod.DeletePodReq) (
return &pbpod.DeletePodResp{
Finished: isFinished,
RequestId: "tencent pod delete request id:" + *resp.Response.RequestId,
RequestId: "tencent pcm_pod delete request id:" + *resp.Response.RequestId,
PodId: req.PodId,
PodName: req.PodName,
}, nil
@ -172,7 +170,7 @@ func (eks *TencentEks) UpdatePod(ctx context.Context, req *pbpod.UpdatePodReq) (
return &pbpod.UpdatePodResp{
Finished: isFinished,
RequestId: "tencent pod update request id:" + *resp.Response.RequestId,
RequestId: "tencent pcm_pod update request id:" + *resp.Response.RequestId,
PodId: req.PodId,
PodName: req.PodName,
}, nil
@ -182,12 +180,12 @@ func (eks TencentEks) ListPodDetail(ctx context.Context, req *pbpod.ListPodDetai
request := tencenteks.NewDescribeEKSContainerInstancesRequest()
resp, err := eks.cli.DescribeEKSContainerInstances(request)
if err != nil {
return nil, errors.Wrap(err, "Tencent ListDetail pod error")
return nil, errors.Wrap(err, "Tencent ListDetail pcm_pod error")
}
var ekspods = make([]*pbpod.PodInstance, len(resp.Response.EksCis))
for k, v := range resp.Response.EksCis {
ekspods[k] = &pbpod.PodInstance{
Provider: pbtenant.CloudProvider_tencent,
Provider: pbpod.CloudProvider_tencent,
AccountName: eks.tenanter.AccountName(),
PcmId: *v.EksCiId,
PodId: *v.EksCiId,

View File

@ -0,0 +1,17 @@
version: v1
plugins:
- name: go
out: gen
opt:
- paths=source_relative
- name: go-grpc
out: gen
opt:
- paths=source_relative
- name: grpc-gateway
out: gen
opt:
- paths=source_relative
- grpc_api_configuration=idl/slurm.yaml
- name: openapiv2
out: gen/openapiv2

View File

@ -0,0 +1,13 @@
version: v1
name: buf.build/JCCE/PCM
breaking:
use:
- FILE
lint:
use:
- DEFAULT
# ignore:
# - google/type/datetime.proto
deps:
- buf.build/googleapis/googleapis
- buf.build/grpc-ecosystem/grpc-gateway

View File

@ -0,0 +1,3 @@
package slurm
//todo

View File

@ -0,0 +1,23 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/extra"
"fmt"
)
import "os"
import "strconv"
func main() {
if len(os.Args) < 2 {
fmt.Printf("Please specify Job ID\n")
return
}
id, _ := strconv.Atoi(os.Args[1])
fmt.Printf("try to cancel %d\n", id)
err := extra.Cancel_job(uint32(id))
if err != nil {
fmt.Print(err.Error())
}
}

View File

@ -0,0 +1,28 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/extra"
"fmt"
)
import "os"
import "strconv"
func main() {
if len(os.Args) < 2 {
fmt.Printf("Please specify Job ID\n")
return
}
id, _ := strconv.Atoi(os.Args[1])
jobs, err := extra.Get_job_info_accounting(uint32(id))
if err != nil {
fmt.Print(err.Error())
return
}
fmt.Printf("JobId\tuser\taccount\tstate\t\tJobName\n")
for i := range jobs {
fmt.Printf("%d\t%s\t%s\t%s\t%s\n", jobs[i].JobId, jobs[i].User, jobs[i].Account, jobs[i].State, jobs[i].JobName)
}
}

View File

@ -0,0 +1,21 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
"fmt"
)
func main() {
job_list := job_info.Get_all_jobs()
fmt.Printf("Found %d jobs \n", job_list.Record_count)
/* a little bit nicer */
fmt.Printf("Id\tName\t\tPartion\tUser\tRuntime\tStatus\t\t(Reason)\tNodes\tPriority\n")
fmt.Printf("________________________________________________________________________________________________\n")
for i := range job_list.Job_list {
job := job_list.Job_list[i]
fmt.Printf("%d\t%s\t%s\t %s\t%s\t%s\t%s\t%d\n",
job.Job_id, job.Name, job.Partition, job_info.Get_job_runtime(job).String(), job.Job_stateS,
job_info.Reason_to_string(job.State_reason), job.Nodes, job.Priority)
}
}

View File

@ -0,0 +1,37 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/nodeinfo"
"fmt"
)
func main() {
node_list := node_info.Get_all_nodes()
fmt.Printf("Found %d nodes \n", node_list.Record_count)
for i := range node_list.Node_list {
node := node_list.Node_list[i]
fmt.Println("Node_hostname: " + node.Node_hostname)
fmt.Println("State: " + node_info.State_to_string(node.Node_state))
fmt.Println("Arch: " + node.Arch)
fmt.Println("Boards: " + string(node.Boards))
fmt.Println("Boot_time: " + string(node.Boot_time))
fmt.Println("Cores: " + string(node.Cores))
fmt.Println("Cpu_load: " + string(node.Cpu_load))
fmt.Println("Cpus: " + string(node.Cpus))
fmt.Println("Features: " + node.Features)
fmt.Println("Gres: " + node.Gres)
fmt.Println("Node_addr: " + node.Node_addr)
fmt.Println("Node_hostname: " + node.Node_hostname)
fmt.Println("Os: " + node.Os)
fmt.Println("Real_memory: " + string(node.Real_memory))
fmt.Println("Reason_time: " + string(node.Reason_time))
fmt.Println("Node_state: " + string(node.Node_state))
fmt.Println("Reason_uid: " + string(node.Reason_uid))
fmt.Println("Slurmd_start_time: " + string(node.Slurmd_start_time))
fmt.Println("Sockets: " + string(node.Sockets))
fmt.Println("Threads: " + string(node.Threads))
fmt.Println("Tmp_disk: " + string(node.Tmp_disk))
fmt.Println("Weight: " + string(node.Weight))
}
}

View File

@ -0,0 +1,17 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
"fmt"
)
func main() {
version := int(0)
var config slurm.Ctl_conf
version = slurm.Version()
fmt.Printf("Version is %s\n", slurm.VersionString(version))
config = slurm.GetConfig()
slurm.Print_Ctl_conf(config)
}

View File

@ -0,0 +1,40 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
"fmt"
"os"
"strconv"
)
func main() {
if len(os.Args) < 2 {
fmt.Printf("Please specify Job ID\n")
return
}
id, _ := strconv.Atoi(os.Args[1])
job_list := job_info.Get_job(uint32(id))
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n", msg)
return
}
for i := range job_list.Job_list {
job_info.Print_Job_info(job_list.Job_list[i])
}
fmt.Printf("Id\tName\t\tPartion\tUser\tRuntime\tStatus\t\t(Reason)\tNodes\tPriority\n")
fmt.Printf("________________________________________________________________________________________________\n")
for i := range job_list.Job_list {
job := job_list.Job_list[i]
fmt.Printf("%d\t%s\t%s %s\t%s\t%s\t%s\t%d\n",
job.Job_id, job.Name, job.Partition, job_info.Get_job_runtime(job).String(), job.Job_stateS,
job_info.Reason_to_string(job.State_reason), job.Nodes, job.Priority)
}
end_time := job_info.Get_job_endtime(uint32(id))
fmt.Printf("End-Time: %s\n", end_time)
}

View File

@ -0,0 +1,39 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/nodeinfo"
"fmt"
)
func main() {
/* name := os.Args[1]
fmt.Println(string(name))*/
node_list := node_info.Get_node_info("localhost")
fmt.Println(node_list.Record_count)
for i := range node_list.Node_list {
node := node_list.Node_list[i]
fmt.Println("Node_hostname: " + node.Node_hostname)
fmt.Println("State: " + node_info.State_to_string(node.Node_state))
fmt.Println("Arch: " + node.Arch)
fmt.Println("Boards: " + string(node.Boards))
fmt.Println("Boot_time: " + string(node.Boot_time))
fmt.Println("Cores: " + string(node.Cores))
fmt.Println("Cpu_load: " + string(node.Cpu_load))
fmt.Println("Cpus: " + string(node.Cpus))
fmt.Println("Features: " + node.Features)
fmt.Println("Gres: " + node.Gres)
fmt.Println("Node_addr: " + node.Node_addr)
fmt.Println("Node_hostname: " + node.Node_hostname)
fmt.Println("Os: " + node.Os)
fmt.Println("Real_memory: " + string(node.Real_memory))
fmt.Println("Reason_time: " + string(node.Reason_time))
fmt.Println("Node_state: " + string(node.Node_state))
fmt.Println("Reason_uid: " + string(node.Reason_uid))
fmt.Println("Slurmd_start_time: " + string(node.Slurmd_start_time))
fmt.Println("Sockets: " + string(node.Sockets))
fmt.Println("Threads: " + string(node.Threads))
fmt.Println("Tmp_disk: " + string(node.Tmp_disk))
fmt.Println("Weight: " + string(node.Weight))
}
}

View File

@ -0,0 +1,21 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/partitioninfo"
"fmt"
)
func main() {
partition_list := partition_info.Get_partitions()
fmt.Printf("Found %d partions \n", partition_list.Record_count)
for i := range partition_list.Partition_list {
partition := partition_list.Partition_list[i]
fmt.Printf("%s\t %s\t %d\t %d\n", partition.Name, partition.Nodes, partition.Max_time, partition.Node_inx)
fmt.Println("partitionName: " + partition.Name)
fmt.Println("partitionNodes: " + partition.Nodes)
fmt.Println(partition.Max_time)
fmt.Println(partition.Node_inx)
}
}

View File

@ -0,0 +1,33 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
"fmt"
"os"
)
func main() {
if len(os.Args) < 2 {
fmt.Printf("Please specify username\n")
return
}
name := os.Args[1]
job_list := job_info.Get_user_jobs(name)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n", msg)
return
}
fmt.Printf("Id\tName\t\tPartion\tUser\tRuntime\tStatus\t\t(Reason)\tNodes\tPriority\n")
fmt.Printf("________________________________________________________________________________________________\n")
for i := range job_list.Job_list {
job := job_list.Job_list[i]
fmt.Printf("%d\t%s\t%s %s\t%s\t%s\t%s\t%d\n",
job.Job_id, job.Name, job.Partition, job_info.Get_job_runtime(job).String(), job.Job_stateS,
job_info.Reason_to_string(job.State_reason), job.Nodes, job.Priority)
}
}

View File

@ -0,0 +1,109 @@
# Submission of jobs
This folder shows a few more examples of how jobs can be submitted in Slurm. Some examples use containers.
Attention: The parameters for job names and partitions probably have to be adjusted!
# Simple Jobs
## submit_job.go
In this example, a simple bash job is submitted. The partition used is *long* (adjust if necessary).
```
job_desc.Partition="long"
```
The job sets two environment variables and executes
```
hostname
env | grep SLURM
```
on a single node of the cluster (single-task job).
The application does not wait until the job is completed, but returns directly.
The (std) output is written to `out-jobid.txt`, the std error to `err-jobid.txt`:
```
job_desc.Std_out = ("./out-%j.txt")
job_desc.Std_err = ("./err-%j.txt")
```
## update_job.go
This example allows updating the QOS and the partition a job is running in. This can help move the job to another queue with a different partition.
Note to users: In theory, the API also allows updating the number of nodes and the tasks per node. However, since this is only allowed for root or a Slurm admin, we do not include an example here.
Syntax
```
./update_job JobId qos partition
```
(Note: this requires that the job with the ID JobId is already submitted and in a pending state.)
# Container jobs
The following examples all submit a job that starts singularity containers.
If they do not exist, these containers are created; however, problems can arise if the user does not have sudo permissions.
## The containers
The first container is an MPI container. It is used by `submit_mpi_containier.go` and `submit_mpi_and_update.go`. The definition is stored in `mpi_container.def`.
It can also be created with the command
```
sudo singularity build mpi_container.img mpi_container.def
```
The program mpi_pingpong (source code enclosed: `mpi_pingpong.c`) is built into the container. It performs a ping-pong test between two processes.
This container uses the hybrid model, which assumes that MPI is installed on the cluster (to start the job) and also installs it in the container itself. Works with OpenMPI.
The second container is an OpenMP container, including a sample OpenMP program openmp_example (source code: `openmp_example.c`).
It can also be created with the command:
```
sudo singularity build openmp_container.img openmp_container.def
```
This container is used by `submit_omp_container.go`.
## submit_mpi_containier.go
Submits an MPI container job to the cluster. It runs two processes on two nodes:
```
job_desc.Min_nodes =uint32(2)
job_desc.Num_tasks = uint32(2)
```
The application blocks until the job is completed. The (std) output is written to `jobid-out.txt`, the std error to `jobId-err.txt`:
```
job_desc.Std_out = ("./%j-out.txt")
job_desc.Std_err = ("./%j-err.txt")
```
## submit_omp_container.go
Submits two OpenMP jobs to the cluster and waits until they are completed.
Both jobs allocate *one process* for the job, but *two CPUs per task/process* (for multi-threading).
```
job_desc.Num_tasks = uint32(1)
job_desc.Cpus_per_task = uint16(2)
```
The first job reads the environment variable `SLURM_JOB_CPUS_PER_NODE` and sets the number of OpenMP threads to exactly the number of CPUs that are available per task/process.
```
job_desc.Script+= "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
```
The second job sets the number of threads to 4 (which is oversubscribing, because more threads are started than CPUs are allocated) and executes the same job.
```
job_desc.Script+= "export OMP_NUM_THREADS=4\n"
```
The program waits until both jobs are completed. The results are written to the two output files, similar to `submit_mpi_containier.go`.
## submit_mpi_and_update.go
This application does the same as `submit_mpi_containier.go`, but additionally: if the submitted job is still pending, it moves the job to another QOS and partition:
```
ops.Qos = "shortjobs"
ops.Partition = "short"
```
This situation can, for example, be created by submitting other, longer jobs in the background beforehand (depending on the partition size) and then starting this application:
```
./submit_mpi_containier & ./submit_mpi_containier & ./submit_mpi_and_update
```
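For orientation, the submit-and-poll pattern that the container examples share can be reduced to the following minimal sketch. This is a sketch only, assuming the `slurm`, `job_info` and `submit_job` wrapper packages from this repository; the script and partition name are placeholders to adjust:
```
package main

import (
    "fmt"
    "os/user"
    "strconv"
    "time"

    "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
    job_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
    submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
)

func main() {
    // Describe the job; script and partition are placeholders.
    job_desc := submit_job.Job_descriptor{}
    job_desc.Script = "#!/bin/bash\nhostname\n"
    job_desc.Partition = "long"
    u, _ := user.Current()
    uid, _ := strconv.Atoi(u.Uid)
    gid, _ := strconv.Atoi(u.Gid)
    job_desc.User_id = uint32(uid)
    job_desc.Group_id = uint32(gid)

    answer := submit_job.Submit_job(&job_desc)
    if answer.Error_code != 0 {
        fmt.Printf("Error: %s\n", slurm.GetErrorString(answer.Error_code))
        return
    }
    fmt.Printf("Submitted Job %d\n", answer.Job_id)

    // Poll the job state until it leaves the Pending/Running states.
    state := "Pending"
    for state == "Pending" || state == "Running" {
        time.Sleep(2 * time.Second)
        job_list := job_info.Get_job(answer.Job_id)
        if job_list.Error_code != 0 {
            fmt.Printf("Error: %s\n", slurm.GetErrorString(job_list.Error_code))
            return
        }
        state = job_list.Job_list[0].Job_stateS
        fmt.Printf("job %d is %s\n", answer.Job_id, state)
    }
}
```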

View File

@ -0,0 +1,37 @@
Bootstrap: docker
From: ubuntu:latest
%files
mpi_pingpong.c /opt
%environment
export OMPI_DIR=/home0/opt/openmpi
export SINGULARITY_OMPI_DIR=$OMPI_DIR
export SINGULARITYENV_APPEND_PATH=$OMPI_DIR/bin
export SINGULAIRTYENV_APPEND_LD_LIBRARY_PATH=$OMPI_DIR/lib
%post
echo "Installing required packages..."
apt-get update && apt-get install -y wget git bash gcc gfortran g++ make file
echo "Installing Open MPI"
export OMPI_DIR=/home0/opt/openmpi
export OMPI_VERSION=4.0.3
export OMPI_URL="https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-$OMPI_VERSION.tar.bz2"
mkdir -p /tmp/ompi
mkdir -p /opt
chmod a+w /opt/
chmod a+r /opt/
ls -la /tmp/ompi
# Download
cd /tmp/ompi && wget -O openmpi-$OMPI_VERSION.tar.bz2 $OMPI_URL && tar -xjf openmpi-$OMPI_VERSION.tar.bz2
ls -la
# Compile and install
cd /tmp/ompi/openmpi-$OMPI_VERSION && ./configure --prefix=$OMPI_DIR && make install
# Set env variables so we can compile our application
export PATH=$OMPI_DIR/bin:$PATH
export LD_LIBRARY_PATH=$OMPI_DIR/lib:$LD_LIBRARY_PATH
export MANPATH=$OMPI_DIR/share/man:$MANPATH
# rm -r tmp/mpi
echo "Compiling the MPI application..."
cd /opt && mpicc -o mpi_pingpong mpi_pingpong.c

View File

@ -0,0 +1,65 @@
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_ITER 1000
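/* Ping-pong test between two MPI ranks: for message sizes from 1 byte up to
 * 4 MiB (1<<22), the two ranks exchange each message MAX_ITER times (after two
 * warm-up rounds) and rank 0 reports the elapsed time per message size. */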
int main (int argc, char **argv) {
int rc;
int size;
int myrank;
size_t max_send = 1<<22;
char *send_buf = (char*)malloc(sizeof(char)*max_send);
char *recv_buf = (char*)malloc(sizeof(char)*max_send);
size_t send_size;
clock_t start, end;
rc = MPI_Init (&argc, &argv);
if (rc != MPI_SUCCESS) {
fprintf (stderr, "MPI_Init() failed");
return EXIT_FAILURE;
}
rc = MPI_Comm_size (MPI_COMM_WORLD, &size);
if (rc != MPI_SUCCESS) {
fprintf (stderr, "MPI_Comm_size() failed");
goto exit_with_error;
}
if(size!= 2) {
fprintf(stderr, "This process requieres exact two processes\n");
}
rc = MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
if (rc != MPI_SUCCESS) {
fprintf (stderr, "MPI_Comm_rank() failed");
goto exit_with_error;
}
if(myrank==0)
fprintf (stdout, "Size\t Time(ms)\n");
for(send_size=1 ; send_size<= max_send; send_size*=2){
for (int i = 0; i<MAX_ITER+2; i++) {
if(i == 2)
start = clock();
if(myrank == 0){
MPI_Send(send_buf, send_size, MPI_CHAR, 1, 0x4, MPI_COMM_WORLD);
MPI_Recv(recv_buf, send_size, MPI_CHAR, 1, 0x5, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
}
else {
MPI_Recv(recv_buf, send_size, MPI_CHAR, 0, 0x4, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Send(send_buf, send_size, MPI_CHAR, 0, 0x5, MPI_COMM_WORLD);
}
}
end= clock();
double time_taken = (double)(end-start)/CLOCKS_PER_SEC;
if(myrank == 0 )
fprintf(stdout, "%ld\t %f\n", send_size, time_taken);
}
MPI_Finalize();
return EXIT_SUCCESS;
exit_with_error:
MPI_Finalize();
return EXIT_FAILURE;
}

View File

@ -0,0 +1,18 @@
Bootstrap: docker
From: ubuntu:latest
%files
openmp_example.c /opt
%environment
export OMPI_DIR=/home0/opt/openmpi
export SINGULARITY_OMPI_DIR=$OMPI_DIR
export SINGULARITYENV_APPEND_PATH=$OMPI_DIR/bin
export SINGULAIRTYENV_APPEND_LD_LIBRARY_PATH=$OMPI_DIR/lib
%post
echo "Installing required packages..."
apt-get update && apt-get install -y wget git bash gcc gfortran g++ make file
echo "Compiling the MPI application..."
cd /opt && gcc -o openmp_example -fopenmp openmp_example.c

View File

@ -0,0 +1,14 @@
#include <stdio.h>
#include <omp.h>
int main() {
#pragma omp parallel
{
int id = omp_get_thread_num();
int data = id;
int total = omp_get_num_threads();
printf("Greetings from thread %d out of %d with Data %d\n", id, total, data);
}
printf("parallel for ends.\n");
return 0;
}

View File

@ -0,0 +1,38 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
)
import "os/user"
import "os"
import "strconv"
import "fmt"
func main() {
job_desc := submit_job.Job_descriptor{}
job_desc.Script = "#! /bin/bash\n hostname \n env | grep SLURM "
dir, _ := os.Getwd()
user, _ := user.Current()
userid, _ := strconv.Atoi(user.Uid)
job_desc.User_id = uint32(userid)
groupid, _ := strconv.Atoi(user.Gid)
job_desc.Group_id = uint32(groupid)
job_desc.Name = "test_job"
job_desc.Partition = "debug"
job_desc.Time_limit = uint32(2)
job_desc.Min_nodes = uint32(1)
job_desc.Std_out = ("./out-%j.txt")
job_desc.Std_err = ("./err-%j.txt")
job_desc.Work_dir = dir
job_desc.Environment = []string{"SLURM_GO_JOB=TRUE", "SLURM_CONTAINER_JOB=FALSE"}
answer := submit_job.Submit_job(&job_desc)
if answer.Error_code != 0 {
msg := slurm.GetErrorString(answer.Error_code)
fmt.Printf("Error: %s\n", msg)
return
}
fmt.Printf("Submitted Job %d\n", answer.Job_id)
}

View File

@ -0,0 +1,122 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
job_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
)
import "os"
import "strconv"
import "fmt"
import "os/exec"
import "path/filepath"
import "time"
import "os/user"
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
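// build_container builds the singularity image container_name from the
// definition file file_name by shelling out to "sudo singularity build".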
func build_container(file_name, container_name string) {
cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build", container_name, file_name)
fmt.Print("Now build new container")
fmt.Printf("%s\n", cmd.String())
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
fmt.Printf("error in creating container %s \n", err)
// return
}
fmt.Printf("%s\n", stdoutStderr)
}
func main() {
job_desc := submit_job.Job_descriptor{}
dir, _ := os.Getwd()
container := filepath.Join(dir, "mpi_container.img")
definition := filepath.Join(dir, "mpi_container.def")
if !fileExists(container) {
build_container(definition, container)
}
if !fileExists(container) {
return
}
/* use Cmd to create our script */
job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n srun hostname \n"
cmd := exec.Command("/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec", container, "/opt/mpi_pingpong")
job_desc.Script += cmd.String()
fmt.Printf("cmd %s\n", job_desc.Script)
user, _ := user.Current()
userid, _ := strconv.Atoi(user.Uid)
job_desc.User_id = uint32(userid)
groupid, _ := strconv.Atoi(user.Gid)
job_desc.Group_id = uint32(groupid)
job_desc.Name = "flex_mpi_job"
job_desc.Partition = "long"
job_desc.Time_limit = uint32(60)
job_desc.Ntasks_per_node = uint16(1)
job_desc.Num_tasks = uint32(2)
job_desc.Std_out = ("./%j-out.txt")
job_desc.Std_err = ("./%j-err.txt")
job_desc.Work_dir = dir
time.Sleep(3 * time.Second)
answer := submit_job.Submit_job(&job_desc)
if answer.Error_code != 0 {
msg := slurm.GetErrorString(answer.Error_code)
fmt.Printf("Error: %s\n", msg)
return
}
fmt.Printf("Submitted Job %d\n", answer.Job_id)
time.Sleep(5 * time.Second)
job_list := job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n", msg)
return
}
job := job_list.Job_list[0]
fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
state := job.Job_stateS
if state == "Pending" {
fmt.Printf("Move job %d to another partition \n", answer.Job_id)
var ops submit_job.Update_job_options
ops.Qos = "shortjobs"
ops.Partition = "short"
err2 := submit_job.Update_job(ops, uint32(answer.Job_id))
if err2 != uint32(0) {
fmt.Printf("error %s \n", slurm.GetErrorString(err2))
}
}
for state == "Pending" || state == "Running" {
time.Sleep(2 * time.Second)
job_list = job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n", msg)
return
}
job = job_list.Job_list[0]
state = job.Job_stateS
fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
}
fmt.Printf("Total runtime Job %d %s\n", job.Job_id, job_info.Get_job_runtime(job).String())
}

View File

@ -0,0 +1,111 @@
package main
//
//import (
// "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
// job_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
// submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
// "fmt"
// "os"
// "os/exec"
// "os/user"
// "path/filepath"
// "strconv"
// "time"
//)
//
//func fileExists(filename string) bool {
// info, err := os.Stat(filename)
// if os.IsNotExist(err) {
// return false
// }
// return !info.IsDir()
//}
//func build_container(file_name, container_name string) {
//
// cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build", container_name, file_name)
// fmt.Print("Now build new container")
// fmt.Printf("%s\n", cmd.String())
// stdoutStderr, err := cmd.CombinedOutput()
// if err != nil {
// fmt.Printf("error in creating container %s \n", err)
//
// fmt.Printf("%s\n", stdoutStderr)
// // return
// }
// fmt.Printf("%s\n", stdoutStderr)
//}
//
//func main() {
// job_desc := submit_job.Job_descriptor{}
//
// dir, _ := os.Getwd()
// container := filepath.Join(dir, "mpi_container.img")
// definition := filepath.Join(dir, "mpi_container.def")
// if !fileExists(container) {
// build_container(definition, container)
// }
//
// if !fileExists(container) {
// return
// }
// /* use Cmd to create our script */
//
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
// cmd := exec.Command("/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec", container, "/opt/mpi_pingpong")
// job_desc.Script += cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// user, _ := user.Current()
// userid, _ := strconv.Atoi(user.Uid)
// job_desc.User_id = uint32(userid)
// groupid, _ := strconv.Atoi(user.Gid)
//
// job_desc.Group_id = uint32(groupid)
// job_desc.Name = "mpi_job"
// job_desc.Partition = "long"
// job_desc.Time_limit = uint32(60)
// job_desc.Min_nodes = uint32(2)
// job_desc.Num_tasks = uint32(2)
// job_desc.Std_out = ("./%j-out.txt")
// job_desc.Std_err = ("./%j-err.txt")
// job_desc.Work_dir = dir
//
// answer := submit_job.Submit_job(&job_desc)
// if answer.Error_code != 0 {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer.Job_id)
//
// job_list := job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
//
// }
// job := job_list.Job_list[0]
//
// fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
// state := job.Job_stateS
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
//
// }
//
// fmt.Printf("Total runtime Job %d: %s\n", job.Job_id, job_info.Get_job_runtime(job).String())
//
//}

View File

@ -0,0 +1,159 @@
package main
//
//import (
// "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
// job_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
// submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
// "fmt"
// "os"
// "os/exec"
// "os/user"
// "path/filepath"
// "strconv"
// "time"
//)
//
//func fileExists(filename string) bool {
// info, err := os.Stat(filename)
// if os.IsNotExist(err) {
// return false
// }
// return !info.IsDir()
//}
//func build_container(file_name, container_name string) {
//
// cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build", container_name, file_name)
// fmt.Print("Now build new container")
// fmt.Printf("%s\n", cmd.String())
// stdoutStderr, err := cmd.CombinedOutput()
// if err != nil {
// fmt.Printf("error in creating container %s \n", err)
//
// fmt.Printf("%s\n", stdoutStderr)
// // return
// }
// fmt.Printf("%s\n", stdoutStderr)
//}
//
//func main() {
// job_desc := submit_job.Job_descriptor{}
//
// dir, _ := os.Getwd()
// container := filepath.Join(dir, "openmp_container.img")
// definition := filepath.Join(dir, "openmp_container.def")
// if !fileExists(container) {
// build_container(definition, container)
// }
//
// if !fileExists(container) {
// return
// }
// /* use Cmd to create our script */
//
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
// job_desc.Script += "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
// cmd := exec.Command("/usr/local/bin/singularity", "exec", container, "/opt/openmp_example")
//
// job_desc.Script += cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// user, _ := user.Current()
// userid, _ := strconv.Atoi(user.Uid)
// job_desc.User_id = uint32(userid)
// groupid, _ := strconv.Atoi(user.Gid)
//
// job_desc.Group_id = uint32(groupid)
// job_desc.Name = "test_job"
// job_desc.Partition = "long"
// job_desc.Time_limit = uint32(60)
// job_desc.Min_nodes = uint32(1)
// job_desc.Num_tasks = uint32(1)
//
// job_desc.Cpus_per_task = uint16(2)
// job_desc.Std_out = ("./%j-out.txt")
// job_desc.Std_err = ("./%j-err.txt")
// job_desc.Work_dir = dir
//
// answer := submit_job.Submit_job(&job_desc)
// if answer.Error_code != 0 {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer.Job_id)
//
// /*Now, we submit the same job again, but with some oversubscription */
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
// job_desc.Script += "export OMP_NUM_THREADS=4\n"
//
// job_desc.Script += cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// answer2 := submit_job.Submit_job(&job_desc)
// if answer2.Error_code != 0 {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer2.Job_id)
//
// job_list := job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
//
// }
// job := job_list.Job_list[0]
//
// fmt.Printf("job is %s\n", job.Job_stateS)
// state := job.Job_stateS
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job is %s\n", job.Job_stateS)
//
// }
//
// fmt.Printf("Total runtime first job %s\n", job_info.Get_job_runtime(job).String())
// /*wait for second job */
// job_list = job_info.Get_job(answer2.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// fmt.Printf("job is %s\n", job.Job_stateS)
// state = job.Job_stateS
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer2.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n", msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job is %s\n", job.Job_stateS)
//
// }
//
// fmt.Printf("Total runtime second job %s\n", job_info.Get_job_runtime(job).String())
//
//}

View File

@ -0,0 +1,30 @@
package main
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
)
import "os"
import "strconv"
import "fmt"
func main() {
if len(os.Args) < 4 {
fmt.Printf("Synthax specify JobID, qos and partition \n")
return
}
var ops submit_job.Update_job_options
id, err := strconv.Atoi(os.Args[1])
if err != nil {
fmt.Printf("Invalid job id (no int) %s\n", os.Args[1])
return
}
ops.Qos = os.Args[2]
ops.Partition = os.Args[3]
err2 := submit_job.Update_job(ops, uint32(id))
if err2 != uint32(0) {
fmt.Printf("error %s \n", slurm.GetErrorString(err2))
}
}

View File

@ -0,0 +1,118 @@
/*These are some extra functions to work with slurm in go
** They are separated, since they don't use the slurm-API
** but wrap around the SLURM command line tools */
package extra
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
job_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
)
var slurm_path string
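// find_slurm_path locates the Slurm installation: it honors the SLURM_PATH
// environment variable if set, otherwise it searches PATH for the sinfo binary.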
func find_slurm_path() {
var err error
var path string
path = os.Getenv("SLURM_PATH")
if path == " " {
path, err = exec.LookPath("sinfo")
if err != nil {
fmt.Printf("could not find slurm executables\n Either add slum-bins to your PATH or define SLURM_PATH\n")
} else {
slurm_path = strings.TrimSuffix(path, "bin/sinfo")
}
} else {
test_path := filepath.Join(path, "bin/sinfo")
_, err := os.Stat(test_path)
if os.IsNotExist(err) {
fmt.Printf("Slurm executable sinfo does no exist at %s\n", test_path)
} else {
slurm_path = path
}
}
}
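// Cancel_job cancels the job with the given ID by shelling out to scancel.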
func Cancel_job(JobId uint32) error {
find_slurm_path()
if slurm_path == "" {
return errors.New("Cannot find slurm executable")
}
job_list := job_info.Get_job(JobId)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Print(msg)
return errors.New(msg)
}
path := filepath.Join(slurm_path, "bin", "scancel")
cmd := exec.Command(path, strconv.FormatInt(int64(JobId), 10))
fmt.Print(cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
msg := string(out) + err.Error()
return errors.New(msg)
}
return nil
}
type Acc_Job_info struct {
JobId uint32
User string
Account string
State string
JobName string
}
var sacct_format_string string
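// parse_sacct_output parses the pipe-delimited ("-p") output of sacct into
// Acc_Job_info records, skipping non-numeric entries such as "323.batch".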
func parse_sacct_output(input string) []Acc_Job_info {
var job_list []Acc_Job_info
lines := strings.Split(string(input), "\n")
fmt.Printf("len %d\n", len(lines)-1)
for l := range lines {
var job_info Acc_Job_info
elements := strings.Split(lines[l], "|")
if len(elements) < 5 {
break //Well, this is not clean, but keep it like this for now
}
id, ierr := strconv.Atoi(elements[0])
if ierr != nil {
break //we have no usable entry here, but something like 323.batch; ignore these for now
}
job_info.JobId = uint32(id)
job_info.User = elements[1]
job_info.Account = elements[2]
job_info.State = elements[3]
job_info.JobName = elements[4]
job_list = append(job_list, job_info)
}
return job_list
}
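// Get_job_info_accounting queries sacct for the accounting records of the given job ID.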
func Get_job_info_accounting(JobId uint32) ([]Acc_Job_info, error) {
sacct_format_string = "JobId,user,account,state,JobName"
find_slurm_path()
if slurm_path == "" {
return nil, errors.New("Cannot find slurm executable")
}
path := filepath.Join(slurm_path, "bin", "sacct")
cmd := exec.Command(path, "-j", strconv.FormatInt(int64(JobId), 10), "--format", sacct_format_string, "-p", "-n")
//fmt.Printf(cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
msg := string(out) + err.Error()
return nil, errors.New(msg)
}
list := parse_sacct_output(string(out))
return list, nil
}

View File

@ -0,0 +1,480 @@
package job_info
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int64_t int64_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
struct job_info_msg *get_job_info(){
struct job_info_msg* job_buffer;
if(slurm_load_jobs ((time_t) NULL,
&job_buffer, SHOW_ALL)) {
return NULL;
}
return job_buffer;
}
struct job_info_msg *get_single_job_info(uint32_t id){
struct job_info_msg* job_buffer;
if( slurm_load_job (&job_buffer, id, SHOW_DETAIL)) {
return NULL;
}
return job_buffer;
}
struct job_info* job_from_list(struct job_info_msg *list, int i){
return &list->job_array[i];
}
void free_job_buffer(void* buffer){
slurm_free_job_info_msg ((struct job_info_msg*)buffer);
}
struct job_info_msg *get_user_job_info(uint32_t id){
struct job_info_msg* job_buffer;
if( slurm_load_job_user(&job_buffer, id, SHOW_DETAIL))
return NULL;
return job_buffer;
}
int64_t get_job_endtime(int32_t jobid){
time_t end_time;
if(slurm_get_end_time (jobid, &end_time))
return -1;
else
return end_time;
}
int char_len(char* c) {
unsigned int i = 0;
while(c[i]!='\0') i++;
return i;
}
*/
import "C"
import "fmt"
import "os/user"
import "strconv"
import "time"
func Reason_to_string(state uint16) string {
switch s := C.uint16_t(state); s {
case C.WAIT_NO_REASON:
return "wait no reason"
}
return "unkown reason"
}
func state_to_string(state uint32) string {
switch s := C.uint32_t(state); s {
case C.JOB_PENDING:
return "Pending" /* queued waiting for initiation */
case C.JOB_RUNNING:
return "Running" /* allocated resources and executing */
case C.JOB_SUSPENDED:
return "Suspended" /* allocated resources, execution suspended */
case C.JOB_COMPLETE:
return "Complete" /* completed execution successfully */
case C.JOB_CANCELLED:
return "Cancelled" /* cancelled by user */
case C.JOB_FAILED:
return "Failed" /* completed execution unsuccessfully */
case C.JOB_TIMEOUT:
return "Timeout" /* terminated on reaching time limit */
case C.JOB_NODE_FAIL:
return "Node Fail" /* terminated on node failure */
case C.JOB_PREEMPTED:
return "Preempted" /* terminated due to preemption */
}
return "Unknow status" /* not a real state, last entry in table */
}
type Job_info struct {
Account string
Alloc_node string
Alloc_sid uint32
//array_bitmap void;
Array_job_id uint32
Array_task_id uint32
Assoc_id uint32
Batch_flag uint16
Batch_host string
Boards_per_node uint16
Command string
Comment string
Contiguous uint16
Cores_per_socket uint16
Cpus_per_task uint16
Dependency string
Derived_ec uint32
Eligible_time int64
End_time int64
Exc_nodes string
Exc_node_inx int32
Exit_code uint32
Features string
//gres_detail_str char**;
Group_id uint32
Job_id uint32
Job_state uint32
Job_stateS string
Licenses string
Max_cpus uint32
Max_nodes uint32
Name string
Network string
Nodes string
Nice uint32
Node_inx int32
Ntasks_per_core uint16
Ntasks_per_node uint16
Ntasks_per_socket uint16
Ntasks_per_board uint16
Num_cpus uint32
Num_nodes uint32
Partition string
Pn_min_memory uint64
Pn_min_cpus uint16
Pn_min_tmp_disk uint32
Preempt_time int64
Pre_sus_time int64
Priority uint32
Profile uint32
Qos string
Req_nodes string
Req_node_inx int32
Req_switch uint32
Requeue uint16
Resize_time int64
Restart_cnt uint16
Resv_name string
Shared uint16
Show_flags uint16
Sockets_per_board uint16
Sockets_per_node uint16
Start_time int64
State_desc string
State_reason uint16
Submit_time int64
Suspend_time int64
Time_limit uint32
Time_min uint32
Threads_per_core uint16
User_id uint32
//accurate void;
Wait4switch uint32
Wckey string
Work_dir string
}
func Job_info_convert_c_to_go(c_struct *C.struct_job_info) Job_info {
var go_struct Job_info
go_struct.Account = C.GoString(c_struct.account)
go_struct.Alloc_node = C.GoString(c_struct.alloc_node)
go_struct.Alloc_sid = uint32(c_struct.alloc_sid)
go_struct.Array_job_id = uint32(c_struct.array_job_id)
go_struct.Array_task_id = uint32(c_struct.array_task_id)
go_struct.Assoc_id = uint32(c_struct.assoc_id)
go_struct.Batch_flag = uint16(c_struct.batch_flag)
go_struct.Batch_host = C.GoString(c_struct.batch_host)
go_struct.Boards_per_node = uint16(c_struct.boards_per_node)
go_struct.Command = C.GoString(c_struct.command)
go_struct.Comment = C.GoString(c_struct.comment)
go_struct.Contiguous = uint16(c_struct.contiguous)
go_struct.Cores_per_socket = uint16(c_struct.cores_per_socket)
go_struct.Cpus_per_task = uint16(c_struct.cpus_per_task)
go_struct.Dependency = C.GoString(c_struct.dependency)
go_struct.Derived_ec = uint32(c_struct.derived_ec)
go_struct.Eligible_time = int64(c_struct.eligible_time)
go_struct.End_time = int64(c_struct.end_time)
go_struct.Exc_nodes = C.GoString(c_struct.exc_nodes)
go_struct.Exc_node_inx = int32(C.int32_ptr(c_struct.exc_node_inx))
go_struct.Exit_code = uint32(c_struct.exit_code)
go_struct.Features = C.GoString(c_struct.features)
go_struct.Group_id = uint32(c_struct.group_id)
go_struct.Job_id = uint32(c_struct.job_id)
go_struct.Job_state = uint32(c_struct.job_state)
go_struct.Job_stateS = state_to_string(uint32(c_struct.job_state))
go_struct.Licenses = C.GoString(c_struct.licenses)
go_struct.Max_cpus = uint32(c_struct.max_cpus)
go_struct.Max_nodes = uint32(c_struct.max_nodes)
go_struct.Name = C.GoString(c_struct.name)
go_struct.Network = C.GoString(c_struct.network)
go_struct.Nodes = C.GoString(c_struct.nodes)
go_struct.Nice = uint32(c_struct.nice)
go_struct.Node_inx = int32(C.int32_ptr(c_struct.node_inx))
go_struct.Ntasks_per_core = uint16(c_struct.ntasks_per_core)
go_struct.Ntasks_per_node = uint16(c_struct.ntasks_per_node)
go_struct.Ntasks_per_socket = uint16(c_struct.ntasks_per_socket)
go_struct.Ntasks_per_board = uint16(c_struct.ntasks_per_board)
go_struct.Num_cpus = uint32(c_struct.num_cpus)
go_struct.Num_nodes = uint32(c_struct.num_nodes)
go_struct.Partition = C.GoString(c_struct.partition)
go_struct.Pn_min_memory = uint64(c_struct.pn_min_memory)
go_struct.Pn_min_cpus = uint16(c_struct.pn_min_cpus)
go_struct.Pn_min_tmp_disk = uint32(c_struct.pn_min_tmp_disk)
go_struct.Preempt_time = int64(c_struct.preempt_time)
go_struct.Pre_sus_time = int64(c_struct.pre_sus_time)
go_struct.Priority = uint32(c_struct.priority)
go_struct.Profile = uint32(c_struct.profile)
go_struct.Qos = C.GoString(c_struct.qos)
go_struct.Req_nodes = C.GoString(c_struct.req_nodes)
go_struct.Req_node_inx = int32(C.int32_ptr(c_struct.req_node_inx))
go_struct.Req_switch = uint32(c_struct.req_switch)
go_struct.Requeue = uint16(c_struct.requeue)
go_struct.Resize_time = int64(c_struct.resize_time)
go_struct.Restart_cnt = uint16(c_struct.restart_cnt)
go_struct.Resv_name = C.GoString(c_struct.resv_name)
go_struct.Shared = uint16(c_struct.shared)
go_struct.Show_flags = uint16(c_struct.show_flags)
go_struct.Sockets_per_board = uint16(c_struct.sockets_per_board)
go_struct.Sockets_per_node = uint16(c_struct.sockets_per_node)
go_struct.Start_time = int64(c_struct.start_time)
go_struct.State_desc = C.GoString(c_struct.state_desc)
go_struct.State_reason = uint16(c_struct.state_reason)
go_struct.Submit_time = int64(c_struct.submit_time)
go_struct.Suspend_time = int64(c_struct.suspend_time)
go_struct.Time_limit = uint32(c_struct.time_limit)
go_struct.Time_min = uint32(c_struct.time_min)
go_struct.Threads_per_core = uint16(c_struct.threads_per_core)
go_struct.User_id = uint32(c_struct.user_id)
go_struct.Wait4switch = uint32(c_struct.wait4switch)
go_struct.Wckey = C.GoString(c_struct.wckey)
go_struct.Work_dir = C.GoString(c_struct.work_dir)
return go_struct
}
func Print_Job_info(go_struct Job_info) {
fmt.Printf("%s:\t %s\n", "account", go_struct.Account)
fmt.Printf("%s:\t %s\n", "alloc node", go_struct.Alloc_node)
fmt.Printf("%s:\t %d\n", "alloc sid", go_struct.Alloc_sid)
fmt.Printf("%s:\t %d\n", "array job id", go_struct.Array_job_id)
fmt.Printf("%s:\t %d\n", "array task id", go_struct.Array_task_id)
fmt.Printf("%s:\t %d\n", "assoc id", go_struct.Assoc_id)
fmt.Printf("%s:\t %d\n", "batch flag", go_struct.Batch_flag)
fmt.Printf("%s:\t %s\n", "batch host", go_struct.Batch_host)
fmt.Printf("%s:\t %d\n", "boards per node", go_struct.Boards_per_node)
fmt.Printf("%s:\t %s\n", "command", go_struct.Command)
fmt.Printf("%s:\t %s\n", "comment", go_struct.Comment)
fmt.Printf("%s:\t %d\n", "contiguous", go_struct.Contiguous)
fmt.Printf("%s:\t %d\n", "cores per socket", go_struct.Cores_per_socket)
fmt.Printf("%s:\t %d\n", "cpus per task", go_struct.Cpus_per_task)
fmt.Printf("%s:\t %s\n", "dependency", go_struct.Dependency)
fmt.Printf("%s:\t %d\n", "derived ec", go_struct.Derived_ec)
fmt.Printf("%s:\t %d\n", "eligible time", go_struct.Eligible_time)
fmt.Printf("%s:\t %d\n", "end time", go_struct.End_time)
fmt.Printf("%s:\t %s\n", "exc nodes", go_struct.Exc_nodes)
fmt.Printf("%s:\t %d\n", "exc node inx", go_struct.Exc_node_inx)
fmt.Printf("%s:\t %d\n", "exit code", go_struct.Exit_code)
fmt.Printf("%s:\t %s\n", "features", go_struct.Features)
fmt.Printf("%s:\t %d\n", "group id", go_struct.Group_id)
fmt.Printf("%s:\t %d\n", "job id", go_struct.Job_id)
fmt.Printf("%s:\t %d\n", "job state", go_struct.Job_state)
fmt.Printf("%s:\t %s\n", "licenses", go_struct.Licenses)
fmt.Printf("%s:\t %d\n", "max cpus", go_struct.Max_cpus)
fmt.Printf("%s:\t %d\n", "max nodes", go_struct.Max_nodes)
fmt.Printf("%s:\t %s\n", "name", go_struct.Name)
fmt.Printf("%s:\t %s\n", "network", go_struct.Network)
fmt.Printf("%s:\t %s\n", "nodes", go_struct.Nodes)
fmt.Printf("%s:\t %d\n", "nice", go_struct.Nice)
fmt.Printf("%s:\t %d\n", "node inx", go_struct.Node_inx)
fmt.Printf("%s:\t %d\n", "ntasks per core", go_struct.Ntasks_per_core)
fmt.Printf("%s:\t %d\n", "ntasks per node", go_struct.Ntasks_per_node)
fmt.Printf("%s:\t %d\n", "ntasks per socket", go_struct.Ntasks_per_socket)
fmt.Printf("%s:\t %d\n", "ntasks per board", go_struct.Ntasks_per_board)
fmt.Printf("%s:\t %d\n", "num cpus", go_struct.Num_cpus)
fmt.Printf("%s:\t %d\n", "num nodes", go_struct.Num_nodes)
fmt.Printf("%s:\t %s\n", "partition", go_struct.Partition)
fmt.Printf("%s:\t %d\n", "pn min memory", go_struct.Pn_min_memory)
fmt.Printf("%s:\t %d\n", "pn min cpus", go_struct.Pn_min_cpus)
fmt.Printf("%s:\t %d\n", "pn min tmp disk", go_struct.Pn_min_tmp_disk)
fmt.Printf("%s:\t %d\n", "preempt time", go_struct.Preempt_time)
fmt.Printf("%s:\t %d\n", "pre sus time", go_struct.Pre_sus_time)
fmt.Printf("%s:\t %d\n", "priority", go_struct.Priority)
fmt.Printf("%s:\t %d\n", "profile", go_struct.Profile)
fmt.Printf("%s:\t %s\n", "qos", go_struct.Qos)
fmt.Printf("%s:\t %s\n", "req nodes", go_struct.Req_nodes)
fmt.Printf("%s:\t %d\n", "req node inx", go_struct.Req_node_inx)
fmt.Printf("%s:\t %d\n", "req switch", go_struct.Req_switch)
fmt.Printf("%s:\t %d\n", "requeue", go_struct.Requeue)
fmt.Printf("%s:\t %d\n", "resize time", go_struct.Resize_time)
fmt.Printf("%s:\t %d\n", "restart cnt", go_struct.Restart_cnt)
fmt.Printf("%s:\t %s\n", "resv name", go_struct.Resv_name)
fmt.Printf("%s:\t %d\n", "shared", go_struct.Shared)
fmt.Printf("%s:\t %d\n", "show flags", go_struct.Show_flags)
fmt.Printf("%s:\t %d\n", "sockets per board", go_struct.Sockets_per_board)
fmt.Printf("%s:\t %d\n", "sockets per node", go_struct.Sockets_per_node)
fmt.Printf("%s:\t %d\n", "start time", go_struct.Start_time)
fmt.Printf("%s:\t %s\n", "state desc", go_struct.State_desc)
fmt.Printf("%s:\t %d\n", "state reason", go_struct.State_reason)
fmt.Printf("%s:\t %d\n", "submit time", go_struct.Submit_time)
fmt.Printf("%s:\t %d\n", "suspend time", go_struct.Suspend_time)
fmt.Printf("%s:\t %d\n", "time limit", go_struct.Time_limit)
fmt.Printf("%s:\t %d\n", "time min", go_struct.Time_min)
fmt.Printf("%s:\t %d\n", "threads per core", go_struct.Threads_per_core)
fmt.Printf("%s:\t %d\n", "user id", go_struct.User_id)
fmt.Printf("%s:\t %d\n", "wait4switch", go_struct.Wait4switch)
fmt.Printf("%s:\t %s\n", "wckey", go_struct.Wckey)
fmt.Printf("%s:\t %s\n", "work dir", go_struct.Work_dir)
}
type Job_info_msg struct {
Last_update int64
Record_count uint32
Error_code uint32
Job_list []Job_info
}
func Get_job_runtime(job Job_info) time.Duration {
start_time := time.Unix(job.Start_time, 0)
current_time := time.Now()
diff := current_time.Sub(start_time).Round(time.Second)
if int64(diff) < 0 {
return 0
}
end_time := time.Unix(job.End_time, 0)
diff2 := current_time.Sub(end_time)
if int64(diff2) < 0 {
return diff
}
return end_time.Sub(start_time)
}
func Get_job_endtime(id uint32) time.Time {
c_time := C.get_job_endtime(C.int32_t(id))
return time.Unix(int64(c_time), 0)
}
func Get_all_jobs() Job_info_msg {
var go_job_buffer Job_info_msg
c_job_buffer := C.get_job_info()
if c_job_buffer == nil {
go_job_buffer.Last_update = int64(0)
go_job_buffer.Record_count = uint32(0)
go_job_buffer.Error_code = uint32(C.slurm_get_errno())
return go_job_buffer
}
go_job_buffer.Last_update = int64(c_job_buffer.last_update)
go_job_buffer.Record_count = uint32(c_job_buffer.record_count)
go_job_buffer.Job_list = make([]Job_info, c_job_buffer.record_count, c_job_buffer.record_count)
for i := uint32(0); i < go_job_buffer.Record_count; i++ {
job := C.job_from_list(c_job_buffer, C.int(i))
go_job := Job_info_convert_c_to_go(job)
go_job_buffer.Job_list[i] = go_job
}
C.slurm_free_job_info_msg(c_job_buffer)
return go_job_buffer
}
func Get_job(id uint32) Job_info_msg {
var go_job_buffer Job_info_msg
c_job_buffer := C.get_single_job_info(C.uint32_t(id))
if c_job_buffer == nil {
go_job_buffer.Last_update = int64(0)
go_job_buffer.Record_count = uint32(0)
go_job_buffer.Error_code = uint32(C.slurm_get_errno())
return go_job_buffer
}
go_job_buffer.Last_update = int64(c_job_buffer.last_update)
go_job_buffer.Record_count = uint32(c_job_buffer.record_count)
go_job_buffer.Job_list = make([]Job_info, c_job_buffer.record_count, c_job_buffer.record_count)
for i := uint32(0); i < go_job_buffer.Record_count; i++ {
job := C.job_from_list(c_job_buffer, C.int(i))
go_job := Job_info_convert_c_to_go(job)
go_job_buffer.Job_list[i] = go_job
}
C.slurm_free_job_info_msg(c_job_buffer)
return go_job_buffer
}
func Get_user_jobs(name string) Job_info_msg {
var go_job_buffer Job_info_msg
user, err := user.Lookup(name)
if err != nil {
fmt.Printf("Error %s\n", err.Error())
go_job_buffer.Last_update = int64(0)
go_job_buffer.Record_count = uint32(0)
go_job_buffer.Error_code = C.ESLURMD_UID_NOT_FOUND
return go_job_buffer
}
userid, _ := strconv.Atoi(user.Uid)
c_job_buffer := C.get_user_job_info(C.uint32_t(userid))
if c_job_buffer == nil {
go_job_buffer.Last_update = int64(0)
go_job_buffer.Record_count = uint32(0)
go_job_buffer.Error_code = uint32(C.slurm_get_errno())
return go_job_buffer
}
go_job_buffer.Last_update = int64(c_job_buffer.last_update)
go_job_buffer.Record_count = uint32(c_job_buffer.record_count)
go_job_buffer.Job_list = make([]Job_info, c_job_buffer.record_count, c_job_buffer.record_count)
for i := uint32(0); i < go_job_buffer.Record_count; i++ {
job := C.job_from_list(c_job_buffer, C.int(i))
go_job := Job_info_convert_c_to_go(job)
go_job_buffer.Job_list[i] = go_job
}
C.slurm_free_job_info_msg(c_job_buffer)
return go_job_buffer
}
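// Hedged usage sketch (not part of the original file): listing a user's jobs
// and printing each one together with its runtime. "alice" is a placeholder
// user name; on failure the Slurm errno is already stored in Error_code.
func ExampleGetUserJobs() {
	buffer := Get_user_jobs("alice")
	if buffer.Error_code != 0 {
		fmt.Printf("slurm error %d\n", buffer.Error_code)
		return
	}
	for _, job := range buffer.Job_list {
		Print_Job_info(job)
		fmt.Printf("runtime:\t %s\n", Get_job_runtime(job).String())
	}
}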

View File

@ -0,0 +1,232 @@
package node_info
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int64_t int64_ptr(int64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
struct node_info_msg *get_node_info(){
struct node_info_msg* node_buffer;
if(slurm_load_node ((time_t) NULL,
&node_buffer, SHOW_ALL))
return NULL;
return node_buffer;
}
struct node_info_msg *get_single_node_info(char* name){
struct node_info_msg* node_buffer;
if( slurm_load_node_single (&node_buffer, name, SHOW_DETAIL))
return NULL;
return node_buffer;
}
struct node_info* node_from_list(struct node_info_msg *list, int i){
return &list->node_array[i];
}
void free_node_buffer(void* buffer){
slurm_free_node_info_msg ((struct node_info_msg*)buffer);
}
*/
import "C"
import "fmt"
import "unsafe"
type Node_info struct {
Arch string
Boards uint16
Boot_time int64
Cores uint16
Cpu_load uint32
Cpus uint16
Features string
Gres string
Name string
Node_addr string
Node_hostname string
Node_state uint32
Os string
Real_memory uint64
Reason string
Reason_time int64
Reason_uid uint32
Slurmd_start_time int64
Sockets uint16
Threads uint16
Tmp_disk uint32
Weight uint32
}
func Node_info_convert_c_to_go(c_struct *C.struct_node_info) Node_info {
var go_struct Node_info
go_struct.Arch = C.GoString(c_struct.arch)
go_struct.Boards = uint16(c_struct.boards)
go_struct.Boot_time = int64(c_struct.boot_time)
go_struct.Cores = uint16(c_struct.cores)
go_struct.Cpu_load = uint32(c_struct.cpu_load)
go_struct.Cpus = uint16(c_struct.cpus)
go_struct.Features = C.GoString(c_struct.features)
go_struct.Gres = C.GoString(c_struct.gres)
go_struct.Name = C.GoString(c_struct.name)
go_struct.Node_addr = C.GoString(c_struct.node_addr)
go_struct.Node_hostname = C.GoString(c_struct.node_hostname)
go_struct.Node_state = uint32(c_struct.node_state)
go_struct.Os = C.GoString(c_struct.os)
go_struct.Real_memory = uint64(c_struct.real_memory)
go_struct.Reason = C.GoString(c_struct.reason)
go_struct.Reason_time = int64(c_struct.reason_time)
go_struct.Reason_uid = uint32(c_struct.reason_uid)
go_struct.Slurmd_start_time = int64(c_struct.slurmd_start_time)
go_struct.Sockets = uint16(c_struct.sockets)
go_struct.Threads = uint16(c_struct.threads)
go_struct.Tmp_disk = uint32(c_struct.tmp_disk)
go_struct.Weight = uint32(c_struct.weight)
return go_struct
}
func State_to_string(state uint32) string {
switch s := C.uint32_t(state); s {
case C.NODE_STATE_UNKNOWN:
return "node state unknown"
case C.NODE_STATE_DOWN:
return "node state down"
case C.NODE_STATE_IDLE:
return "node state idle"
case C.NODE_STATE_ALLOCATED:
return "node state allocated"
case C.NODE_STATE_ERROR:
return "node state error"
case C.NODE_STATE_MIXED:
return "node state mixed"
case C.NODE_STATE_FUTURE:
return "node state future"
case C.NODE_STATE_END:
return "node state end"
}
return "Unkown state"
}
func Print_node_info(go_struct Node_info) {
fmt.Printf("%s:\t %s\n", "arch", go_struct.Arch)
fmt.Printf("%s:\t %d\n", "boards", go_struct.Boards)
fmt.Printf("%s:\t %d\n", "boot time", go_struct.Boot_time)
fmt.Printf("%s:\t %d\n", "cores", go_struct.Cores)
fmt.Printf("%s:\t %d\n", "cpu load", go_struct.Cpu_load)
fmt.Printf("%s:\t %d\n", "cpus", go_struct.Cpus)
fmt.Printf("%s:\t %s\n", "features", go_struct.Features)
fmt.Printf("%s:\t %s\n", "gres", go_struct.Gres)
fmt.Printf("%s:\t %s\n", "name", go_struct.Name)
fmt.Printf("%s:\t %s\n", "node addr", go_struct.Node_addr)
fmt.Printf("%s:\t %s\n", "node hostname", go_struct.Node_hostname)
fmt.Printf("%s:\t %d\n", "node state", go_struct.Node_state)
fmt.Printf("%s:\t %s\n", "os", go_struct.Os)
fmt.Printf("%s:\t %d\n", "real memory", go_struct.Real_memory)
fmt.Printf("%s:\t %s\n", "reason", go_struct.Reason)
fmt.Printf("%s:\t %d\n", "reason time", go_struct.Reason_time)
fmt.Printf("%s:\t %d\n", "reason uid", go_struct.Reason_uid)
fmt.Printf("%s:\t %d\n", "slurmd start time", go_struct.Slurmd_start_time)
fmt.Printf("%s:\t %d\n", "sockets", go_struct.Sockets)
fmt.Printf("%s:\t %d\n", "threads", go_struct.Threads)
fmt.Printf("%s:\t %d\n", "tmp disk", go_struct.Tmp_disk)
fmt.Printf("%s:\t %d\n", "weight", go_struct.Weight)
}
type Node_info_msg struct {
Last_update int64
Record_count uint32
Error_code uint32
Node_list []Node_info
}
func Get_all_nodes() Node_info_msg {
var go_node_buffer Node_info_msg
c_node_buffer := C.get_node_info()
if c_node_buffer == nil {
go_node_buffer.Last_update = int64(0)
go_node_buffer.Record_count = uint32(0)
go_node_buffer.Error_code = uint32(C.slurm_get_errno())
return go_node_buffer
}
go_node_buffer.Last_update = int64(c_node_buffer.last_update)
go_node_buffer.Record_count = uint32(c_node_buffer.record_count)
go_node_buffer.Node_list = make([]Node_info, c_node_buffer.record_count, c_node_buffer.record_count)
for i := uint32(0); i < go_node_buffer.Record_count; i++ {
node := C.node_from_list(c_node_buffer, C.int(i))
go_node := Node_info_convert_c_to_go(node)
go_node_buffer.Node_list[i] = go_node
}
C.slurm_free_node_info_msg(c_node_buffer)
return go_node_buffer
}
func Get_node_info(name string) Node_info_msg {
var go_node_buffer Node_info_msg
c_name := C.CString(name)
defer C.free(unsafe.Pointer(c_name))
c_node_buffer := C.get_single_node_info(c_name)
if c_node_buffer == nil {
go_node_buffer.Last_update = int64(0)
go_node_buffer.Record_count = uint32(0)
go_node_buffer.Error_code = uint32(C.slurm_get_errno())
return go_node_buffer
}
go_node_buffer.Last_update = int64(c_node_buffer.last_update)
go_node_buffer.Record_count = uint32(c_node_buffer.record_count)
go_node_buffer.Node_list = make([]Node_info, c_node_buffer.record_count, c_node_buffer.record_count)
for i := uint32(0); i < go_node_buffer.Record_count; i++ {
node := C.node_from_list(c_node_buffer, C.int(i))
go_node := Node_info_convert_c_to_go(node)
go_node_buffer.Node_list[i] = go_node
}
C.slurm_free_node_info_msg(c_node_buffer)
return go_node_buffer
}
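// Hedged usage sketch (not part of the original file): dumping every node the
// controller knows about, with the numeric state translated to a string.
func ExampleGetAllNodes() {
	buffer := Get_all_nodes()
	if buffer.Error_code != 0 {
		fmt.Printf("slurm error %d\n", buffer.Error_code)
		return
	}
	for _, node := range buffer.Node_list {
		fmt.Printf("%s: %s\n", node.Name, State_to_string(node.Node_state))
	}
}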

View File

@ -0,0 +1,183 @@
package partition_info
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
int32_t int32_ptr(int32_t* pointer, int off) {
if (NULL == pointer) {
return -1;}
pointer+=off;
return *pointer;
}
uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
int64_t int64_ptr(int64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
struct partition_info_msg *get_partition_info(){
struct partition_info_msg* partition_buffer;
if( slurm_load_partitions ((time_t) NULL,
&partition_buffer, SHOW_ALL))
return NULL;
return partition_buffer;
}
struct partition_info* partition_from_list(struct partition_info_msg *list, int i){
return &list->partition_array[i];
}
void free_partition_buffer(void* buffer){
slurm_free_partition_info_msg ((struct partition_info_msg*)buffer);
}
int find_node_inx(int32_t* node){
int ret = 0;
while(*node != -1) { node++; ret++;};
return ret;
}
*/
import "C"
import "fmt"
type Partition_info struct {
Allow_alloc_nodes string
Allow_groups string
Alternate string
Cr_type uint16
Def_mem_per_cpu uint64
Default_time uint32
Flags uint16
Grace_time uint32
Max_cpus_per_node uint32
Max_mem_per_cpu uint64
Max_nodes uint32
Max_share uint16
Max_time uint32
Min_nodes uint32
Name string
Node_inx []int32
Nodes string
Preempt_mode uint16
State_up uint16
Total_cpus uint32
Total_nodes uint32
}
func Partition_info_convert_c_to_go(c_struct *C.struct_partition_info) Partition_info {
var go_struct Partition_info
go_struct.Allow_alloc_nodes = C.GoString(c_struct.allow_alloc_nodes)
go_struct.Allow_groups = C.GoString(c_struct.allow_groups)
go_struct.Alternate = C.GoString(c_struct.alternate)
go_struct.Cr_type = uint16(c_struct.cr_type)
go_struct.Def_mem_per_cpu = uint64(c_struct.def_mem_per_cpu)
go_struct.Default_time = uint32(c_struct.default_time)
go_struct.Flags = uint16(c_struct.flags)
go_struct.Grace_time = uint32(c_struct.grace_time)
go_struct.Max_cpus_per_node = uint32(c_struct.max_cpus_per_node)
go_struct.Max_mem_per_cpu = uint64(c_struct.max_mem_per_cpu)
go_struct.Max_nodes = uint32(c_struct.max_nodes)
go_struct.Max_share = uint16(c_struct.max_share)
go_struct.Max_time = uint32(c_struct.max_time)
go_struct.Min_nodes = uint32(c_struct.min_nodes)
go_struct.Name = C.GoString(c_struct.name)
t := C.find_node_inx(c_struct.node_inx)
go_struct.Node_inx = make([]int32, t)
for i := int32(0); i < int32(t); i++ {
go_struct.Node_inx[i] = int32(C.int32_ptr(c_struct.node_inx, C.int(i)))
}
go_struct.Nodes = C.GoString(c_struct.nodes)
go_struct.Preempt_mode = uint16(c_struct.preempt_mode)
go_struct.State_up = uint16(c_struct.state_up)
go_struct.Total_cpus = uint32(c_struct.total_cpus)
go_struct.Total_nodes = uint32(c_struct.total_nodes)
return go_struct
}
func Print_Partition_info(go_struct Partition_info) {
fmt.Printf("%s:\t %s\n", "allow alloc nodes", go_struct.Allow_alloc_nodes)
fmt.Printf("%s:\t %s\n", "allow groups", go_struct.Allow_groups)
fmt.Printf("%s:\t %s\n", "alternate", go_struct.Alternate)
fmt.Printf("%s:\t %d\n", "cr type", go_struct.Cr_type)
fmt.Printf("%s:\t %d\n", "def mem per cpu", go_struct.Def_mem_per_cpu)
fmt.Printf("%s:\t %d\n", "default time", go_struct.Default_time)
fmt.Printf("%s:\t %d\n", "flags", go_struct.Flags)
fmt.Printf("%s:\t %d\n", "grace time", go_struct.Grace_time)
fmt.Printf("%s:\t %d\n", "max cpus per node", go_struct.Max_cpus_per_node)
fmt.Printf("%s:\t %d\n", "max mem per cpu", go_struct.Max_mem_per_cpu)
fmt.Printf("%s:\t %d\n", "max nodes", go_struct.Max_nodes)
fmt.Printf("%s:\t %d\n", "max share", go_struct.Max_share)
fmt.Printf("%s:\t %d\n", "max time", go_struct.Max_time)
fmt.Printf("%s:\t %d\n", "min nodes", go_struct.Min_nodes)
fmt.Printf("%s:\t %s\n", "name", go_struct.Name)
fmt.Printf("%s:\t %d\n", "node inx", go_struct.Node_inx)
fmt.Printf("%s:\t %s\n", "nodes", go_struct.Nodes)
fmt.Printf("%s:\t %d\n", "preempt mode", go_struct.Preempt_mode)
fmt.Printf("%s:\t %d\n", "state up", go_struct.State_up)
fmt.Printf("%s:\t %d\n", "total cpus", go_struct.Total_cpus)
fmt.Printf("%s:\t %d\n", "total nodes", go_struct.Total_nodes)
}
type Partition_info_msg struct {
Last_update int64
Record_count uint32
Partition_list []Partition_info
}
func Get_partitions() Partition_info_msg {
var go_partition_buffer Partition_info_msg
c_partition_buffer := C.get_partition_info()
if c_partition_buffer == nil {
go_partition_buffer.Last_update = int64(0)
go_partition_buffer.Record_count = uint32(0)
return go_partition_buffer
}
go_partition_buffer.Last_update = int64(c_partition_buffer.last_update)
go_partition_buffer.Record_count = uint32(c_partition_buffer.record_count)
go_partition_buffer.Partition_list = make([]Partition_info, c_partition_buffer.record_count, c_partition_buffer.record_count)
for i := uint32(0); i < go_partition_buffer.Record_count; i++ {
partition := C.partition_from_list(c_partition_buffer, C.int(i))
go_partition := Partition_info_convert_c_to_go(partition)
go_partition_buffer.Partition_list[i] = go_partition
}
C.slurm_free_partition_info_msg(c_partition_buffer)
return go_partition_buffer
}
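// Hedged usage sketch (not part of the original file): printing every
// partition reported by slurm_load_partitions.
func ExampleGetPartitions() {
	buffer := Get_partitions()
	for _, partition := range buffer.Partition_list {
		Print_Partition_info(partition)
	}
}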

View File

@ -0,0 +1,574 @@
package slurm
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
#ifndef ptr_convert
#define ptr_convert
struct slurm_ctl_conf* get_config(uint64_t time){
slurm_ctl_conf_t * config = NULL;
slurm_load_ctl_conf ((time_t) time, &config);
return config;
}
inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int64_t int64_ptr(int64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
#endif
*/
import "C"
import "fmt"
type Ctl_conf struct {
Last_update int64
Accounting_storage_tres string
Accounting_storage_enforce uint16
Accounting_storage_backup_host string
Accounting_storage_host string
Accounting_storage_loc string
Accounting_storage_pass string
Accounting_storage_port uint32
Accounting_storage_type string
Accounting_storage_user string
Acctng_store_job_comment uint16
//acct_gather_conf void;
Acct_gather_energy_type string
Acct_gather_profile_type string
Acct_gather_filesystem_type string
Acct_gather_node_freq uint16
Authtype string
Batch_start_timeout uint16
Boot_time int64
//cgroup_conf void;
Checkpoint_type string
Cluster_name string
Complete_wait uint16
Debug_flags uint64
Def_mem_per_cpu uint64
Disable_root_jobs uint16
Enforce_part_limits uint16
Epilog string
Epilog_msg_time uint32
Epilog_slurmctld string
Ext_sensors_type string
Ext_sensors_freq uint16
//ext_sensors_conf void;
Fast_schedule uint16
First_job_id uint32
Get_env_timeout uint16
Gres_plugins string
Hash_val uint32
Health_check_interval uint16
Health_check_node_state uint16
Health_check_program string
Inactive_limit uint16
Job_acct_gather_freq string
Job_acct_gather_type string
Job_ckpt_dir string
Job_comp_host string
Job_comp_loc string
Job_comp_pass string
Job_comp_port uint32
Job_comp_type string
Job_comp_user string
Job_credential_private_key string
Job_credential_public_certificate string
Job_file_append uint16
Job_requeue uint16
Job_submit_plugins string
Keep_alive_time uint16
Kill_on_bad_exit uint16
Kill_wait uint16
Launch_type string
Licenses string
Licenses_used string
Mail_prog string
Max_array_sz uint32
Max_job_cnt uint32
Max_job_id uint32
Max_mem_per_cpu uint64
Max_step_cnt uint32
Max_tasks_per_node uint16
Min_job_age uint32
Mpi_default string
Mpi_params string
Msg_timeout uint16
Next_job_id uint32
//node_features_conf void;
Node_prefix string
Over_time_limit uint16
Plugindir string
Plugstack string
Preempt_mode uint16
Preempt_type string
Priority_decay_hl uint32
Priority_calc_period uint32
Priority_favor_small uint16
Priority_flags uint16
Priority_max_age uint32
Priority_reset_period uint16
Priority_type string
Priority_weight_age uint32
Priority_weight_fs uint32
Priority_weight_js uint32
Priority_weight_part uint32
Priority_weight_qos uint32
Private_data uint16
Proctrack_type string
Prolog string
Prolog_slurmctld string
Propagate_prio_process uint16
Propagate_rlimits string
Propagate_rlimits_except string
Reboot_program string
Reconfig_flags uint16
Resume_program string
Resume_rate uint16
Resume_timeout uint16
Resv_epilog string
Resv_over_run uint16
Resv_prolog string
Ret2service uint16
Salloc_default_command string
Sched_logfile string
Sched_log_level uint16
Sched_params string
Sched_time_slice uint16
Schedtype string
Select_type string
//select_conf_key_pairs void;
Select_type_param uint16
Slurm_conf string
Slurm_user_id uint32
Slurm_user_name string
Slurmd_user_id uint32
Slurmd_user_name string
Slurmctld_debug uint16
Slurmctld_logfile string
Slurmctld_pidfile string
Slurmctld_plugstack string
//slurmctld_plugstack_conf void;
Slurmctld_port uint32
Slurmctld_port_count uint16
Slurmctld_timeout uint16
Slurmd_debug uint16
Slurmd_logfile string
Slurmd_pidfile string
Slurmd_port uint32
Slurmd_spooldir string
Slurmd_timeout uint16
Srun_epilog string
Srun_prolog string
State_save_location string
Suspend_exc_nodes string
Suspend_exc_parts string
Suspend_program string
Suspend_rate uint16
Suspend_time uint32
Suspend_timeout uint16
Switch_type string
Task_epilog string
Task_plugin string
Task_plugin_param uint32
Task_prolog string
Tmp_fs string
Topology_plugin string
Track_wckey uint16
Tree_width uint16
Unkillable_program string
Unkillable_timeout uint16
Use_pam uint16
Version string
Vsize_factor uint16
Wait_time uint16
}
func Ctl_conf_convert_c_to_go(c_struct *C.struct_slurm_ctl_conf) Ctl_conf {
var go_struct Ctl_conf
go_struct.Last_update = int64(c_struct.last_update)
go_struct.Accounting_storage_enforce = uint16(c_struct.accounting_storage_enforce)
go_struct.Accounting_storage_backup_host = C.GoString(c_struct.accounting_storage_backup_host)
go_struct.Accounting_storage_host = C.GoString(c_struct.accounting_storage_host)
go_struct.Accounting_storage_loc = C.GoString(c_struct.accounting_storage_loc)
go_struct.Accounting_storage_pass = C.GoString(c_struct.accounting_storage_pass)
go_struct.Accounting_storage_port = uint32(c_struct.accounting_storage_port)
go_struct.Accounting_storage_type = C.GoString(c_struct.accounting_storage_type)
go_struct.Accounting_storage_user = C.GoString(c_struct.accounting_storage_user)
go_struct.Acctng_store_job_comment = uint16(c_struct.acctng_store_job_comment)
go_struct.Acct_gather_energy_type = C.GoString(c_struct.acct_gather_energy_type)
go_struct.Acct_gather_profile_type = C.GoString(c_struct.acct_gather_profile_type)
go_struct.Acct_gather_filesystem_type = C.GoString(c_struct.acct_gather_filesystem_type)
go_struct.Acct_gather_node_freq = uint16(c_struct.acct_gather_node_freq)
go_struct.Authtype = C.GoString(c_struct.authtype)
go_struct.Batch_start_timeout = uint16(c_struct.batch_start_timeout)
go_struct.Boot_time = int64(c_struct.boot_time)
go_struct.Checkpoint_type = C.GoString(c_struct.checkpoint_type)
go_struct.Cluster_name = C.GoString(c_struct.cluster_name)
go_struct.Complete_wait = uint16(c_struct.complete_wait)
go_struct.Debug_flags = uint64(c_struct.debug_flags)
go_struct.Def_mem_per_cpu = uint64(c_struct.def_mem_per_cpu)
go_struct.Disable_root_jobs = uint16(c_struct.disable_root_jobs)
go_struct.Enforce_part_limits = uint16(c_struct.enforce_part_limits)
go_struct.Epilog = C.GoString(c_struct.epilog)
go_struct.Epilog_msg_time = uint32(c_struct.epilog_msg_time)
go_struct.Epilog_slurmctld = C.GoString(c_struct.epilog_slurmctld)
go_struct.Ext_sensors_type = C.GoString(c_struct.ext_sensors_type)
go_struct.Ext_sensors_freq = uint16(c_struct.ext_sensors_freq)
go_struct.Fast_schedule = uint16(c_struct.fast_schedule)
go_struct.First_job_id = uint32(c_struct.first_job_id)
go_struct.Get_env_timeout = uint16(c_struct.get_env_timeout)
go_struct.Gres_plugins = C.GoString(c_struct.gres_plugins)
go_struct.Hash_val = uint32(c_struct.hash_val)
go_struct.Health_check_interval = uint16(c_struct.health_check_interval)
go_struct.Health_check_node_state = uint16(c_struct.health_check_node_state)
go_struct.Health_check_program = C.GoString(c_struct.health_check_program)
go_struct.Inactive_limit = uint16(c_struct.inactive_limit)
go_struct.Job_acct_gather_freq = C.GoString(c_struct.job_acct_gather_freq)
go_struct.Job_acct_gather_type = C.GoString(c_struct.job_acct_gather_type)
go_struct.Job_ckpt_dir = C.GoString(c_struct.job_ckpt_dir)
go_struct.Job_comp_host = C.GoString(c_struct.job_comp_host)
go_struct.Job_comp_loc = C.GoString(c_struct.job_comp_loc)
go_struct.Job_comp_pass = C.GoString(c_struct.job_comp_pass)
go_struct.Job_comp_port = uint32(c_struct.job_comp_port)
go_struct.Job_comp_type = C.GoString(c_struct.job_comp_type)
go_struct.Job_comp_user = C.GoString(c_struct.job_comp_user)
go_struct.Job_credential_private_key = C.GoString(c_struct.job_credential_private_key)
go_struct.Job_credential_public_certificate = C.GoString(c_struct.job_credential_public_certificate)
go_struct.Job_file_append = uint16(c_struct.job_file_append)
go_struct.Job_requeue = uint16(c_struct.job_requeue)
go_struct.Job_submit_plugins = C.GoString(c_struct.job_submit_plugins)
go_struct.Keep_alive_time = uint16(c_struct.keep_alive_time)
go_struct.Kill_on_bad_exit = uint16(c_struct.kill_on_bad_exit)
go_struct.Kill_wait = uint16(c_struct.kill_wait)
go_struct.Launch_type = C.GoString(c_struct.launch_type)
go_struct.Licenses = C.GoString(c_struct.licenses)
go_struct.Licenses_used = C.GoString(c_struct.licenses_used)
go_struct.Mail_prog = C.GoString(c_struct.mail_prog)
go_struct.Max_array_sz = uint32(c_struct.max_array_sz)
go_struct.Max_job_cnt = uint32(c_struct.max_job_cnt)
go_struct.Max_job_id = uint32(c_struct.max_job_id)
go_struct.Max_mem_per_cpu = uint64(c_struct.max_mem_per_cpu)
go_struct.Max_step_cnt = uint32(c_struct.max_step_cnt)
go_struct.Max_tasks_per_node = uint16(c_struct.max_tasks_per_node)
go_struct.Min_job_age = uint32(c_struct.min_job_age)
go_struct.Mpi_default = C.GoString(c_struct.mpi_default)
go_struct.Mpi_params = C.GoString(c_struct.mpi_params)
go_struct.Msg_timeout = uint16(c_struct.msg_timeout)
go_struct.Next_job_id = uint32(c_struct.next_job_id)
go_struct.Node_prefix = C.GoString(c_struct.node_prefix)
go_struct.Over_time_limit = uint16(c_struct.over_time_limit)
go_struct.Plugindir = C.GoString(c_struct.plugindir)
go_struct.Plugstack = C.GoString(c_struct.plugstack)
go_struct.Preempt_mode = uint16(c_struct.preempt_mode)
go_struct.Preempt_type = C.GoString(c_struct.preempt_type)
go_struct.Priority_decay_hl = uint32(c_struct.priority_decay_hl)
go_struct.Priority_calc_period = uint32(c_struct.priority_calc_period)
go_struct.Priority_favor_small = uint16(c_struct.priority_favor_small)
go_struct.Priority_flags = uint16(c_struct.priority_flags)
go_struct.Priority_max_age = uint32(c_struct.priority_max_age)
go_struct.Priority_reset_period = uint16(c_struct.priority_reset_period)
go_struct.Priority_type = C.GoString(c_struct.priority_type)
go_struct.Priority_weight_age = uint32(c_struct.priority_weight_age)
go_struct.Priority_weight_fs = uint32(c_struct.priority_weight_fs)
go_struct.Priority_weight_js = uint32(c_struct.priority_weight_js)
go_struct.Priority_weight_part = uint32(c_struct.priority_weight_part)
go_struct.Priority_weight_qos = uint32(c_struct.priority_weight_qos)
go_struct.Private_data = uint16(c_struct.private_data)
go_struct.Proctrack_type = C.GoString(c_struct.proctrack_type)
go_struct.Prolog = C.GoString(c_struct.prolog)
go_struct.Prolog_slurmctld = C.GoString(c_struct.prolog_slurmctld)
go_struct.Propagate_prio_process = uint16(c_struct.propagate_prio_process)
go_struct.Propagate_rlimits = C.GoString(c_struct.propagate_rlimits)
go_struct.Propagate_rlimits_except = C.GoString(c_struct.propagate_rlimits_except)
go_struct.Reboot_program = C.GoString(c_struct.reboot_program)
go_struct.Reconfig_flags = uint16(c_struct.reconfig_flags)
go_struct.Resume_program = C.GoString(c_struct.resume_program)
go_struct.Resume_rate = uint16(c_struct.resume_rate)
go_struct.Resume_timeout = uint16(c_struct.resume_timeout)
go_struct.Resv_epilog = C.GoString(c_struct.resv_epilog)
go_struct.Resv_over_run = uint16(c_struct.resv_over_run)
go_struct.Resv_prolog = C.GoString(c_struct.resv_prolog)
go_struct.Ret2service = uint16(c_struct.ret2service)
go_struct.Salloc_default_command = C.GoString(c_struct.salloc_default_command)
go_struct.Sched_logfile = C.GoString(c_struct.sched_logfile)
go_struct.Sched_log_level = uint16(c_struct.sched_log_level)
go_struct.Sched_params = C.GoString(c_struct.sched_params)
go_struct.Sched_time_slice = uint16(c_struct.sched_time_slice)
go_struct.Schedtype = C.GoString(c_struct.schedtype)
go_struct.Select_type = C.GoString(c_struct.select_type)
go_struct.Select_type_param = uint16(c_struct.select_type_param)
go_struct.Slurm_conf = C.GoString(c_struct.slurm_conf)
go_struct.Slurm_user_id = uint32(c_struct.slurm_user_id)
go_struct.Slurm_user_name = C.GoString(c_struct.slurm_user_name)
go_struct.Slurmd_user_id = uint32(c_struct.slurmd_user_id)
go_struct.Slurmd_user_name = C.GoString(c_struct.slurmd_user_name)
go_struct.Slurmctld_debug = uint16(c_struct.slurmctld_debug)
go_struct.Slurmctld_logfile = C.GoString(c_struct.slurmctld_logfile)
go_struct.Slurmctld_pidfile = C.GoString(c_struct.slurmctld_pidfile)
go_struct.Slurmctld_plugstack = C.GoString(c_struct.slurmctld_plugstack)
go_struct.Slurmctld_port = uint32(c_struct.slurmctld_port)
go_struct.Slurmctld_port_count = uint16(c_struct.slurmctld_port_count)
go_struct.Slurmctld_timeout = uint16(c_struct.slurmctld_timeout)
go_struct.Slurmd_debug = uint16(c_struct.slurmd_debug)
go_struct.Slurmd_logfile = C.GoString(c_struct.slurmd_logfile)
go_struct.Slurmd_pidfile = C.GoString(c_struct.slurmd_pidfile)
go_struct.Slurmd_port = uint32(c_struct.slurmd_port)
go_struct.Slurmd_spooldir = C.GoString(c_struct.slurmd_spooldir)
go_struct.Slurmd_timeout = uint16(c_struct.slurmd_timeout)
go_struct.Srun_epilog = C.GoString(c_struct.srun_epilog)
go_struct.Srun_prolog = C.GoString(c_struct.srun_prolog)
go_struct.State_save_location = C.GoString(c_struct.state_save_location)
go_struct.Suspend_exc_nodes = C.GoString(c_struct.suspend_exc_nodes)
go_struct.Suspend_exc_parts = C.GoString(c_struct.suspend_exc_parts)
go_struct.Suspend_program = C.GoString(c_struct.suspend_program)
go_struct.Suspend_rate = uint16(c_struct.suspend_rate)
go_struct.Suspend_time = uint32(c_struct.suspend_time)
go_struct.Suspend_timeout = uint16(c_struct.suspend_timeout)
go_struct.Switch_type = C.GoString(c_struct.switch_type)
go_struct.Task_epilog = C.GoString(c_struct.task_epilog)
go_struct.Task_plugin = C.GoString(c_struct.task_plugin)
go_struct.Task_plugin_param = uint32(c_struct.task_plugin_param)
go_struct.Task_prolog = C.GoString(c_struct.task_prolog)
go_struct.Tmp_fs = C.GoString(c_struct.tmp_fs)
go_struct.Topology_plugin = C.GoString(c_struct.topology_plugin)
go_struct.Track_wckey = uint16(c_struct.track_wckey)
go_struct.Tree_width = uint16(c_struct.tree_width)
go_struct.Unkillable_program = C.GoString(c_struct.unkillable_program)
go_struct.Unkillable_timeout = uint16(c_struct.unkillable_timeout)
go_struct.Use_pam = uint16(c_struct.use_pam)
go_struct.Version = C.GoString(c_struct.version)
go_struct.Vsize_factor = uint16(c_struct.vsize_factor)
go_struct.Wait_time = uint16(c_struct.wait_time)
return go_struct
}
func Print_Ctl_conf(go_struct Ctl_conf) {
fmt.Printf("%s:\t %d\n", "last update", go_struct.Last_update)
fmt.Printf("%s:\t %s\n", "accounting storage tres", go_struct.Accounting_storage_tres)
fmt.Printf("%s:\t %d\n", "accounting storage enforce", go_struct.Accounting_storage_enforce)
fmt.Printf("%s:\t %s\n", "accounting storage backup host", go_struct.Accounting_storage_backup_host)
fmt.Printf("%s:\t %s\n", "accounting storage host", go_struct.Accounting_storage_host)
fmt.Printf("%s:\t %s\n", "accounting storage loc", go_struct.Accounting_storage_loc)
fmt.Printf("%s:\t %s\n", "accounting storage pass", go_struct.Accounting_storage_pass)
fmt.Printf("%s:\t %d\n", "accounting storage port", go_struct.Accounting_storage_port)
fmt.Printf("%s:\t %s\n", "accounting storage type", go_struct.Accounting_storage_type)
fmt.Printf("%s:\t %s\n", "accounting storage user", go_struct.Accounting_storage_user)
fmt.Printf("%s:\t %d\n", "acctng store job comment", go_struct.Acctng_store_job_comment)
fmt.Printf("%s:\t %s\n", "acct gather energy type", go_struct.Acct_gather_energy_type)
fmt.Printf("%s:\t %s\n", "acct gather profile type", go_struct.Acct_gather_profile_type)
fmt.Printf("%s:\t %s\n", "acct gather filesystem type", go_struct.Acct_gather_filesystem_type)
fmt.Printf("%s:\t %d\n", "acct gather node freq", go_struct.Acct_gather_node_freq)
fmt.Printf("%s:\t %s\n", "authtype", go_struct.Authtype)
fmt.Printf("%s:\t %d\n", "batch start timeout", go_struct.Batch_start_timeout)
fmt.Printf("%s:\t %d\n", "boot time", go_struct.Boot_time)
fmt.Printf("%s:\t %s\n", "checkpoint type", go_struct.Checkpoint_type)
fmt.Printf("%s:\t %s\n", "cluster name", go_struct.Cluster_name)
fmt.Printf("%s:\t %d\n", "complete wait", go_struct.Complete_wait)
fmt.Printf("%s:\t %d\n", "debug flags", go_struct.Debug_flags)
fmt.Printf("%s:\t %d\n", "def mem per cpu", go_struct.Def_mem_per_cpu)
fmt.Printf("%s:\t %d\n", "disable root jobs", go_struct.Disable_root_jobs)
fmt.Printf("%s:\t %d\n", "enforce part limits", go_struct.Enforce_part_limits)
fmt.Printf("%s:\t %s\n", "epilog", go_struct.Epilog)
fmt.Printf("%s:\t %d\n", "epilog msg time", go_struct.Epilog_msg_time)
fmt.Printf("%s:\t %s\n", "epilog slurmctld", go_struct.Epilog_slurmctld)
fmt.Printf("%s:\t %s\n", "ext sensors type", go_struct.Ext_sensors_type)
fmt.Printf("%s:\t %d\n", "ext sensors freq", go_struct.Ext_sensors_freq)
fmt.Printf("%s:\t %d\n", "fast schedule", go_struct.Fast_schedule)
fmt.Printf("%s:\t %d\n", "first job id", go_struct.First_job_id)
fmt.Printf("%s:\t %d\n", "get env timeout", go_struct.Get_env_timeout)
fmt.Printf("%s:\t %s\n", "gres plugins", go_struct.Gres_plugins)
fmt.Printf("%s:\t %d\n", "hash val", go_struct.Hash_val)
fmt.Printf("%s:\t %d\n", "health check interval", go_struct.Health_check_interval)
fmt.Printf("%s:\t %d\n", "health check node state", go_struct.Health_check_node_state)
fmt.Printf("%s:\t %s\n", "health check program", go_struct.Health_check_program)
fmt.Printf("%s:\t %d\n", "inactive limit", go_struct.Inactive_limit)
fmt.Printf("%s:\t %s\n", "job acct gather freq", go_struct.Job_acct_gather_freq)
fmt.Printf("%s:\t %s\n", "job acct gather type", go_struct.Job_acct_gather_type)
fmt.Printf("%s:\t %s\n", "job ckpt dir", go_struct.Job_ckpt_dir)
fmt.Printf("%s:\t %s\n", "job comp host", go_struct.Job_comp_host)
fmt.Printf("%s:\t %s\n", "job comp loc", go_struct.Job_comp_loc)
fmt.Printf("%s:\t %s\n", "job comp pass", go_struct.Job_comp_pass)
fmt.Printf("%s:\t %d\n", "job comp port", go_struct.Job_comp_port)
fmt.Printf("%s:\t %s\n", "job comp type", go_struct.Job_comp_type)
fmt.Printf("%s:\t %s\n", "job comp user", go_struct.Job_comp_user)
fmt.Printf("%s:\t %s\n", "job credential private key", go_struct.Job_credential_private_key)
fmt.Printf("%s:\t %s\n", "job credential public certificate", go_struct.Job_credential_public_certificate)
fmt.Printf("%s:\t %d\n", "job file append", go_struct.Job_file_append)
fmt.Printf("%s:\t %d\n", "job requeue", go_struct.Job_requeue)
fmt.Printf("%s:\t %s\n", "job submit plugins", go_struct.Job_submit_plugins)
fmt.Printf("%s:\t %d\n", "keep alive time", go_struct.Keep_alive_time)
fmt.Printf("%s:\t %d\n", "kill on bad exit", go_struct.Kill_on_bad_exit)
fmt.Printf("%s:\t %d\n", "kill wait", go_struct.Kill_wait)
fmt.Printf("%s:\t %s\n", "launch type", go_struct.Launch_type)
fmt.Printf("%s:\t %s\n", "licenses", go_struct.Licenses)
fmt.Printf("%s:\t %s\n", "licenses used", go_struct.Licenses_used)
fmt.Printf("%s:\t %s\n", "mail prog", go_struct.Mail_prog)
fmt.Printf("%s:\t %d\n", "max array sz", go_struct.Max_array_sz)
fmt.Printf("%s:\t %d\n", "max job cnt", go_struct.Max_job_cnt)
fmt.Printf("%s:\t %d\n", "max job id", go_struct.Max_job_id)
fmt.Printf("%s:\t %d\n", "max mem per cpu", go_struct.Max_mem_per_cpu)
fmt.Printf("%s:\t %d\n", "max step cnt", go_struct.Max_step_cnt)
fmt.Printf("%s:\t %d\n", "max tasks per node", go_struct.Max_tasks_per_node)
fmt.Printf("%s:\t %d\n", "min job age", go_struct.Min_job_age)
fmt.Printf("%s:\t %s\n", "mpi default", go_struct.Mpi_default)
fmt.Printf("%s:\t %s\n", "mpi params", go_struct.Mpi_params)
fmt.Printf("%s:\t %d\n", "msg timeout", go_struct.Msg_timeout)
fmt.Printf("%s:\t %d\n", "next job id", go_struct.Next_job_id)
fmt.Printf("%s:\t %s\n", "node prefix", go_struct.Node_prefix)
fmt.Printf("%s:\t %d\n", "over time limit", go_struct.Over_time_limit)
fmt.Printf("%s:\t %s\n", "plugindir", go_struct.Plugindir)
fmt.Printf("%s:\t %s\n", "plugstack", go_struct.Plugstack)
fmt.Printf("%s:\t %d\n", "preempt mode", go_struct.Preempt_mode)
fmt.Printf("%s:\t %s\n", "preempt type", go_struct.Preempt_type)
fmt.Printf("%s:\t %d\n", "priority decay hl", go_struct.Priority_decay_hl)
fmt.Printf("%s:\t %d\n", "priority calc period", go_struct.Priority_calc_period)
fmt.Printf("%s:\t %d\n", "priority favor small", go_struct.Priority_favor_small)
fmt.Printf("%s:\t %d\n", "priority flags", go_struct.Priority_flags)
fmt.Printf("%s:\t %d\n", "priority max age", go_struct.Priority_max_age)
fmt.Printf("%s:\t %d\n", "priority reset period", go_struct.Priority_reset_period)
fmt.Printf("%s:\t %s\n", "priority type", go_struct.Priority_type)
fmt.Printf("%s:\t %d\n", "priority weight age", go_struct.Priority_weight_age)
fmt.Printf("%s:\t %d\n", "priority weight fs", go_struct.Priority_weight_fs)
fmt.Printf("%s:\t %d\n", "priority weight js", go_struct.Priority_weight_js)
fmt.Printf("%s:\t %d\n", "priority weight part", go_struct.Priority_weight_part)
fmt.Printf("%s:\t %d\n", "priority weight qos", go_struct.Priority_weight_qos)
fmt.Printf("%s:\t %d\n", "private data", go_struct.Private_data)
fmt.Printf("%s:\t %s\n", "proctrack type", go_struct.Proctrack_type)
fmt.Printf("%s:\t %s\n", "prolog", go_struct.Prolog)
fmt.Printf("%s:\t %s\n", "prolog slurmctld", go_struct.Prolog_slurmctld)
fmt.Printf("%s:\t %d\n", "propagate prio process", go_struct.Propagate_prio_process)
fmt.Printf("%s:\t %s\n", "propagate rlimits", go_struct.Propagate_rlimits)
fmt.Printf("%s:\t %s\n", "propagate rlimits except", go_struct.Propagate_rlimits_except)
fmt.Printf("%s:\t %s\n", "reboot program", go_struct.Reboot_program)
fmt.Printf("%s:\t %d\n", "reconfig flags", go_struct.Reconfig_flags)
fmt.Printf("%s:\t %s\n", "resume program", go_struct.Resume_program)
fmt.Printf("%s:\t %d\n", "resume rate", go_struct.Resume_rate)
fmt.Printf("%s:\t %d\n", "resume timeout", go_struct.Resume_timeout)
fmt.Printf("%s:\t %s\n", "resv epilog", go_struct.Resv_epilog)
fmt.Printf("%s:\t %d\n", "resv over run", go_struct.Resv_over_run)
fmt.Printf("%s:\t %s\n", "resv prolog", go_struct.Resv_prolog)
fmt.Printf("%s:\t %d\n", "ret2service", go_struct.Ret2service)
fmt.Printf("%s:\t %s\n", "salloc default command", go_struct.Salloc_default_command)
fmt.Printf("%s:\t %s\n", "sched logfile", go_struct.Sched_logfile)
fmt.Printf("%s:\t %d\n", "sched log level", go_struct.Sched_log_level)
fmt.Printf("%s:\t %s\n", "sched params", go_struct.Sched_params)
fmt.Printf("%s:\t %d\n", "sched time slice", go_struct.Sched_time_slice)
fmt.Printf("%s:\t %s\n", "schedtype", go_struct.Schedtype)
fmt.Printf("%s:\t %s\n", "select type", go_struct.Select_type)
fmt.Printf("%s:\t %d\n", "select type param", go_struct.Select_type_param)
fmt.Printf("%s:\t %s\n", "slurm conf", go_struct.Slurm_conf)
fmt.Printf("%s:\t %d\n", "slurm user id", go_struct.Slurm_user_id)
fmt.Printf("%s:\t %s\n", "slurm user name", go_struct.Slurm_user_name)
fmt.Printf("%s:\t %d\n", "slurmd user id", go_struct.Slurmd_user_id)
fmt.Printf("%s:\t %s\n", "slurmd user name", go_struct.Slurmd_user_name)
fmt.Printf("%s:\t %d\n", "slurmctld debug", go_struct.Slurmctld_debug)
fmt.Printf("%s:\t %s\n", "slurmctld logfile", go_struct.Slurmctld_logfile)
fmt.Printf("%s:\t %s\n", "slurmctld pidfile", go_struct.Slurmctld_pidfile)
fmt.Printf("%s:\t %s\n", "slurmctld plugstack", go_struct.Slurmctld_plugstack)
fmt.Printf("%s:\t %d\n", "slurmctld port", go_struct.Slurmctld_port)
fmt.Printf("%s:\t %d\n", "slurmctld port count", go_struct.Slurmctld_port_count)
fmt.Printf("%s:\t %d\n", "slurmctld timeout", go_struct.Slurmctld_timeout)
fmt.Printf("%s:\t %d\n", "slurmd debug", go_struct.Slurmd_debug)
fmt.Printf("%s:\t %s\n", "slurmd logfile", go_struct.Slurmd_logfile)
fmt.Printf("%s:\t %s\n", "slurmd pidfile", go_struct.Slurmd_pidfile)
fmt.Printf("%s:\t %d\n", "slurmd port", go_struct.Slurmd_port)
fmt.Printf("%s:\t %s\n", "slurmd spooldir", go_struct.Slurmd_spooldir)
fmt.Printf("%s:\t %d\n", "slurmd timeout", go_struct.Slurmd_timeout)
fmt.Printf("%s:\t %s\n", "srun epilog", go_struct.Srun_epilog)
fmt.Printf("%s:\t %s\n", "srun prolog", go_struct.Srun_prolog)
fmt.Printf("%s:\t %s\n", "state save location", go_struct.State_save_location)
fmt.Printf("%s:\t %s\n", "suspend exc nodes", go_struct.Suspend_exc_nodes)
fmt.Printf("%s:\t %s\n", "suspend exc parts", go_struct.Suspend_exc_parts)
fmt.Printf("%s:\t %s\n", "suspend program", go_struct.Suspend_program)
fmt.Printf("%s:\t %d\n", "suspend rate", go_struct.Suspend_rate)
fmt.Printf("%s:\t %d\n", "suspend time", go_struct.Suspend_time)
fmt.Printf("%s:\t %d\n", "suspend timeout", go_struct.Suspend_timeout)
fmt.Printf("%s:\t %s\n", "switch type", go_struct.Switch_type)
fmt.Printf("%s:\t %s\n", "task epilog", go_struct.Task_epilog)
fmt.Printf("%s:\t %s\n", "task plugin", go_struct.Task_plugin)
fmt.Printf("%s:\t %d\n", "task plugin param", go_struct.Task_plugin_param)
fmt.Printf("%s:\t %s\n", "task prolog", go_struct.Task_prolog)
fmt.Printf("%s:\t %s\n", "tmp fs", go_struct.Tmp_fs)
fmt.Printf("%s:\t %s\n", "topology plugin", go_struct.Topology_plugin)
fmt.Printf("%s:\t %d\n", "track wckey", go_struct.Track_wckey)
fmt.Printf("%s:\t %d\n", "tree width", go_struct.Tree_width)
fmt.Printf("%s:\t %s\n", "unkillable program", go_struct.Unkillable_program)
fmt.Printf("%s:\t %d\n", "unkillable timeout", go_struct.Unkillable_timeout)
fmt.Printf("%s:\t %d\n", "use pam", go_struct.Use_pam)
fmt.Printf("%s:\t %s\n", "version", go_struct.Version)
fmt.Printf("%s:\t %d\n", "vsize factor", go_struct.Vsize_factor)
fmt.Printf("%s:\t %d\n", "wait time", go_struct.Wait_time)
}
func Version() int {
	return int(C.slurm_api_version())
}
func VersionString(v int) string {
var major, minor, micro int
var version string
major = ((v >> 16) & 0xff)
minor = ((v >> 8) & 0xff)
micro = (v & 0xff)
version = fmt.Sprintf("%d.%d.%d", major, minor, micro)
return version
}
func GetConfig() Ctl_conf {
	var ret_config Ctl_conf
	c_config := C.get_config(C.uint64_t(0))
	if c_config == nil {
		// slurm_load_ctl_conf failed; return the zero-valued config
		return ret_config
	}
	ret_config = Ctl_conf_convert_c_to_go(c_config)
	C.slurm_free_ctl_conf(c_config)
	return ret_config
}
func GetErrorString(errno uint32) string {
msg := C.GoString(C.slurm_strerror(C.int(errno)))
return msg
}
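// Hedged usage sketch (not part of the original file): printing the linked
// libslurm version and the full controller configuration.
func ExampleVersionAndConfig() {
	fmt.Printf("libslurm version: %s\n", VersionString(Version()))
	Print_Ctl_conf(GetConfig())
}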

View File

@ -0,0 +1,752 @@
package submit_job
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
#include <stdint.h>
#ifndef ptr_convert
#define ptr_convert
inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
inline int64_t int64_ptr(int64_t* pointer) {
if (NULL == pointer) {
return 0;}
return *pointer;
}
#endif
struct submit_response_msg *submit_job(struct job_descriptor *desc)
{
struct submit_response_msg *resp_msg;
if (slurm_submit_batch_job(desc,
&resp_msg)) {
return NULL;
}
return resp_msg;
}
int update_job (struct job_descriptor *msg) {
return slurm_update_job (msg);
}
void free_submit_response_msg(struct submit_response_msg *msg)
{
slurm_free_submit_response_response_msg(msg);
}
*/
import "C"
import (
job_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/jobinfo"
"fmt"
)
import "unsafe"
type Job_descriptor struct {
Account string
Acctg_freq string
Alloc_node string
Alloc_resp_port uint16
Alloc_sid uint32
Argc uint32
Argv []string
Array_inx string
Begin_time int64
Ckpt_interval uint16
Ckpt_dir string
Comment string
Contiguous uint16
Cpu_bind string
Cpu_bind_type uint16
Dependency string
End_time int64
Environment []string
Env_size uint32
Exc_nodes string
Features string
Group_id uint32
Immediate uint16
Job_id uint32
Kill_on_node_fail uint16
Licenses string
Mail_type uint16
Mail_user string
Mem_bind string
Mem_bind_type uint16
Name string
Network string
Nice uint32
Num_tasks uint32
Open_mode uint8
Other_port uint16
Overcommit uint8
Partition string
Plane_size uint16
Priority uint32
Profile uint32
Qos string
Reboot uint16
Resp_host string
Req_nodes string
Requeue uint16
Reservation string
Script string
//script_buf void;
Shared uint16
//spank_job_env char**;
Spank_job_env_size uint32
Task_dist uint32
Time_limit uint32
Time_min uint32
User_id uint32
Wait_all_nodes uint16
Warn_signal uint16
Warn_time uint16
Work_dir string
Cpus_per_task uint16
Min_cpus uint32
Max_cpus uint32
Min_nodes uint32
Max_nodes uint32
Boards_per_node uint16
Sockets_per_board uint16
Sockets_per_node uint16
Cores_per_socket uint16
Threads_per_core uint16
Ntasks_per_node uint16
Ntasks_per_socket uint16
Ntasks_per_core uint16
Ntasks_per_board uint16
Pn_min_cpus uint16
Pn_min_memory uint64
Pn_min_tmp_disk uint32
Req_switch uint32
Std_err string
Std_in string
Std_out string
Tres_req_cnt uint64
Wait4switch uint32
Wckey string
}
func Job_descriptor_convert_c_to_go(c_struct *C.struct_job_descriptor) Job_descriptor {
var go_struct Job_descriptor
go_struct.Account = C.GoString(c_struct.account)
go_struct.Acctg_freq = C.GoString(c_struct.acctg_freq)
go_struct.Alloc_node = C.GoString(c_struct.alloc_node)
go_struct.Alloc_resp_port = uint16(c_struct.alloc_resp_port)
go_struct.Alloc_sid = uint32(c_struct.alloc_sid)
go_struct.Argc = uint32(c_struct.argc)
go_struct.Array_inx = C.GoString(c_struct.array_inx)
go_struct.Begin_time = int64(c_struct.begin_time)
go_struct.Ckpt_interval = uint16(c_struct.ckpt_interval)
go_struct.Ckpt_dir = C.GoString(c_struct.ckpt_dir)
go_struct.Comment = C.GoString(c_struct.comment)
go_struct.Contiguous = uint16(c_struct.contiguous)
go_struct.Cpu_bind = C.GoString(c_struct.cpu_bind)
go_struct.Cpu_bind_type = uint16(c_struct.cpu_bind_type)
go_struct.Dependency = C.GoString(c_struct.dependency)
go_struct.End_time = int64(c_struct.end_time)
go_struct.Env_size = uint32(c_struct.env_size)
go_struct.Exc_nodes = C.GoString(c_struct.exc_nodes)
go_struct.Features = C.GoString(c_struct.features)
go_struct.Group_id = uint32(c_struct.group_id)
go_struct.Immediate = uint16(c_struct.immediate)
go_struct.Job_id = uint32(c_struct.job_id)
go_struct.Kill_on_node_fail = uint16(c_struct.kill_on_node_fail)
go_struct.Licenses = C.GoString(c_struct.licenses)
go_struct.Mail_type = uint16(c_struct.mail_type)
go_struct.Mail_user = C.GoString(c_struct.mail_user)
go_struct.Mem_bind = C.GoString(c_struct.mem_bind)
go_struct.Mem_bind_type = uint16(c_struct.mem_bind_type)
go_struct.Name = C.GoString(c_struct.name)
go_struct.Network = C.GoString(c_struct.network)
go_struct.Nice = uint32(c_struct.nice)
go_struct.Num_tasks = uint32(c_struct.num_tasks)
go_struct.Open_mode = uint8(c_struct.open_mode)
go_struct.Other_port = uint16(c_struct.other_port)
go_struct.Overcommit = uint8(c_struct.overcommit)
go_struct.Partition = C.GoString(c_struct.partition)
go_struct.Plane_size = uint16(c_struct.plane_size)
go_struct.Priority = uint32(c_struct.priority)
go_struct.Profile = uint32(c_struct.profile)
go_struct.Qos = C.GoString(c_struct.qos)
go_struct.Reboot = uint16(c_struct.reboot)
go_struct.Resp_host = C.GoString(c_struct.resp_host)
go_struct.Req_nodes = C.GoString(c_struct.req_nodes)
go_struct.Requeue = uint16(c_struct.requeue)
go_struct.Reservation = C.GoString(c_struct.reservation)
go_struct.Script = C.GoString(c_struct.script)
go_struct.Shared = uint16(c_struct.shared)
go_struct.Spank_job_env_size = uint32(c_struct.spank_job_env_size)
go_struct.Task_dist = uint32(c_struct.task_dist)
go_struct.Time_limit = uint32(c_struct.time_limit)
go_struct.Time_min = uint32(c_struct.time_min)
go_struct.User_id = uint32(c_struct.user_id)
go_struct.Wait_all_nodes = uint16(c_struct.wait_all_nodes)
go_struct.Warn_signal = uint16(c_struct.warn_signal)
go_struct.Warn_time = uint16(c_struct.warn_time)
go_struct.Work_dir = C.GoString(c_struct.work_dir)
go_struct.Cpus_per_task = uint16(c_struct.cpus_per_task)
go_struct.Min_cpus = uint32(c_struct.min_cpus)
go_struct.Max_cpus = uint32(c_struct.max_cpus)
go_struct.Min_nodes = uint32(c_struct.min_nodes)
go_struct.Max_nodes = uint32(c_struct.max_nodes)
go_struct.Boards_per_node = uint16(c_struct.boards_per_node)
go_struct.Sockets_per_board = uint16(c_struct.sockets_per_board)
go_struct.Sockets_per_node = uint16(c_struct.sockets_per_node)
go_struct.Cores_per_socket = uint16(c_struct.cores_per_socket)
go_struct.Threads_per_core = uint16(c_struct.threads_per_core)
go_struct.Ntasks_per_node = uint16(c_struct.ntasks_per_node)
go_struct.Ntasks_per_socket = uint16(c_struct.ntasks_per_socket)
go_struct.Ntasks_per_core = uint16(c_struct.ntasks_per_core)
go_struct.Ntasks_per_board = uint16(c_struct.ntasks_per_board)
go_struct.Pn_min_cpus = uint16(c_struct.pn_min_cpus)
go_struct.Pn_min_memory = uint64(c_struct.pn_min_memory)
go_struct.Pn_min_tmp_disk = uint32(c_struct.pn_min_tmp_disk)
go_struct.Req_switch = uint32(c_struct.req_switch)
go_struct.Std_err = C.GoString(c_struct.std_err)
go_struct.Std_in = C.GoString(c_struct.std_in)
go_struct.Std_out = C.GoString(c_struct.std_out)
go_struct.Wait4switch = uint32(c_struct.wait4switch)
go_struct.Wckey = C.GoString(c_struct.wckey)
return go_struct
}
func Print_Job_descriptor(go_struct Job_descriptor) {
fmt.Printf("%s:\t %s\n", "account", go_struct.Account)
fmt.Printf("%s:\t %s\n", "acctg freq", go_struct.Acctg_freq)
fmt.Printf("%s:\t %s\n", "alloc node", go_struct.Alloc_node)
fmt.Printf("%s:\t %d\n", "alloc resp port", go_struct.Alloc_resp_port)
fmt.Printf("%s:\t %d\n", "alloc sid", go_struct.Alloc_sid)
fmt.Printf("%s:\t %d\n", "argc", go_struct.Argc)
fmt.Printf("%s:\t %s\n", "array inx", go_struct.Array_inx)
fmt.Printf("%s:\t %d\n", "begin time", go_struct.Begin_time)
fmt.Printf("%s:\t %d\n", "ckpt interval", go_struct.Ckpt_interval)
fmt.Printf("%s:\t %s\n", "ckpt dir", go_struct.Ckpt_dir)
fmt.Printf("%s:\t %s\n", "comment", go_struct.Comment)
fmt.Printf("%s:\t %d\n", "contiguous", go_struct.Contiguous)
fmt.Printf("%s:\t %s\n", "cpu bind", go_struct.Cpu_bind)
fmt.Printf("%s:\t %d\n", "cpu bind type", go_struct.Cpu_bind_type)
fmt.Printf("%s:\t %s\n", "dependency", go_struct.Dependency)
fmt.Printf("%s:\t %d\n", "end time", go_struct.End_time)
fmt.Printf("%s:\t %d\n", "env size", go_struct.Env_size)
fmt.Printf("%s:\t %s\n", "exc nodes", go_struct.Exc_nodes)
fmt.Printf("%s:\t %s\n", "features", go_struct.Features)
fmt.Printf("%s:\t %d\n", "group id", go_struct.Group_id)
fmt.Printf("%s:\t %d\n", "immediate", go_struct.Immediate)
fmt.Printf("%s:\t %d\n", "job id", go_struct.Job_id)
fmt.Printf("%s:\t %d\n", "kill on node fail", go_struct.Kill_on_node_fail)
fmt.Printf("%s:\t %s\n", "licenses", go_struct.Licenses)
fmt.Printf("%s:\t %d\n", "mail type", go_struct.Mail_type)
fmt.Printf("%s:\t %s\n", "mail user", go_struct.Mail_user)
fmt.Printf("%s:\t %s\n", "mem bind", go_struct.Mem_bind)
fmt.Printf("%s:\t %d\n", "mem bind type", go_struct.Mem_bind_type)
fmt.Printf("%s:\t %s\n", "name", go_struct.Name)
fmt.Printf("%s:\t %s\n", "network", go_struct.Network)
fmt.Printf("%s:\t %d\n", "nice", go_struct.Nice)
fmt.Printf("%s:\t %d\n", "num tasks", go_struct.Num_tasks)
fmt.Printf("%s:\t %d\n", "open mode", go_struct.Open_mode)
fmt.Printf("%s:\t %d\n", "other port", go_struct.Other_port)
fmt.Printf("%s:\t %d\n", "overcommit", go_struct.Overcommit)
fmt.Printf("%s:\t %s\n", "partition", go_struct.Partition)
fmt.Printf("%s:\t %d\n", "plane size", go_struct.Plane_size)
fmt.Printf("%s:\t %d\n", "priority", go_struct.Priority)
fmt.Printf("%s:\t %d\n", "profile", go_struct.Profile)
fmt.Printf("%s:\t %s\n", "qos", go_struct.Qos)
fmt.Printf("%s:\t %d\n", "reboot", go_struct.Reboot)
fmt.Printf("%s:\t %s\n", "resp host", go_struct.Resp_host)
fmt.Printf("%s:\t %s\n", "req nodes", go_struct.Req_nodes)
fmt.Printf("%s:\t %d\n", "requeue", go_struct.Requeue)
fmt.Printf("%s:\t %s\n", "reservation", go_struct.Reservation)
fmt.Printf("%s:\t %s\n", "script", go_struct.Script)
fmt.Printf("%s:\t %d\n", "shared", go_struct.Shared)
fmt.Printf("%s:\t %d\n", "spank job env size", go_struct.Spank_job_env_size)
fmt.Printf("%s:\t %d\n", "task dist", go_struct.Task_dist)
fmt.Printf("%s:\t %d\n", "time limit", go_struct.Time_limit)
fmt.Printf("%s:\t %d\n", "time min", go_struct.Time_min)
fmt.Printf("%s:\t %d\n", "user id", go_struct.User_id)
fmt.Printf("%s:\t %d\n", "wait all nodes", go_struct.Wait_all_nodes)
fmt.Printf("%s:\t %d\n", "warn signal", go_struct.Warn_signal)
fmt.Printf("%s:\t %d\n", "warn time", go_struct.Warn_time)
fmt.Printf("%s:\t %s\n", "work dir", go_struct.Work_dir)
fmt.Printf("%s:\t %d\n", "cpus per task", go_struct.Cpus_per_task)
fmt.Printf("%s:\t %d\n", "min cpus", go_struct.Min_cpus)
fmt.Printf("%s:\t %d\n", "max cpus", go_struct.Max_cpus)
fmt.Printf("%s:\t %d\n", "min nodes", go_struct.Min_nodes)
fmt.Printf("%s:\t %d\n", "max nodes", go_struct.Max_nodes)
fmt.Printf("%s:\t %d\n", "boards per node", go_struct.Boards_per_node)
fmt.Printf("%s:\t %d\n", "sockets per board", go_struct.Sockets_per_board)
fmt.Printf("%s:\t %d\n", "sockets per node", go_struct.Sockets_per_node)
fmt.Printf("%s:\t %d\n", "cores per socket", go_struct.Cores_per_socket)
fmt.Printf("%s:\t %d\n", "threads per core", go_struct.Threads_per_core)
fmt.Printf("%s:\t %d\n", "ntasks per node", go_struct.Ntasks_per_node)
fmt.Printf("%s:\t %d\n", "ntasks per socket", go_struct.Ntasks_per_socket)
fmt.Printf("%s:\t %d\n", "ntasks per core", go_struct.Ntasks_per_core)
fmt.Printf("%s:\t %d\n", "ntasks per board", go_struct.Ntasks_per_board)
fmt.Printf("%s:\t %d\n", "pn min cpus", go_struct.Pn_min_cpus)
fmt.Printf("%s:\t %d\n", "pn min memory", go_struct.Pn_min_memory)
fmt.Printf("%s:\t %d\n", "pn min tmp disk", go_struct.Pn_min_tmp_disk)
fmt.Printf("%s:\t %d\n", "req switch", go_struct.Req_switch)
fmt.Printf("%s:\t %s\n", "std err", go_struct.Std_err)
fmt.Printf("%s:\t %s\n", "std in", go_struct.Std_in)
fmt.Printf("%s:\t %s\n", "std out", go_struct.Std_out)
fmt.Printf("%s:\t %d\n", "tres req cnt", go_struct.Tres_req_cnt)
fmt.Printf("%s:\t %d\n", "wait4switch", go_struct.Wait4switch)
fmt.Printf("%s:\t %s\n", "wckey", go_struct.Wckey)
}
type Update_job_options struct {
Partition string
Qos string
Num_tasks uint32
Ntasks_per_node uint16
Ntasks_per_socket uint16
Ntasks_per_core uint16
Min_nodes uint32
Max_nodes uint32
}
type Submit_response_msg struct {
Job_id uint32
Step_id uint32
Error_code uint32
}
func submit_response_msg_convert_c_to_go(c_struct *C.struct_submit_response_msg) Submit_response_msg {
var go_struct Submit_response_msg
go_struct.Job_id = uint32(c_struct.job_id)
go_struct.Step_id = uint32(c_struct.step_id)
go_struct.Error_code = uint32(c_struct.error_code)
return go_struct
}
func Print_submit_response_msg(go_struct Submit_response_msg) {
fmt.Printf("%s:\t %d\n", "job id", go_struct.Job_id)
fmt.Printf("%s:\t %d\n", "step id", go_struct.Step_id)
fmt.Printf("%s:\t %d\n", "error code", go_struct.Error_code)
}
/* This is an ugly function, since we start to convert everything back */
func Submit_job(go_struct *Job_descriptor) Submit_response_msg {
var c_struct C.struct_job_descriptor
C.slurm_init_job_desc_msg(&c_struct)
if go_struct.Account != "" {
account_s := C.CString(go_struct.Account)
defer C.free(unsafe.Pointer(account_s))
c_struct.account = account_s
}
if go_struct.Acctg_freq != "" {
acctg_freq_s := C.CString(go_struct.Acctg_freq)
defer C.free(unsafe.Pointer(acctg_freq_s))
c_struct.acctg_freq = acctg_freq_s
}
if go_struct.Alloc_node != "" {
alloc_node_s := C.CString(go_struct.Alloc_node)
defer C.free(unsafe.Pointer(alloc_node_s))
c_struct.alloc_node = alloc_node_s
}
if go_struct.Alloc_resp_port != 0 {
c_struct.alloc_resp_port = C.uint16_t(go_struct.Alloc_resp_port)
}
if go_struct.Alloc_sid != 0 {
c_struct.alloc_sid = C.uint32_t(go_struct.Alloc_sid)
}
if len(go_struct.Argv) > 0 {
c_struct.argc = C.uint32_t(len(go_struct.Argv))
cArray := C.malloc(C.size_t(len(go_struct.Argv)) * C.size_t(unsafe.Sizeof(uintptr(0))))
defer C.free(cArray)
a := (*[1<<30 - 1]*C.char)(cArray)
for i := 0; i < len(go_struct.Argv); i++ {
a[i] = C.CString(go_struct.Argv[i])
defer C.free(unsafe.Pointer(a[i]))
}
c_struct.argv = (**C.char)(cArray)
}
if go_struct.Array_inx != "" {
array_inx_s := C.CString(go_struct.Array_inx)
defer C.free(unsafe.Pointer(array_inx_s))
c_struct.array_inx = array_inx_s
}
if go_struct.Begin_time != 0 {
c_struct.begin_time = C.int64_t(go_struct.Begin_time)
}
if go_struct.Ckpt_interval != 0 {
c_struct.ckpt_interval = C.uint16_t(go_struct.Ckpt_interval)
}
if go_struct.Ckpt_dir != "" {
ckpt_dir_s := C.CString(go_struct.Ckpt_dir)
defer C.free(unsafe.Pointer(ckpt_dir_s))
c_struct.ckpt_dir = ckpt_dir_s
}
if go_struct.Comment != "" {
comment_s := C.CString(go_struct.Comment)
defer C.free(unsafe.Pointer(comment_s))
c_struct.comment = comment_s
}
if go_struct.Contiguous != 0 {
c_struct.contiguous = C.uint16_t(go_struct.Contiguous)
}
if go_struct.Cpu_bind != "" {
cpu_bind_s := C.CString(go_struct.Cpu_bind)
defer C.free(unsafe.Pointer(cpu_bind_s))
c_struct.cpu_bind = cpu_bind_s
}
if go_struct.Cpu_bind_type != 0 {
c_struct.cpu_bind_type = C.uint16_t(go_struct.Cpu_bind_type)
}
if go_struct.Dependency != "" {
dependency_s := C.CString(go_struct.Dependency)
defer C.free(unsafe.Pointer(dependency_s))
c_struct.dependency = dependency_s
}
if go_struct.End_time != 0 {
c_struct.end_time = C.int64_t(go_struct.End_time)
}
if len(go_struct.Environment) > 0 {
c_struct.env_size = C.uint32_t(len(go_struct.Environment))
cArray := C.malloc(C.size_t(len(go_struct.Environment)) * C.size_t(unsafe.Sizeof(uintptr(0))))
defer C.free(cArray)
a := (*[1<<30 - 1]*C.char)(cArray)
for i := 0; i < len(go_struct.Environment); i++ {
a[i] = C.CString(go_struct.Environment[i])
defer C.free(unsafe.Pointer(a[i]))
}
c_struct.environment = (**C.char)(cArray)
} else {
// No environment supplied: fall back to a single marker variable.
c_struct.env_size = 1
cArray := C.malloc(C.size_t(unsafe.Sizeof(uintptr(0))))
defer C.free(cArray)
a := (*[1<<30 - 1]*C.char)(cArray)
a[0] = C.CString("SLURM_GO_JOB=TRUE")
defer C.free(unsafe.Pointer(a[0]))
c_struct.environment = (**C.char)(cArray)
}
if go_struct.Exc_nodes != "" {
exc_nodes_s := C.CString(go_struct.Exc_nodes)
defer C.free(unsafe.Pointer(exc_nodes_s))
c_struct.exc_nodes = exc_nodes_s
}
if go_struct.Features != "" {
features_s := C.CString(go_struct.Features)
defer C.free(unsafe.Pointer(features_s))
c_struct.features = features_s
}
if go_struct.Group_id != 0 {
c_struct.group_id = C.uint32_t(go_struct.Group_id)
}
if go_struct.Immediate != 0 {
c_struct.immediate = C.uint16_t(go_struct.Immediate)
}
if go_struct.Job_id != 0 {
c_struct.job_id = C.uint32_t(go_struct.Job_id)
}
if go_struct.Kill_on_node_fail != 0 {
c_struct.kill_on_node_fail = C.uint16_t(go_struct.Kill_on_node_fail)
}
if go_struct.Licenses != "" {
licenses_s := C.CString(go_struct.Licenses)
defer C.free(unsafe.Pointer(licenses_s))
c_struct.licenses = licenses_s
}
if go_struct.Mail_type != 0 {
c_struct.mail_type = C.uint16_t(go_struct.Mail_type)
}
if go_struct.Mail_user != "" {
mail_user_s := C.CString(go_struct.Mail_user)
defer C.free(unsafe.Pointer(mail_user_s))
c_struct.mail_user = mail_user_s
}
if go_struct.Mem_bind != "" {
mem_bind_s := C.CString(go_struct.Mem_bind)
defer C.free(unsafe.Pointer(mem_bind_s))
c_struct.mem_bind = mem_bind_s
}
if go_struct.Mem_bind_type != 0 {
c_struct.mem_bind_type = C.uint16_t(go_struct.Mem_bind_type)
}
if go_struct.Name != "" {
name_s := C.CString(go_struct.Name)
defer C.free(unsafe.Pointer(name_s))
c_struct.name = name_s
}
if go_struct.Network != "" {
network_s := C.CString(go_struct.Network)
defer C.free(unsafe.Pointer(network_s))
c_struct.network = network_s
}
//if go_struct.Nice != 0 {
// c_struct.nice = C.uint32_t(go_struct.Nice)
//}
if go_struct.Num_tasks != 0 {
c_struct.num_tasks = C.uint32_t(go_struct.Num_tasks)
}
if go_struct.Open_mode != 0 {
c_struct.open_mode = C.uint8_t(go_struct.Open_mode)
}
if go_struct.Other_port != 0 {
c_struct.other_port = C.uint16_t(go_struct.Other_port)
}
if go_struct.Overcommit != 0 {
c_struct.overcommit = C.uint8_t(go_struct.Overcommit)
}
if go_struct.Partition != "" {
partition_s := C.CString(go_struct.Partition)
defer C.free(unsafe.Pointer(partition_s))
c_struct.partition = partition_s
}
if go_struct.Plane_size != 0 {
c_struct.plane_size = C.uint16_t(go_struct.Plane_size)
}
if go_struct.Priority != 0 {
c_struct.priority = C.uint32_t(go_struct.Priority)
}
if go_struct.Profile != 0 {
c_struct.profile = C.uint32_t(go_struct.Profile)
}
if go_struct.Qos != "" {
qos_s := C.CString(go_struct.Qos)
defer C.free(unsafe.Pointer(qos_s))
c_struct.qos = qos_s
}
if go_struct.Reboot != 0 {
c_struct.reboot = C.uint16_t(go_struct.Reboot)
}
if go_struct.Resp_host != "" {
resp_host_s := C.CString(go_struct.Resp_host)
defer C.free(unsafe.Pointer(resp_host_s))
c_struct.resp_host = resp_host_s
}
if go_struct.Req_nodes != "" {
req_nodes_s := C.CString(go_struct.Req_nodes)
defer C.free(unsafe.Pointer(req_nodes_s))
c_struct.req_nodes = req_nodes_s
}
if go_struct.Requeue != 0 {
c_struct.requeue = C.uint16_t(go_struct.Requeue)
}
if go_struct.Reservation != "" {
reservation_s := C.CString(go_struct.Reservation)
defer C.free(unsafe.Pointer(reservation_s))
c_struct.reservation = reservation_s
}
if go_struct.Script != "" {
script_s := C.CString(go_struct.Script)
defer C.free(unsafe.Pointer(script_s))
c_struct.script = script_s
}
if go_struct.Shared != 0 {
c_struct.shared = C.uint16_t(go_struct.Shared)
}
if go_struct.Spank_job_env_size != 0 {
c_struct.spank_job_env_size = C.uint32_t(go_struct.Spank_job_env_size)
}
//if go_struct.Task_dist != 0 {
// c_struct.task_dist = C.uint32_t(go_struct.Task_dist)
//}
if go_struct.Time_limit != 0 {
c_struct.time_limit = C.uint32_t(go_struct.Time_limit)
}
if go_struct.Time_min != 0 {
c_struct.time_min = C.uint32_t(go_struct.Time_min)
}
if go_struct.User_id != 0 {
c_struct.user_id = C.uint32_t(go_struct.User_id)
}
if go_struct.Wait_all_nodes != 0 {
c_struct.wait_all_nodes = C.uint16_t(go_struct.Wait_all_nodes)
}
if go_struct.Warn_signal != 0 {
c_struct.warn_signal = C.uint16_t(go_struct.Warn_signal)
}
if go_struct.Warn_time != 0 {
c_struct.warn_time = C.uint16_t(go_struct.Warn_time)
}
if go_struct.Work_dir != "" {
work_dir_s := C.CString(go_struct.Work_dir)
defer C.free(unsafe.Pointer(work_dir_s))
c_struct.work_dir = work_dir_s
}
if go_struct.Cpus_per_task != 0 {
c_struct.cpus_per_task = C.uint16_t(go_struct.Cpus_per_task)
}
if go_struct.Min_cpus != 0 {
c_struct.min_cpus = C.uint32_t(go_struct.Min_cpus)
}
if go_struct.Max_cpus != 0 {
c_struct.max_cpus = C.uint32_t(go_struct.Max_cpus)
}
if go_struct.Min_nodes != 0 {
c_struct.min_nodes = C.uint32_t(go_struct.Min_nodes)
}
if go_struct.Max_nodes != 0 {
c_struct.max_nodes = C.uint32_t(go_struct.Max_nodes)
}
if go_struct.Boards_per_node != 0 {
c_struct.boards_per_node = C.uint16_t(go_struct.Boards_per_node)
}
if go_struct.Sockets_per_board != 0 {
c_struct.sockets_per_board = C.uint16_t(go_struct.Sockets_per_board)
}
if go_struct.Sockets_per_node != 0 {
c_struct.sockets_per_node = C.uint16_t(go_struct.Sockets_per_node)
}
if go_struct.Cores_per_socket != 0 {
c_struct.cores_per_socket = C.uint16_t(go_struct.Cores_per_socket)
}
if go_struct.Threads_per_core != 0 {
c_struct.threads_per_core = C.uint16_t(go_struct.Threads_per_core)
}
if go_struct.Ntasks_per_node != 0 {
c_struct.ntasks_per_node = C.uint16_t(go_struct.Ntasks_per_node)
}
if go_struct.Ntasks_per_socket != 0 {
c_struct.ntasks_per_socket = C.uint16_t(go_struct.Ntasks_per_socket)
}
if go_struct.Ntasks_per_core != 0 {
c_struct.ntasks_per_core = C.uint16_t(go_struct.Ntasks_per_core)
}
if go_struct.Ntasks_per_board != 0 {
c_struct.ntasks_per_board = C.uint16_t(go_struct.Ntasks_per_board)
}
if go_struct.Pn_min_cpus != 0 {
c_struct.pn_min_cpus = C.uint16_t(go_struct.Pn_min_cpus)
}
if go_struct.Pn_min_tmp_disk != 0 {
c_struct.pn_min_tmp_disk = C.uint32_t(go_struct.Pn_min_tmp_disk)
}
if go_struct.Req_switch != 0 {
c_struct.req_switch = C.uint32_t(go_struct.Req_switch)
}
if go_struct.Std_err != "" {
std_err_s := C.CString(go_struct.Std_err)
defer C.free(unsafe.Pointer(std_err_s))
c_struct.std_err = std_err_s
}
if go_struct.Std_in != "" {
std_in_s := C.CString(go_struct.Std_in)
defer C.free(unsafe.Pointer(std_in_s))
c_struct.std_in = std_in_s
}
if go_struct.Std_out != "" {
std_out_s := C.CString(go_struct.Std_out)
defer C.free(unsafe.Pointer(std_out_s))
c_struct.std_out = std_out_s
}
if go_struct.Wait4switch != 0 {
c_struct.wait4switch = C.uint32_t(go_struct.Wait4switch)
}
if go_struct.Wckey != "" {
wckey_s := C.CString(go_struct.Wckey)
defer C.free(unsafe.Pointer(wckey_s))
c_struct.wckey = wckey_s
}
c_msg := C.submit_job(&c_struct)
if c_msg == nil {
go_msg := Submit_response_msg{}
go_msg.Job_id = 1<<31 - 1
go_msg.Error_code = uint32(C.slurm_get_errno())
return go_msg
}
// Only free once we know the C call actually returned a message.
defer C.free_submit_response_msg(c_msg)
go_msg := submit_response_msg_convert_c_to_go(c_msg)
return go_msg
}
func Update_job(update_info Update_job_options, JobId uint32) uint32 {
var c_struct C.struct_job_descriptor
C.slurm_init_job_desc_msg(&c_struct)
if update_info.Partition != "" {
partition_s := C.CString(update_info.Partition)
defer C.free(unsafe.Pointer(partition_s))
c_struct.partition = partition_s
}
if update_info.Qos != "" {
qos_s := C.CString(update_info.Qos)
defer C.free(unsafe.Pointer(qos_s))
c_struct.qos = qos_s
}
if update_info.Num_tasks != 0 {
c_struct.num_tasks = C.uint32_t(update_info.Num_tasks)
}
if update_info.Ntasks_per_core != 0 {
c_struct.ntasks_per_core = C.uint16_t(update_info.Ntasks_per_core)
}
if update_info.Ntasks_per_node != 0 {
c_struct.ntasks_per_node = C.uint16_t(update_info.Ntasks_per_node)
}
if update_info.Ntasks_per_socket != 0 {
c_struct.ntasks_per_socket = C.uint16_t(update_info.Ntasks_per_socket)
}
if update_info.Max_nodes != 0 {
c_struct.max_nodes = C.uint32_t(update_info.Max_nodes)
}
if update_info.Min_nodes != 0 {
c_struct.min_nodes = C.uint32_t(update_info.Min_nodes)
}
job_list := job_info.Get_job(uint32(JobId))
if job_list.Error_code != 0 {
return uint32(job_list.Error_code)
}
job := job_list.Job_list[0]
if job.Job_state != C.JOB_PENDING {
return uint32(C.ESLURM_JOB_NOT_PENDING)
}
c_struct.job_id = C.uint32_t(JobId)
err := C.update_job(&c_struct)
return uint32(err)
}
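
For reference, a minimal usage sketch of the wrapper above. The import path is an assumption (this cgo package's actual path may differ), and the partition name, script body, and uid/gid are placeholders:

```go
package main

import (
	"fmt"

	// Assumed import path for the cgo wrapper shown above.
	submitjob "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
)

func main() {
	desc := submitjob.Job_descriptor{
		Name:       "demo",
		Partition:  "work",                       // placeholder partition
		Script:     "#!/bin/bash\nsrun hostname", // batch script body
		User_id:    1000,                         // placeholder uid
		Group_id:   1000,                         // placeholder gid
		Min_nodes:  1,
		Time_limit: 10, // minutes
	}
	resp := submitjob.Submit_job(&desc)
	if resp.Error_code != 0 {
		fmt.Printf("submit failed, slurm errno %d\n", resp.Error_code)
		return
	}
	fmt.Printf("submitted job %d\n", resp.Job_id)
	// While the job is still pending, a subset of fields can be changed in place.
	if rc := submitjob.Update_job(submitjob.Update_job_options{Qos: "normal"}, resp.Job_id); rc != 0 {
		fmt.Printf("update failed, slurm errno %d\n", rc)
	}
}
```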

View File

@ -0,0 +1,374 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc (unknown)
// source: idl/slurm.proto
package slurmpb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// slurm version
type SlurmVersion int32
const (
// 0 - tianhe
SlurmVersion_tianhe SlurmVersion = 0
)
// Enum value maps for SlurmVersion.
var (
SlurmVersion_name = map[int32]string{
0: "tianhe",
}
SlurmVersion_value = map[string]int32{
"tianhe": 0,
}
)
func (x SlurmVersion) Enum() *SlurmVersion {
p := new(SlurmVersion)
*p = x
return p
}
func (x SlurmVersion) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (SlurmVersion) Descriptor() protoreflect.EnumDescriptor {
return file_idl_slurm_proto_enumTypes[0].Descriptor()
}
func (SlurmVersion) Type() protoreflect.EnumType {
return &file_idl_slurm_proto_enumTypes[0]
}
func (x SlurmVersion) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SlurmVersion.Descriptor instead.
func (SlurmVersion) EnumDescriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{0}
}
type ListNodesReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SlurmVersion int32 `protobuf:"varint,1,opt,name=SlurmVersion,proto3" json:"SlurmVersion,omitempty"`
}
func (x *ListNodesReq) Reset() {
*x = ListNodesReq{}
if protoimpl.UnsafeEnabled {
mi := &file_idl_slurm_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListNodesReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListNodesReq) ProtoMessage() {}
func (x *ListNodesReq) ProtoReflect() protoreflect.Message {
mi := &file_idl_slurm_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListNodesReq.ProtoReflect.Descriptor instead.
func (*ListNodesReq) Descriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{0}
}
func (x *ListNodesReq) GetSlurmVersion() int32 {
if x != nil {
return x.SlurmVersion
}
return 0
}
type NodeInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Boards int32 `protobuf:"varint,1,opt,name=boards,proto3" json:"boards,omitempty"`
Cpus int32 `protobuf:"varint,2,opt,name=cpus,proto3" json:"cpus,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
RealMemory int32 `protobuf:"varint,4,opt,name=realMemory,proto3" json:"realMemory,omitempty"`
Sockets int32 `protobuf:"varint,5,opt,name=sockets,proto3" json:"sockets,omitempty"`
Threads int32 `protobuf:"varint,6,opt,name=threads,proto3" json:"threads,omitempty"`
}
func (x *NodeInfo) Reset() {
*x = NodeInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_idl_slurm_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeInfo) ProtoMessage() {}
func (x *NodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_idl_slurm_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
func (*NodeInfo) Descriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{1}
}
func (x *NodeInfo) GetBoards() int32 {
if x != nil {
return x.Boards
}
return 0
}
func (x *NodeInfo) GetCpus() int32 {
if x != nil {
return x.Cpus
}
return 0
}
func (x *NodeInfo) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *NodeInfo) GetRealMemory() int32 {
if x != nil {
return x.RealMemory
}
return 0
}
func (x *NodeInfo) GetSockets() int32 {
if x != nil {
return x.Sockets
}
return 0
}
func (x *NodeInfo) GetThreads() int32 {
if x != nil {
return x.Threads
}
return 0
}
type ListNodesResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfos []*NodeInfo `protobuf:"bytes,1,rep,name=node_infos,json=nodeInfos,proto3" json:"node_infos,omitempty"`
}
func (x *ListNodesResp) Reset() {
*x = ListNodesResp{}
if protoimpl.UnsafeEnabled {
mi := &file_idl_slurm_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListNodesResp) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListNodesResp) ProtoMessage() {}
func (x *ListNodesResp) ProtoReflect() protoreflect.Message {
mi := &file_idl_slurm_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListNodesResp.ProtoReflect.Descriptor instead.
func (*ListNodesResp) Descriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{2}
}
func (x *ListNodesResp) GetNodeInfos() []*NodeInfo {
if x != nil {
return x.NodeInfos
}
return nil
}
var File_idl_slurm_proto protoreflect.FileDescriptor
var file_idl_slurm_proto_rawDesc = []byte{
0x0a, 0x0f, 0x69, 0x64, 0x6c, 0x2f, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x05, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x22, 0x32, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74,
0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x6c, 0x75, 0x72,
0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c,
0x53, 0x6c, 0x75, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x9e, 0x01, 0x0a,
0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6f, 0x61,
0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x62, 0x6f, 0x61, 0x72, 0x64,
0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x70, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
0x04, 0x63, 0x70, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x61,
0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72,
0x65, 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x6f, 0x63, 0x6b,
0x65, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x06,
0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x22, 0x3f, 0x0a,
0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2e,
0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49,
0x6e, 0x66, 0x6f, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x2a, 0x1a,
0x0a, 0x0c, 0x53, 0x6c, 0x75, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a,
0x0a, 0x06, 0x74, 0x69, 0x61, 0x6e, 0x68, 0x65, 0x10, 0x00, 0x32, 0x46, 0x0a, 0x0c, 0x53, 0x6c,
0x75, 0x72, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x4c, 0x69,
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x13, 0x2e, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x2e,
0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x73,
0x6c, 0x75, 0x72, 0x6d, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65,
0x73, 0x70, 0x42, 0x0a, 0x5a, 0x08, 0x2f, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x70, 0x62, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_idl_slurm_proto_rawDescOnce sync.Once
file_idl_slurm_proto_rawDescData = file_idl_slurm_proto_rawDesc
)
func file_idl_slurm_proto_rawDescGZIP() []byte {
file_idl_slurm_proto_rawDescOnce.Do(func() {
file_idl_slurm_proto_rawDescData = protoimpl.X.CompressGZIP(file_idl_slurm_proto_rawDescData)
})
return file_idl_slurm_proto_rawDescData
}
var file_idl_slurm_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_idl_slurm_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_idl_slurm_proto_goTypes = []interface{}{
(SlurmVersion)(0), // 0: slurm.SlurmVersion
(*ListNodesReq)(nil), // 1: slurm.ListNodesReq
(*NodeInfo)(nil), // 2: slurm.NodeInfo
(*ListNodesResp)(nil), // 3: slurm.ListNodesResp
}
var file_idl_slurm_proto_depIdxs = []int32{
2, // 0: slurm.ListNodesResp.node_infos:type_name -> slurm.NodeInfo
1, // 1: slurm.SlurmService.ListNodes:input_type -> slurm.ListNodesReq
3, // 2: slurm.SlurmService.ListNodes:output_type -> slurm.ListNodesResp
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_idl_slurm_proto_init() }
func file_idl_slurm_proto_init() {
if File_idl_slurm_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_idl_slurm_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNodesReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_idl_slurm_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_idl_slurm_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNodesResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_idl_slurm_proto_rawDesc,
NumEnums: 1,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_idl_slurm_proto_goTypes,
DependencyIndexes: file_idl_slurm_proto_depIdxs,
EnumInfos: file_idl_slurm_proto_enumTypes,
MessageInfos: file_idl_slurm_proto_msgTypes,
}.Build()
File_idl_slurm_proto = out.File
file_idl_slurm_proto_rawDesc = nil
file_idl_slurm_proto_goTypes = nil
file_idl_slurm_proto_depIdxs = nil
}

View File

@ -0,0 +1,173 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: idl/slurm.proto
/*
Package slurmpb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package slurmpb
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
var (
filter_SlurmService_ListNodes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_SlurmService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client SlurmServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListNodesReq
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SlurmService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_SlurmService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server SlurmServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListNodesReq
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SlurmService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListNodes(ctx, &protoReq)
return msg, metadata, err
}
// RegisterSlurmServiceHandlerServer registers the http handlers for service SlurmService to "mux".
// UnaryRPC :call SlurmServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSlurmServiceHandlerFromEndpoint instead.
func RegisterSlurmServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SlurmServiceServer) error {
mux.Handle("GET", pattern_SlurmService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/slurm.SlurmService/ListNodes", runtime.WithHTTPPathPattern("/apis/slurm/listNodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_SlurmService_ListNodes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_SlurmService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterSlurmServiceHandlerFromEndpoint is same as RegisterSlurmServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterSlurmServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterSlurmServiceHandler(ctx, mux, conn)
}
// RegisterSlurmServiceHandler registers the http handlers for service SlurmService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterSlurmServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterSlurmServiceHandlerClient(ctx, mux, NewSlurmServiceClient(conn))
}
// RegisterSlurmServiceHandlerClient registers the http handlers for service SlurmService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SlurmServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SlurmServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "SlurmServiceClient" to call the correct interceptors.
func RegisterSlurmServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SlurmServiceClient) error {
mux.Handle("GET", pattern_SlurmService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/slurm.SlurmService/ListNodes", runtime.WithHTTPPathPattern("/apis/slurm/listNodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_SlurmService_ListNodes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_SlurmService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_SlurmService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "slurm", "listNodes"}, ""))
)
var (
forward_SlurmService_ListNodes_0 = runtime.ForwardResponseMessage
)
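
A minimal sketch of mounting the generated handlers, assuming the gRPC server from main.go listens on localhost:8081 (both addresses are placeholders). RegisterSlurmServiceHandlerFromEndpoint then proxies GET /apis/slurm/listNodes to slurm.SlurmService/ListNodes:

```go
package main

import (
	"context"
	"log"
	"net/http"

	slurmpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	// Dials the gRPC backend and wires the REST route onto mux.
	if err := slurmpb.RegisterSlurmServiceHandlerFromEndpoint(ctx, mux, "localhost:8081", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```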

View File

@ -0,0 +1,107 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc (unknown)
// source: idl/slurm.proto
package slurmpb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// SlurmServiceClient is the client API for SlurmService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SlurmServiceClient interface {
// ListNodes lists all slurm nodes
ListNodes(ctx context.Context, in *ListNodesReq, opts ...grpc.CallOption) (*ListNodesResp, error)
}
type slurmServiceClient struct {
cc grpc.ClientConnInterface
}
func NewSlurmServiceClient(cc grpc.ClientConnInterface) SlurmServiceClient {
return &slurmServiceClient{cc}
}
func (c *slurmServiceClient) ListNodes(ctx context.Context, in *ListNodesReq, opts ...grpc.CallOption) (*ListNodesResp, error) {
out := new(ListNodesResp)
err := c.cc.Invoke(ctx, "/slurm.SlurmService/ListNodes", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SlurmServiceServer is the server API for SlurmService service.
// All implementations must embed UnimplementedSlurmServiceServer
// for forward compatibility
type SlurmServiceServer interface {
// ListNodes lists all slurm nodes
ListNodes(context.Context, *ListNodesReq) (*ListNodesResp, error)
mustEmbedUnimplementedSlurmServiceServer()
}
// UnimplementedSlurmServiceServer must be embedded to have forward compatible implementations.
type UnimplementedSlurmServiceServer struct {
}
func (UnimplementedSlurmServiceServer) ListNodes(context.Context, *ListNodesReq) (*ListNodesResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented")
}
func (UnimplementedSlurmServiceServer) mustEmbedUnimplementedSlurmServiceServer() {}
// UnsafeSlurmServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SlurmServiceServer will
// result in compilation errors.
type UnsafeSlurmServiceServer interface {
mustEmbedUnimplementedSlurmServiceServer()
}
func RegisterSlurmServiceServer(s grpc.ServiceRegistrar, srv SlurmServiceServer) {
s.RegisterService(&SlurmService_ServiceDesc, srv)
}
func _SlurmService_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListNodesReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SlurmServiceServer).ListNodes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/slurm.SlurmService/ListNodes",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SlurmServiceServer).ListNodes(ctx, req.(*ListNodesReq))
}
return interceptor(ctx, in, info, handler)
}
// SlurmService_ServiceDesc is the grpc.ServiceDesc for SlurmService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SlurmService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "slurm.SlurmService",
HandlerType: (*SlurmServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ListNodes",
Handler: _SlurmService_ListNodes_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "idl/slurm.proto",
}

View File

@ -0,0 +1,87 @@
{
"swagger": "2.0",
"info": {
"title": "idl/slurm.proto",
"version": "version not set"
},
"tags": [
{
"name": "SlurmService"
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {},
"definitions": {
"protobufAny": {
"type": "object",
"properties": {
"@type": {
"type": "string"
}
},
"additionalProperties": {}
},
"rpcStatus": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"slurmListNodesResp": {
"type": "object",
"properties": {
"nodeInfos": {
"type": "array",
"items": {
"$ref": "#/definitions/slurmNodeInfo"
}
}
}
},
"slurmNodeInfo": {
"type": "object",
"properties": {
"boards": {
"type": "integer",
"format": "int32"
},
"cpus": {
"type": "integer",
"format": "int32"
},
"name": {
"type": "string"
},
"realMemory": {
"type": "integer",
"format": "int32"
},
"sockets": {
"type": "integer",
"format": "int32"
},
"threads": {
"type": "integer",
"format": "int32"
}
}
}
}
}

View File

@ -0,0 +1,33 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
// slurm version
enum SlurmVersion {
// 0 - tianhe
tianhe = 0;
}
message ListNodesReq {
int32 SlurmVersion = 1;
}
message NodeInfo {
int32 boards = 1;
int32 cpus = 2;
string name = 3;
int32 realMemory = 4;
int32 sockets = 5;
int32 threads = 6;
}
message ListNodesResp {
repeated NodeInfo node_infos = 1;
}
// Slurm Services
service SlurmService {
// ListNodes lists all slurm nodes
rpc ListNodes(ListNodesReq) returns (ListNodesResp);
}
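
A minimal gRPC client sketch against this service; the server address is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	slurmpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := slurmpb.NewSlurmServiceClient(conn)
	resp, err := client.ListNodes(context.Background(), &slurmpb.ListNodesReq{
		SlurmVersion: int32(slurmpb.SlurmVersion_tianhe),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range resp.NodeInfos {
		fmt.Printf("%s: %d cpus, %d MB\n", n.Name, n.Cpus, n.RealMemory)
	}
}
```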

View File

@ -0,0 +1,7 @@
type: google.api.Service
config_version: 3
http:
rules:
- selector: slurm.SlurmService.ListNodes
get: "/apis/slurm/listNodes"

30
adaptor/pcm_slurm/main.go Normal file
View File

@ -0,0 +1,30 @@
package main
import (
slurmpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
slurmserver "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/config"
"code.gitlink.org.cn/JCCE/PCM.git/common/global"
"code.gitlink.org.cn/JCCE/PCM.git/common/server"
"go.uber.org/zap"
"google.golang.org/grpc"
)
func main() {
// Initialize configuration and the database
config.InitConfig()
// Create a new zap logger instance
logger, _ := zap.NewDevelopment()
// logger.Sugar().Fatal saves a few lines of error handling
logger.Sugar().Fatal(server.RunGRPCServer(&server.GRPCConfig{
Name: "pcm_slurm",
Addr: global.S.SlurmInfo.Address,
RegisterFunc: func(g *grpc.Server) {
slurmpb.RegisterSlurmServiceServer(g, &slurmserver.Server{})
},
Logger: logger,
}))
}

View File

@ -0,0 +1,3 @@
package rest
//TODO

View File

@ -0,0 +1,23 @@
package server
import (
slurmpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type Server struct {
slurmpb.UnimplementedSlurmServiceServer
}
// ListNodes returns all slurm nodes
func (s *Server) ListNodes(ctx context.Context, req *slurmpb.ListNodesReq) (*slurmpb.ListNodesResp, error) {
resp, err := ListNodes(ctx, req)
if err != nil {
glog.Errorf("ListSlurmNodes error %+v", err)
return nil, status.Error(codes.Internal, err.Error())
}
return resp, nil
}

View File

@ -0,0 +1,13 @@
package server
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
slurmer "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/service"
"context"
)
func ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (*pbslurm.ListNodesResp, error) {
slurm, err := slurmer.SelectSlurmVersion(req.SlurmVersion)
if err != nil {
return nil, err
}
return slurm.ListNodes(ctx, req)
}

View File

@ -0,0 +1,19 @@
package slurmer
import (
"context"
"fmt"
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
)
type Slurmer interface {
ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (resp *pbslurm.ListNodesResp, err error) //list slurm nodes
}
func SelectSlurmVersion(slurmVersion int32) (Slurmer, error) {
switch slurmVersion {
case int32(pbslurm.SlurmVersion_tianhe):
return SelectTianhe()
}
return nil, fmt.Errorf("unsupported slurm version: %d", slurmVersion)
}
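
Adding support for another Slurm flavor only touches this switch. A sketch, where SlurmVersion_openhpc and SelectOpenhpc are hypothetical names (they do not exist in this repository) standing in for a new enum value in slurm.proto plus a matching constructor:

```go
// Hypothetical extension; SlurmVersion_openhpc and SelectOpenhpc are
// illustrative names, not part of the current codebase.
func SelectSlurmVersionSketch(slurmVersion int32) (Slurmer, error) {
	switch slurmVersion {
	case int32(pbslurm.SlurmVersion_tianhe):
		return SelectTianhe()
	case int32(pbslurm.SlurmVersion_openhpc): // hypothetical enum value
		return SelectOpenhpc() // hypothetical constructor
	}
	return nil, fmt.Errorf("unsupported slurm version: %d", slurmVersion)
}
```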

View File

@ -0,0 +1,35 @@
package slurmer
import (
nodeinfo "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/nodeinfo"
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
)
type SlurmStruct struct {
version string
}
func SelectTianhe() (Slurmer, error) {
return &SlurmStruct{
version: "2.6.9.1",
}, nil
}
func (slurmStruct SlurmStruct) ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (*pbslurm.ListNodesResp, error) {
nodeList := nodeinfo.Get_all_nodes()
var resp = pbslurm.ListNodesResp{}
for _, node := range nodeList.Node_list {
nodeInfoResult := pbslurm.NodeInfo{}
nodeInfoResult.Cpus = int32(node.Cpus)
nodeInfoResult.Boards = int32(node.Boards)
nodeInfoResult.RealMemory = int32(node.Real_memory)
nodeInfoResult.Sockets = int32(node.Sockets)
nodeInfoResult.Threads = int32(node.Threads)
resp.NodeInfos = append(resp.NodeInfos, &nodeInfoResult)
}
return &resp, nil
}

View File

@ -0,0 +1,17 @@
version: v1
plugins:
- name: go
out: gen
opt:
- paths=source_relative
- name: go-grpc
out: gen
opt:
- paths=source_relative
- name: grpc-gateway
out: gen
opt:
- paths=source_relative
- grpc_api_configuration=idl/ecs.yaml
- name: openapiv2
out: gen/openapiv2

13
adaptor/pcm_vm/buf.yaml Normal file
View File

@ -0,0 +1,13 @@
version: v1
name: buf.build/JCCE/PCM
breaking:
use:
- FILE
lint:
use:
- DEFAULT
# ignore:
# - google/type/datetime.proto
deps:
- buf.build/googleapis/googleapis
- buf.build/grpc-ecosystem/grpc-gateway

View File

@ -1,12 +1,12 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: idl/pbecs/ecs.proto
// source: idl/ecs.proto
/*
Package pbecs is a reverse proxy.
Package ecspb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package pbecs
package ecspb
import (
"context"
@ -339,20 +339,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/CreateMultipleEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/CreateMultipleEcs", runtime.WithHTTPPathPattern("/apis/ecs/createMultiple"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_CreateMultipleEcs_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_CreateMultipleEcs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_CreateMultipleEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_CreateMultipleEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -362,20 +364,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/CreateEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/CreateEcs", runtime.WithHTTPPathPattern("/apis/ecs/create"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_CreateEcs_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_CreateEcs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_CreateEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_CreateEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -385,20 +389,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/DeleteEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/DeleteEcs", runtime.WithHTTPPathPattern("/apis/ecs/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_DeleteEcs_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_DeleteEcs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_DeleteEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_DeleteEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -408,20 +414,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/UpdateEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/UpdateEcs", runtime.WithHTTPPathPattern("/apis/ecs/update"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_UpdateEcs_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_UpdateEcs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_UpdateEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_UpdateEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -431,20 +439,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/ListEcsDetail")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/ListEcsDetail", runtime.WithHTTPPathPattern("/apis/ecs/detail"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_ListEcsDetail_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_ListEcsDetail_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcsDetail_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcsDetail_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -454,20 +464,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/ListEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/ListEcs", runtime.WithHTTPPathPattern("/apis/ecs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_ListEcs_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_ListEcs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -477,20 +489,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/ListEcsAll")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/ListEcsAll", runtime.WithHTTPPathPattern("/apis/ecs/all"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_ListEcsAll_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_ListEcsAll_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcsAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcsAll_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -500,20 +514,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/ActionEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/ActionEcs", runtime.WithHTTPPathPattern("/apis/ecs/action"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_ActionEcs_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_ActionEcs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ActionEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ActionEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -523,20 +539,22 @@ func RegisterEcsServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux,
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pbecs.EcsService/ListEcsImages")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/ecs.EcsService/ListEcsImages", runtime.WithHTTPPathPattern("/apis/ecs/listImages"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_EcsService_ListEcsImages_0(rctx, inboundMarshaler, server, req, pathParams)
resp, md, err := local_request_EcsService_ListEcsImages_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcsImages_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcsImages_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -585,19 +603,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/CreateMultipleEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/CreateMultipleEcs", runtime.WithHTTPPathPattern("/apis/ecs/createMultiple"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_CreateMultipleEcs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_CreateMultipleEcs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_CreateMultipleEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_CreateMultipleEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -605,19 +625,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/CreateEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/CreateEcs", runtime.WithHTTPPathPattern("/apis/ecs/create"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_CreateEcs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_CreateEcs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_CreateEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_CreateEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -625,19 +647,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/DeleteEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/DeleteEcs", runtime.WithHTTPPathPattern("/apis/ecs/delete"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_DeleteEcs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_DeleteEcs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_DeleteEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_DeleteEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -645,19 +669,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/UpdateEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/UpdateEcs", runtime.WithHTTPPathPattern("/apis/ecs/update"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_UpdateEcs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_UpdateEcs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_UpdateEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_UpdateEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -665,19 +691,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/ListEcsDetail")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/ListEcsDetail", runtime.WithHTTPPathPattern("/apis/ecs/detail"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_ListEcsDetail_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_ListEcsDetail_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcsDetail_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcsDetail_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -685,19 +713,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/ListEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/ListEcs", runtime.WithHTTPPathPattern("/apis/ecs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_ListEcs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_ListEcs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -705,19 +735,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/ListEcsAll")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/ListEcsAll", runtime.WithHTTPPathPattern("/apis/ecs/all"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_ListEcsAll_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_ListEcsAll_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcsAll_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcsAll_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -725,19 +757,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/ActionEcs")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/ActionEcs", runtime.WithHTTPPathPattern("/apis/ecs/action"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_ActionEcs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_ActionEcs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ActionEcs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ActionEcs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
@ -745,19 +779,21 @@ func RegisterEcsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux,
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/pbecs.EcsService/ListEcsImages")
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/ecs.EcsService/ListEcsImages", runtime.WithHTTPPathPattern("/apis/ecs/listImages"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_EcsService_ListEcsImages_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
resp, md, err := request_EcsService_ListEcsImages_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_EcsService_ListEcsImages_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
forward_EcsService_ListEcsImages_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})

View File

@ -2,9 +2,9 @@
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc (unknown)
// source: idl/pbecs/ecs.proto
// source: idl/ecs.proto
package pbecs
package ecspb
import (
context "context"
@ -36,9 +36,9 @@ type EcsServiceClient interface {
ListEcs(ctx context.Context, in *ListReq, opts ...grpc.CallOption) (*ListResp, error)
// 查询所有云的ECS
ListEcsAll(ctx context.Context, in *ListAllReq, opts ...grpc.CallOption) (*ListResp, error)
//操作ecs(start-stop-restart)
// 操作ecs(start-stop-restart)
ActionEcs(ctx context.Context, in *ActionReq, opts ...grpc.CallOption) (*ActionResp, error)
//查询ecs镜像
// 查询ecs镜像
ListEcsImages(ctx context.Context, in *ListImagesReq, opts ...grpc.CallOption) (*ListImagesResp, error)
}
@ -52,7 +52,7 @@ func NewEcsServiceClient(cc grpc.ClientConnInterface) EcsServiceClient {
func (c *ecsServiceClient) CreateMultipleEcs(ctx context.Context, in *CreateEcsMultipleReq, opts ...grpc.CallOption) (*CreateEcsMultipleResp, error) {
out := new(CreateEcsMultipleResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/CreateMultipleEcs", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/CreateMultipleEcs", in, out, opts...)
if err != nil {
return nil, err
}
@ -61,7 +61,7 @@ func (c *ecsServiceClient) CreateMultipleEcs(ctx context.Context, in *CreateEcsM
func (c *ecsServiceClient) CreateEcs(ctx context.Context, in *CreateEcsReq, opts ...grpc.CallOption) (*CreateEcsResp, error) {
out := new(CreateEcsResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/CreateEcs", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/CreateEcs", in, out, opts...)
if err != nil {
return nil, err
}
@ -70,7 +70,7 @@ func (c *ecsServiceClient) CreateEcs(ctx context.Context, in *CreateEcsReq, opts
func (c *ecsServiceClient) DeleteEcs(ctx context.Context, in *DeleteEcsReq, opts ...grpc.CallOption) (*DeleteEcsResp, error) {
out := new(DeleteEcsResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/DeleteEcs", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/DeleteEcs", in, out, opts...)
if err != nil {
return nil, err
}
@ -79,7 +79,7 @@ func (c *ecsServiceClient) DeleteEcs(ctx context.Context, in *DeleteEcsReq, opts
func (c *ecsServiceClient) UpdateEcs(ctx context.Context, in *UpdateEcsReq, opts ...grpc.CallOption) (*UpdateEcsResp, error) {
out := new(UpdateEcsResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/UpdateEcs", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/UpdateEcs", in, out, opts...)
if err != nil {
return nil, err
}
@ -88,7 +88,7 @@ func (c *ecsServiceClient) UpdateEcs(ctx context.Context, in *UpdateEcsReq, opts
func (c *ecsServiceClient) ListEcsDetail(ctx context.Context, in *ListDetailReq, opts ...grpc.CallOption) (*ListDetailResp, error) {
out := new(ListDetailResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/ListEcsDetail", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/ListEcsDetail", in, out, opts...)
if err != nil {
return nil, err
}
@ -97,7 +97,7 @@ func (c *ecsServiceClient) ListEcsDetail(ctx context.Context, in *ListDetailReq,
func (c *ecsServiceClient) ListEcs(ctx context.Context, in *ListReq, opts ...grpc.CallOption) (*ListResp, error) {
out := new(ListResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/ListEcs", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/ListEcs", in, out, opts...)
if err != nil {
return nil, err
}
@ -106,7 +106,7 @@ func (c *ecsServiceClient) ListEcs(ctx context.Context, in *ListReq, opts ...grp
func (c *ecsServiceClient) ListEcsAll(ctx context.Context, in *ListAllReq, opts ...grpc.CallOption) (*ListResp, error) {
out := new(ListResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/ListEcsAll", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/ListEcsAll", in, out, opts...)
if err != nil {
return nil, err
}
@ -115,7 +115,7 @@ func (c *ecsServiceClient) ListEcsAll(ctx context.Context, in *ListAllReq, opts
func (c *ecsServiceClient) ActionEcs(ctx context.Context, in *ActionReq, opts ...grpc.CallOption) (*ActionResp, error) {
out := new(ActionResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/ActionEcs", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/ActionEcs", in, out, opts...)
if err != nil {
return nil, err
}
@ -124,7 +124,7 @@ func (c *ecsServiceClient) ActionEcs(ctx context.Context, in *ActionReq, opts ..
func (c *ecsServiceClient) ListEcsImages(ctx context.Context, in *ListImagesReq, opts ...grpc.CallOption) (*ListImagesResp, error) {
out := new(ListImagesResp)
err := c.cc.Invoke(ctx, "/pbecs.EcsService/ListEcsImages", in, out, opts...)
err := c.cc.Invoke(ctx, "/ecs.EcsService/ListEcsImages", in, out, opts...)
if err != nil {
return nil, err
}
@ -149,9 +149,9 @@ type EcsServiceServer interface {
ListEcs(context.Context, *ListReq) (*ListResp, error)
// 查询所有云的ECS
ListEcsAll(context.Context, *ListAllReq) (*ListResp, error)
//操作ecs(start-stop-restart)
// 操作ecs(start-stop-restart)
ActionEcs(context.Context, *ActionReq) (*ActionResp, error)
//查询ecs镜像
// 查询ecs镜像
ListEcsImages(context.Context, *ListImagesReq) (*ListImagesResp, error)
mustEmbedUnimplementedEcsServiceServer()
}
@ -210,7 +210,7 @@ func _EcsService_CreateMultipleEcs_Handler(srv interface{}, ctx context.Context,
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/CreateMultipleEcs",
FullMethod: "/ecs.EcsService/CreateMultipleEcs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).CreateMultipleEcs(ctx, req.(*CreateEcsMultipleReq))
@ -228,7 +228,7 @@ func _EcsService_CreateEcs_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/CreateEcs",
FullMethod: "/ecs.EcsService/CreateEcs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).CreateEcs(ctx, req.(*CreateEcsReq))
@ -246,7 +246,7 @@ func _EcsService_DeleteEcs_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/DeleteEcs",
FullMethod: "/ecs.EcsService/DeleteEcs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).DeleteEcs(ctx, req.(*DeleteEcsReq))
@ -264,7 +264,7 @@ func _EcsService_UpdateEcs_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/UpdateEcs",
FullMethod: "/ecs.EcsService/UpdateEcs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).UpdateEcs(ctx, req.(*UpdateEcsReq))
@ -282,7 +282,7 @@ func _EcsService_ListEcsDetail_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/ListEcsDetail",
FullMethod: "/ecs.EcsService/ListEcsDetail",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).ListEcsDetail(ctx, req.(*ListDetailReq))
@ -300,7 +300,7 @@ func _EcsService_ListEcs_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/ListEcs",
FullMethod: "/ecs.EcsService/ListEcs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).ListEcs(ctx, req.(*ListReq))
@ -318,7 +318,7 @@ func _EcsService_ListEcsAll_Handler(srv interface{}, ctx context.Context, dec fu
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/ListEcsAll",
FullMethod: "/ecs.EcsService/ListEcsAll",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).ListEcsAll(ctx, req.(*ListAllReq))
@ -336,7 +336,7 @@ func _EcsService_ActionEcs_Handler(srv interface{}, ctx context.Context, dec fun
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/ActionEcs",
FullMethod: "/ecs.EcsService/ActionEcs",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).ActionEcs(ctx, req.(*ActionReq))
@ -354,7 +354,7 @@ func _EcsService_ListEcsImages_Handler(srv interface{}, ctx context.Context, dec
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/pbecs.EcsService/ListEcsImages",
FullMethod: "/ecs.EcsService/ListEcsImages",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(EcsServiceServer).ListEcsImages(ctx, req.(*ListImagesReq))
@ -366,7 +366,7 @@ func _EcsService_ListEcsImages_Handler(srv interface{}, ctx context.Context, dec
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var EcsService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "pbecs.EcsService",
ServiceName: "ecs.EcsService",
HandlerType: (*EcsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
@ -407,5 +407,5 @@ var EcsService_ServiceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{},
Metadata: "idl/pbecs/ecs.proto",
Metadata: "idl/ecs.proto",
}
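Worth noting why every `"/pbecs.EcsService/..."` string above becomes `"/ecs.EcsService/..."`: the wire-level gRPC method name is `/<proto package>.<Service>/<Method>`, so renaming the proto package is a breaking change for any client still invoking the old path. A minimal sketch (generated types elided; not part of the commit):

```go
package ecssketch

import (
	"context"

	"google.golang.org/grpc"
)

// listEcs invokes the renamed method path. Before this commit the same RPC
// lived at "/pbecs.EcsService/ListEcs"; a server built from the new proto
// answers Unimplemented to that old path.
func listEcs(ctx context.Context, cc grpc.ClientConnInterface, in, out interface{}) error {
	return cc.Invoke(ctx, "/ecs.EcsService/ListEcs", in, out)
}
```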

View File

@ -0,0 +1,578 @@
{
"swagger": "2.0",
"info": {
"title": "idl/ecs.proto",
"version": "version not set"
},
"tags": [
{
"name": "EcsService"
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {},
"definitions": {
"ecsActionResp": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账户名称根据config.yaml中的配置默认为第一个配置的账户"
},
"ecses": {
"type": "array",
"items": {
"$ref": "#/definitions/ecsEcsInstance"
},
"title": "Ecs 机器集合"
},
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
},
"status": {
"type": "string",
"title": "vm状态"
}
}
},
"ecsActionType": {
"type": "string",
"enum": [
"start",
"stop",
"restart"
],
"default": "start",
"description": "- start: 启动\n - stop: 停止\n - restart: 重启",
"title": "虚拟机状态操作"
},
"ecsCloudProvider": {
"type": "string",
"enum": [
"ali",
"tencent",
"huawei",
"k8s",
"harvester"
],
"default": "ali",
"description": "- ali: 0 - 阿里云\n - tencent: 1 - 腾讯云\n - huawei: 2 - 华为云\n - k8s: 3 - K8S\n - harvester: 3 - Harvester",
"title": "云提供商"
},
"ecsCreateEcsMultipleResp": {
"type": "object",
"properties": {
"requestId": {
"type": "array",
"items": {
"type": "string"
},
"title": "请求ID"
},
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
}
},
"title": "创建多家云ECS返回值"
},
"ecsCreateEcsReq": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账号名称"
},
"regionId": {
"type": "integer",
"format": "int32",
"title": "地域,数据中心"
},
"imageId": {
"type": "string",
"title": "镜像id"
},
"instanceChargeType": {
"type": "string",
"title": "实例的付费方式"
},
"instanceType": {
"type": "string",
"title": "实例的资源规格"
},
"securityGroupId": {
"type": "string",
"title": "安全组id"
},
"vSwitchId": {
"type": "string",
"title": "交换机id"
},
"instanceName": {
"type": "string",
"title": "实例名称"
},
"description": {
"type": "string",
"title": "实例描述"
},
"zoneId": {
"type": "string",
"title": "可用区id"
},
"systemDisk": {
"$ref": "#/definitions/ecsSystemDisk"
},
"amount": {
"type": "integer",
"format": "int32",
"title": "创建ECS的数量"
},
"dryRun": {
"type": "string",
"title": "预检此次请求,为true时请求通过则返回 Request validation has been passed with DryRun flag set"
},
"category": {
"type": "string",
"title": "数据盘N的云盘种类。取值范围cloud_efficiency高效云盘cloud_ssdSSD云盘cloud_essdESSD云盘cloud普通云盘。"
},
"internetChargeType": {
"$ref": "#/definitions/ecsInternetChargeType"
},
"internetMaxBandwidthOut": {
"type": "integer",
"format": "int32",
"title": "公网入带宽最大值单位为Mbit/s。创建的实例如果参数InternetMaxBandwidthOut的值大于0则自动为实例分配公网IP。"
},
"vpcId": {
"type": "string",
"title": "vpc id 华为云必需"
},
"subnetId": {
"type": "string",
"title": "待创建云服务器所在的子网信息。需要指定vpcid对应VPC下已创建的子网subnet的网络IDUUID格式。华为云必需"
},
"namespace": {
"type": "string",
"title": "-------------harvester---------------\n命名空间"
},
"cpu": {
"type": "integer",
"format": "int32",
"title": "vCpu"
},
"memory": {
"type": "string",
"title": "memory"
},
"sshKey": {
"type": "string",
"title": "ssh_key"
},
"diskName": {
"type": "string",
"title": "diskName"
},
"diskType": {
"type": "string",
"title": "disk类型 disk,cd-rom"
},
"diskSize": {
"type": "string",
"title": "卷大小"
},
"bus": {
"type": "string",
"description": "bus 总线指示要模拟的磁盘设备的类型支持virtio, sata, scsi."
},
"networkName": {
"type": "string",
"title": "网络名称"
},
"networkModel": {
"type": "string",
"description": "network_model 网络模式支持e1000, e1000e, ne2k_pci, pcnet, rtl8139, virtio."
},
"network": {
"type": "string",
"title": "网络"
},
"networkType": {
"type": "string",
"title": "网络连接方法默认bridge"
},
"osType": {
"type": "string",
"title": "osType //系统类型"
},
"machineType": {
"type": "string",
"title": "machineType //机器类型 none、q35、pc"
},
"machineName": {
"type": "string",
"title": "machineName //主机名称,默认为虚拟机名称"
},
"userDataTemplate": {
"type": "string",
"title": "userDataTemplate //用户数据模板"
},
"networkDataTemplate": {
"type": "string",
"title": "networkDataTemplate //网络数据模板"
},
"vmTemplateName": {
"type": "string",
"title": "vmTemplateName //模板名称"
},
"vmTemplateVersion": {
"type": "string",
"title": "vmTemplateVersion //版本号"
}
},
"title": "创建ECS入参"
},
"ecsCreateEcsResp": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账户名称根据config.yaml中的配置默认为第一个配置的账户"
},
"regionId": {
"type": "integer",
"format": "int32",
"title": "区域Id参考 tenant.proto 中的各个云的区域"
},
"requestId": {
"type": "string",
"title": "请求ID"
},
"orderId": {
"type": "string",
"title": "订单id"
},
"tradePrice": {
"type": "number",
"format": "float",
"title": "订单成交价"
},
"instanceIdSets": {
"type": "array",
"items": {
"type": "string"
},
"title": "实例IDInstanceIdSet列表"
},
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
}
},
"title": "创建ECS返回值"
},
"ecsDeleteEcsResp": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账户名称根据config.yaml中的配置默认为第一个配置的账户"
},
"regionId": {
"type": "integer",
"format": "int32",
"title": "区域Id参考 tenant.proto 中的各个云的区域"
},
"requestId": {
"type": "string",
"title": "请求ID"
}
},
"title": "删除ECS返回值"
},
"ecsEcsInstance": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账号名称"
},
"instanceId": {
"type": "string",
"title": "实例id"
},
"instanceName": {
"type": "string",
"title": "实例名称"
},
"regionName": {
"type": "string",
"title": "地域,数据中心"
},
"publicIps": {
"type": "array",
"items": {
"type": "string"
},
"title": "公网ip"
},
"instanceType": {
"type": "string",
"title": "实例类型"
},
"cpu": {
"type": "string",
"title": "vcpu数"
},
"memory": {
"type": "string",
"title": "内存MB"
},
"description": {
"type": "string",
"title": "实例描述"
},
"status": {
"type": "string",
"title": "状态"
},
"creationTime": {
"type": "string",
"title": "创建时间ISO8601"
},
"expireTime": {
"type": "string",
"title": "过期时间"
},
"innerIps": {
"type": "array",
"items": {
"type": "string"
},
"title": "内网ip"
},
"vpcId": {
"type": "string",
"title": "vpc id"
},
"resourceGroupId": {
"type": "string",
"title": "资源组id"
},
"instanceChargeType": {
"type": "string",
"title": "收费类型"
},
"node": {
"type": "string",
"title": "-----------harvester---------\n虚拟机所在的节点"
},
"namespace": {
"type": "string",
"title": "namespace"
}
},
"title": "ECS 实例"
},
"ecsImage": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账户名称根据config.yaml中的配置默认为第一个配置的账户"
},
"status": {
"type": "string",
"title": "镜像状态"
},
"displayName": {
"type": "string",
"title": "镜像名称"
},
"namespace": {
"type": "string",
"title": "命名空间"
},
"id": {
"type": "string",
"title": "镜像id"
}
}
},
"ecsInternetChargeType": {
"type": "string",
"enum": [
"PayByBandwidth",
"PayByTraffic"
],
"default": "PayByBandwidth",
"description": "- PayByBandwidth: 按固定带宽计费。\n - PayByTraffic: (默认):按使用流量计费",
"title": "网络计费类型"
},
"ecsListDetailResp": {
"type": "object",
"properties": {
"ecses": {
"type": "array",
"items": {
"$ref": "#/definitions/ecsEcsInstance"
},
"title": "Ecs 机器集合"
},
"finished": {
"type": "boolean",
"title": "查询是否完成,如果为否-false则可以将下面三个分页参数填入到请求中继续查询"
},
"pageNumber": {
"type": "integer",
"format": "int32",
"title": "分页相关参数,页码"
},
"pageSize": {
"type": "integer",
"format": "int32",
"title": "分页相关参数,每页数量"
},
"nextToken": {
"type": "string",
"title": "分页相关参数下一页的token"
},
"requestId": {
"type": "string",
"title": "请求id出现问题后提供给云厂商排查问题"
}
},
"title": "查询ECS返回值"
},
"ecsListImagesResp": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"images": {
"type": "array",
"items": {
"$ref": "#/definitions/ecsImage"
},
"title": "镜像集合"
}
}
},
"ecsListResp": {
"type": "object",
"properties": {
"ecses": {
"type": "array",
"items": {
"$ref": "#/definitions/ecsEcsInstance"
},
"title": "Ecs 机器集合"
}
}
},
"ecsSystemDisk": {
"type": "object",
"properties": {
"size": {
"type": "string",
"title": "系统盘大小单位为GiB。取值范围20~500。该参数的取值必须大于或者等于max{20, ImageSize}。默认值max{40, 参数ImageId对应的镜像大小}"
},
"category": {
"type": "string",
"title": "系统盘类型。系统盘的云盘种类。取值范围cloud_efficiency高效云盘。cloud_ssdSSD云盘。cloud_essdESSD云盘。cloud普通云盘。"
},
"diskName": {
"type": "string",
"title": "系统盘名称"
},
"description": {
"type": "string",
"title": "系统盘描述"
},
"performanceLevel": {
"type": "string",
"title": "创建ESSD云盘作为系统盘使用时设置云盘的性能等级。取值范围PL0单盘最高随机读写IOPS 1万。PL1默认单盘最高随机读写IOPS 5万。PL2单盘最高随机读写IOPS 10万。PL3单盘最高随机读写IOPS 100万。"
},
"autoSnapshotPolicyId": {
"type": "string",
"title": "系统盘采用的自动快照策略ID。"
}
},
"title": "系统磁盘"
},
"ecsUpdateEcsResp": {
"type": "object",
"properties": {
"provider": {
"$ref": "#/definitions/ecsCloudProvider"
},
"accountName": {
"type": "string",
"title": "账户名称根据config.yaml中的配置默认为第一个配置的账户"
},
"regionId": {
"type": "integer",
"format": "int32",
"title": "区域Id参考 tenant.proto 中的各个云的区域"
},
"requestId": {
"type": "string",
"title": "请求ID"
}
},
"title": "更新ECS返回值"
},
"protobufAny": {
"type": "object",
"properties": {
"@type": {
"type": "string"
}
},
"additionalProperties": {}
},
"rpcStatus": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
}
}
}

View File

@ -1,15 +1,27 @@
syntax = "proto3";
package pbecs;
package ecs;
option go_package = "code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs";
option go_package = "/ecspb";
import "idl/pbtenant/tenant.proto";
import "google/api/annotations.proto";
// 云提供商
enum CloudProvider {
// 0 - 阿里云
ali = 0;
// 1 - 腾讯云
tencent = 1;
// 2 - 华为云
huawei = 2;
// 3 - K8S
k8s = 3;
// 4 - Harvester
harvester = 4;
}
// ECS 实例
message EcsInstance {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账号名称
string account_name = 2;
// 实例id
@ -72,7 +84,7 @@ message CreateEcsMultipleResp {
// 创建ECS入参
message CreateEcsReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账号名称
string account_name = 2;
// 地域,数据中心
@ -169,7 +181,7 @@ message SystemDisk {
// 创建ECS返回值
message CreateEcsResp {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 区域Id参考 tenant.proto 中的各个云的区域
@ -189,7 +201,7 @@ message CreateEcsResp {
// 删除ECS入参
message DeleteEcsReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账号名称
string account_name = 2;
//
@ -222,7 +234,7 @@ message DeleteEcsReq {
// 删除ECS返回值
message DeleteEcsResp {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 区域Id参考 tenant.proto 中的各个云的区域
@ -234,7 +246,7 @@ message DeleteEcsResp {
// 更新ECS入参
message UpdateEcsReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账号名称
string account_name = 2;
//
@ -265,7 +277,7 @@ message UpdateEcsReq {
// 更新ECS返回值
message UpdateEcsResp {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 区域Id参考 tenant.proto 中的各个云的区域
@ -277,7 +289,7 @@ message UpdateEcsResp {
// 查询ECS入参
message ListDetailReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 区域Id参考 tenant.proto 中的各个云的区域
@ -321,7 +333,7 @@ enum ActionType {
message ActionReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 区域Id参考 tenant.proto 中的各个云的区域
@ -336,7 +348,7 @@ message ActionReq {
message ActionResp {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// Ecs 机器集合
@ -349,7 +361,7 @@ message ActionResp {
message ListReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
}
message ListResp {
@ -361,7 +373,7 @@ message ListAllReq{}
message ListImagesReq {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 区域Id参考 tenant.proto 中的各个云的区域
@ -376,7 +388,7 @@ message ListImagesReq {
message Image {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 账户名称根据config.yaml中的配置默认为第一个配置的账户
string account_name = 2;
// 镜像状态
@ -391,7 +403,7 @@ message Image {
message ListImagesResp {
// 云提供商
pbtenant.CloudProvider provider = 1;
CloudProvider provider = 1;
// 镜像集合
repeated Image images = 2;
}
@ -405,70 +417,29 @@ message ListImagesResp {
service EcsService {
// 创建多家云ECS
rpc CreateMultipleEcs(CreateEcsMultipleReq) returns (CreateEcsMultipleResp) {
option (google.api.http) = {
post : "/apis/ecs/createMultiple"
body : "*"
};
}
rpc CreateMultipleEcs(CreateEcsMultipleReq) returns (CreateEcsMultipleResp);
// 创建ECS
rpc CreateEcs(CreateEcsReq) returns (CreateEcsResp) {
option (google.api.http) = {
post : "/apis/ecs/create"
body : "*"
};
}
rpc CreateEcs(CreateEcsReq) returns (CreateEcsResp);
// 删除ECS
rpc DeleteEcs(DeleteEcsReq) returns (DeleteEcsResp) {
option (google.api.http) = {
post : "/apis/ecs/delete"
body : "*"
};
}
rpc DeleteEcs(DeleteEcsReq) returns (DeleteEcsResp);
// 更新ECS
rpc UpdateEcs(UpdateEcsReq) returns (UpdateEcsResp) {
option (google.api.http) = {
put : "/apis/ecs/update"
body : "*"
};
}
rpc UpdateEcs(UpdateEcsReq) returns (UpdateEcsResp);
// ECS明细 -
rpc ListEcsDetail(ListDetailReq) returns (ListDetailResp) {
option (google.api.http) = {
get : "/apis/ecs/detail"
};
}
rpc ListEcsDetail(ListDetailReq) returns (ListDetailResp);
// ECS全量 -
rpc ListEcs(ListReq) returns (ListResp) {
option (google.api.http) = {
get : "/apis/ecs"
};
}
rpc ListEcs(ListReq) returns (ListResp);
// 查询所有云的ECS
rpc ListEcsAll(ListAllReq) returns (ListResp) {
option (google.api.http) = {
get : "/apis/ecs/all"
};
}
rpc ListEcsAll(ListAllReq) returns (ListResp);
// 操作ecs(start-stop-restart)
rpc ActionEcs(ActionReq) returns (ActionResp){
option (google.api.http) = {
post : "/apis/ecs/action"
body : "*"
};
}
rpc ActionEcs(ActionReq) returns (ActionResp);
// 查询ecs镜像
rpc ListEcsImages(ListImagesReq) returns (ListImagesResp){
option (google.api.http) = {
get : "/apis/ecs/listImages"
};
}
rpc ListEcsImages(ListImagesReq) returns (ListImagesResp);
}
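The inline `google.api.http` options deleted above are not lost: the same REST bindings reappear in the standalone gRPC API configuration file below, which protoc-gen-grpc-gateway reads in place of per-rpc annotations.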

View File

@ -0,0 +1,28 @@
type: google.api.Service
config_version: 3
http:
rules:
- selector: ecs.EcsService.CreateMultipleEcs
post: "/apis/ecs/createMultiple"
body: "*"
- selector: ecs.EcsService.CreateEcs
post: "/apis/ecs/create"
body: "*"
- selector: ecs.EcsService.DeleteEcs
post: "/apis/ecs/delete"
body: "*"
- selector: ecs.EcsService.UpdateEcs
put: "/apis/ecs/update"
body: "*"
- selector: ecs.EcsService.ListEcsDetail
get: "/apis/ecs/detail"
- selector: ecs.EcsService.ListEcs
get: "/apis/ecs"
- selector: ecs.EcsService.ListEcsAll
get: "/apis/ecs/all"
- selector: ecs.EcsService.ActionEcs
post: "/apis/ecs/action"
body: "*"
- selector: ecs.EcsService.ListEcsImages
get: "/apis/ecs/listImages"

56
adaptor/pcm_vm/main.go Normal file
View File

@ -0,0 +1,56 @@
package main
import (
vmpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
vmserver "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/config"
"code.gitlink.org.cn/JCCE/PCM.git/common/global"
"code.gitlink.org.cn/JCCE/PCM.git/common/server"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"flag"
"github.com/golang/glog"
"github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc"
"log"
)
func main() {
var configFile string
flag.StringVar(&configFile, "conf", "configs/tenanter.yaml", "tenanter.yaml")
flag.Parse()
defer glog.Flush()
if err := tenanter.LoadCloudConfigsFromFile(configFile); err != nil {
if !errors.Is(err, tenanter.ErrLoadTenanterFileEmpty) {
glog.Fatalf("LoadCloudConfigsFromFile error %+v", err)
}
glog.Warningf("LoadCloudConfigsFromFile empty file path %s", configFile)
}
// 初始化配置以及数据库
config.InitConfig()
// 新建一个zap logger实例
logger, err := zap.NewDevelopment()
if err != nil {
log.Fatalf("cannot create logger: %v", err)
}
// logger.Sugar().Fatal 直接可以少几行判错的代码
logger.Sugar().Fatal(server.RunGRPCServer(&server.GRPCConfig{
Name: "pcm_vm",
Addr: global.S.VmInfo.Address,
RegisterFunc: func(g *grpc.Server) {
vmpb.RegisterEcsServiceServer(g, &vmserver.Server{})
},
Logger: logger,
}))
}
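Given the `flag.StringVar` call above, the adapter would presumably be started with something like `go run ./adaptor/pcm_vm -conf configs/tenanter.yaml`, falling back to the default `configs/tenanter.yaml` when the flag is omitted.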

View File

@ -1,19 +1,20 @@
package server
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"context"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/vm/server/ecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"github.com/golang/glog"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type Server struct {
ecspb.UnimplementedEcsServiceServer
}
// CreateMultipleEcs return create cloudy ecs
func (s *Server) CreateMultipleEcs(ctx context.Context, reqs *pbecs.CreateEcsMultipleReq) (*pbecs.CreateEcsMultipleResp, error) {
resp, err := ecs.CreateMultipleEcs(ctx, reqs)
func (s *Server) CreateMultipleEcs(ctx context.Context, reqs *ecspb.CreateEcsMultipleReq) (*ecspb.CreateEcsMultipleResp, error) {
resp, err := CreateMultipleEcs(ctx, reqs)
if err != nil {
glog.Errorf("ListEcsDetail error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -22,8 +23,8 @@ func (s *Server) CreateMultipleEcs(ctx context.Context, reqs *pbecs.CreateEcsMul
}
// CreateEcs return create ecs
func (s *Server) CreateEcs(ctx context.Context, req *pbecs.CreateEcsReq) (*pbecs.CreateEcsResp, error) {
resp, err := ecs.CreateEcs(ctx, req)
func (s *Server) CreateEcs(ctx context.Context, req *ecspb.CreateEcsReq) (*ecspb.CreateEcsResp, error) {
resp, err := CreateEcs(ctx, req)
if err != nil {
glog.Errorf("ListEcsDetail error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -32,8 +33,8 @@ func (s *Server) CreateEcs(ctx context.Context, req *pbecs.CreateEcsReq) (*pbecs
}
// DeleteEcs return Delete ecs
func (s *Server) DeleteEcs(ctx context.Context, req *pbecs.DeleteEcsReq) (*pbecs.DeleteEcsResp, error) {
resp, err := ecs.DeleteEcs(ctx, req)
func (s *Server) DeleteEcs(ctx context.Context, req *ecspb.DeleteEcsReq) (*ecspb.DeleteEcsResp, error) {
resp, err := DeleteEcs(ctx, req)
if err != nil {
glog.Errorf("ListEcsDetail error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -42,8 +43,8 @@ func (s *Server) DeleteEcs(ctx context.Context, req *pbecs.DeleteEcsReq) (*pbecs
}
// UpdateEcs return Update ecs
func (s *Server) UpdateEcs(ctx context.Context, req *pbecs.UpdateEcsReq) (*pbecs.UpdateEcsResp, error) {
resp, err := ecs.UpdateEcs(ctx, req)
func (s *Server) UpdateEcs(ctx context.Context, req *ecspb.UpdateEcsReq) (*ecspb.UpdateEcsResp, error) {
resp, err := UpdateEcs(ctx, req)
if err != nil {
glog.Errorf("ListEcsDetail error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -52,8 +53,8 @@ func (s *Server) UpdateEcs(ctx context.Context, req *pbecs.UpdateEcsReq) (*pbecs
}
// ListEcsDetail return ecs detail
func (s *Server) ListEcsDetail(ctx context.Context, req *pbecs.ListDetailReq) (*pbecs.ListDetailResp, error) {
resp, err := ecs.ListDetail(ctx, req)
func (s *Server) ListEcsDetail(ctx context.Context, req *ecspb.ListDetailReq) (*ecspb.ListDetailResp, error) {
resp, err := ListDetail(ctx, req)
if err != nil {
glog.Errorf("ListEcsDetail error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -62,8 +63,8 @@ func (s *Server) ListEcsDetail(ctx context.Context, req *pbecs.ListDetailReq) (*
}
// ListEcs return ecs list
func (s *Server) ListEcs(ctx context.Context, req *pbecs.ListReq) (*pbecs.ListResp, error) {
resp, err := ecs.List(ctx, req)
func (s *Server) ListEcs(ctx context.Context, req *ecspb.ListReq) (*ecspb.ListResp, error) {
resp, err := List(ctx, req)
if err != nil {
glog.Errorf("ListEcs error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -72,8 +73,8 @@ func (s *Server) ListEcs(ctx context.Context, req *pbecs.ListReq) (*pbecs.ListRe
}
// ListEcsAll return all ecs
func (s *Server) ListEcsAll(ctx context.Context, req *pbecs.ListAllReq) (*pbecs.ListResp, error) {
resp, err := ecs.ListAll(ctx)
func (s *Server) ListEcsAll(ctx context.Context, req *ecspb.ListAllReq) (*ecspb.ListResp, error) {
resp, err := ListAll(ctx)
if err != nil {
glog.Errorf("ListEcsAll error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -82,8 +83,8 @@ func (s *Server) ListEcsAll(ctx context.Context, req *pbecs.ListAllReq) (*pbecs.
}
// ActionEcs return ecs action
func (s *Server) ActionEcs(ctx context.Context, req *pbecs.ActionReq) (*pbecs.ActionResp, error) {
resp, err := ecs.ActionEcs(ctx, req)
func (s *Server) ActionEcs(ctx context.Context, req *ecspb.ActionReq) (*ecspb.ActionResp, error) {
resp, err := ActionEcs(ctx, req)
if err != nil {
glog.Errorf("ActionEcs error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())
@ -92,8 +93,8 @@ func (s *Server) ActionEcs(ctx context.Context, req *pbecs.ActionReq) (*pbecs.Ac
}
// ListEcsImages return ecs image list
func (s *Server) ListEcsImages(ctx context.Context, req *pbecs.ListImagesReq) (*pbecs.ListImagesResp, error) {
resp, err := ecs.ListImages(ctx, req)
func (s *Server) ListEcsImages(ctx context.Context, req *ecspb.ListImagesReq) (*ecspb.ListImagesResp, error) {
resp, err := ListImages(ctx, req)
if err != nil {
glog.Errorf("ListImages error %+v", err)
return nil, status.Errorf(codes.Internal, err.Error())

View File

@ -1,13 +1,13 @@
package ecs
package server
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
ecser "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/service"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"context"
"sync"
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/vm/service/ecser"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"github.com/golang/glog"
"github.com/pkg/errors"
)
@ -54,8 +54,8 @@ func CreateEcs(ctx context.Context, req *pbecs.CreateEcsReq) (*pbecs.CreateEcsRe
var (
ecs ecser.Ecser
)
tenanters, err := tenanter.GetTenanters(req.Provider)
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.Wrap(err, "get tenanters failed")
}
@ -74,8 +74,8 @@ func DeleteEcs(ctx context.Context, req *pbecs.DeleteEcsReq) (*pbecs.DeleteEcsRe
var (
ecs ecser.Ecser
)
tenanters, err := tenanter.GetTenanters(req.Provider)
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.Wrap(err, "get tenanters failed")
}
@ -94,8 +94,8 @@ func UpdateEcs(ctx context.Context, req *pbecs.UpdateEcsReq) (*pbecs.UpdateEcsRe
var (
ecs ecser.Ecser
)
tenanters, err := tenanter.GetTenanters(req.Provider)
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.Wrap(err, "get tenanters failed")
}
@ -116,12 +116,12 @@ func ListDetail(ctx context.Context, req *pbecs.ListDetailReq) (*pbecs.ListDetai
ecs ecser.Ecser
)
tenanters, err := tenanter.GetTenanters(req.Provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.WithMessagef(err, "provider %v regionId %v", req.Provider, req.RegionId)
}
@ -146,12 +146,12 @@ func List(ctx context.Context, req *pbecs.ListReq) (*pbecs.ListResp, error) {
ecses []*pbecs.EcsInstance
)
tenanters, err := tenanter.GetTenanters(req.Provider)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
if err != nil {
return nil, errors.WithMessage(err, "getTenanters error")
}
regions := tenanter.GetAllRegionIds(req.Provider)
regions := tenanter.GetAllRegionIds(pbtenant.CloudProvider(req.Provider))
wg.Add(len(tenanters) * len(regions))
for _, t := range tenanters {
@ -202,12 +202,12 @@ func ListAll(ctx context.Context) (*pbecs.ListResp, error) {
ecses []*pbecs.EcsInstance
)
wg.Add(len(pbtenant.CloudProvider_name))
for k := range pbtenant.CloudProvider_name {
wg.Add(len(pbecs.CloudProvider_name))
for k := range pbecs.CloudProvider_name {
go func(provider int32) {
defer wg.Done()
resp, err := List(ctx, &pbecs.ListReq{Provider: pbtenant.CloudProvider(provider)})
resp, err := List(ctx, &pbecs.ListReq{Provider: pbecs.CloudProvider(provider)})
if err != nil {
glog.Errorf("List error %v", err)
return
@ -227,8 +227,8 @@ func ActionEcs(ctx context.Context, req *pbecs.ActionReq) (*pbecs.ActionResp, er
var (
ecs ecser.Ecser
)
tenanters, err := tenanter.GetTenanters(req.Provider)
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.Wrap(err, "get tenanters failed")
}
@ -247,8 +247,8 @@ func ListImages(ctx context.Context, req *pbecs.ListImagesReq) (*pbecs.ListImage
var (
ecs ecser.Ecser
)
tenanters, err := tenanter.GetTenanters(req.Provider)
region, err := tenanter.NewRegion(req.Provider, req.RegionId)
tenanters, err := tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
region, err := tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
if err != nil {
return nil, errors.Wrap(err, "get tenanters failed")
}
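One pattern these hunks preserve from the old code: `err` is assigned twice (`GetTenanters`, then `NewRegion`) before the single check, so a tenant-lookup failure is silently overwritten. A sketch of the safer shape, checking each call as it returns (hypothetical helper name; imports follow the file above):

```go
package server

import (
	pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
	"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
	pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"

	"github.com/pkg/errors"
)

// resolveTenantersAndRegion checks each error before the next call can mask it.
func resolveTenantersAndRegion(req *pbecs.CreateEcsReq) (ts []tenanter.Tenanter, region tenanter.Region, err error) {
	ts, err = tenanter.GetTenanters(pbtenant.CloudProvider(req.Provider))
	if err != nil {
		return nil, region, errors.Wrap(err, "get tenanters failed")
	}
	region, err = tenanter.NewRegion(pbtenant.CloudProvider(req.Provider), req.RegionId)
	if err != nil {
		return nil, region, errors.Wrap(err, "new region failed")
	}
	return ts, region, nil
}
```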

View File

@ -1,8 +1,8 @@
package ecs
package server
import (
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"context"
"testing"
)
@ -16,9 +16,9 @@ func TestListDetail(t *testing.T) {
args args
wantErr bool
}{
{name: "ali", args: args{req: &pbecs.ListDetailReq{Provider: pbtenant.CloudProvider_ali, RegionId: int32(pbtenant.AliRegionId_ali_cn_hangzhou), PageNumber: 1, PageSize: 10}}, wantErr: false},
{name: "tencent", args: args{req: &pbecs.ListDetailReq{Provider: pbtenant.CloudProvider_tencent, RegionId: int32(pbtenant.TencentRegionId_tc_ap_beijing), PageNumber: 1, PageSize: 10}}, wantErr: false},
{name: "aws", args: args{req: &pbecs.ListDetailReq{Provider: pbtenant.CloudProvider_aws, RegionId: int32(pbtenant.AwsRegionId_aws_us_east_2), PageNumber: 1, PageSize: 10}}, wantErr: false},
{name: "ali", args: args{req: &pbecs.ListDetailReq{Provider: pbecs.CloudProvider_ali, RegionId: int32(pbtenant.AliRegionId_ali_cn_hangzhou), PageNumber: 1, PageSize: 10}}, wantErr: false},
{name: "tencent", args: args{req: &pbecs.ListDetailReq{Provider: pbecs.CloudProvider_tencent, RegionId: int32(pbtenant.TencentRegionId_tc_ap_beijing), PageNumber: 1, PageSize: 10}}, wantErr: false},
{name: "aws", args: args{req: &pbecs.ListDetailReq{Provider: pbecs.CloudProvider_huawei, RegionId: int32(pbtenant.HuaweiRegionId_hw_af_south_1), PageNumber: 1, PageSize: 10}}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@ -41,10 +41,10 @@ func TestList(t *testing.T) {
args args
wantErr bool
}{
{name: "ali", args: args{req: &pbecs.ListReq{Provider: pbtenant.CloudProvider_ali}}, wantErr: false},
{name: "tencent", args: args{req: &pbecs.ListReq{Provider: pbtenant.CloudProvider_tencent}}, wantErr: false},
{name: "huawei", args: args{req: &pbecs.ListReq{Provider: pbtenant.CloudProvider_huawei}}, wantErr: false},
{name: "aws", args: args{req: &pbecs.ListReq{Provider: pbtenant.CloudProvider_aws}}, wantErr: false},
{name: "ali", args: args{req: &pbecs.ListReq{Provider: pbecs.CloudProvider_ali}}, wantErr: false},
{name: "tencent", args: args{req: &pbecs.ListReq{Provider: pbecs.CloudProvider_tencent}}, wantErr: false},
{name: "huawei", args: args{req: &pbecs.ListReq{Provider: pbecs.CloudProvider_huawei}}, wantErr: false},
{name: "aws", args: args{req: &pbecs.ListReq{Provider: pbecs.CloudProvider_huawei}}, wantErr: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@ -1,8 +1,9 @@
package ecs
package server
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"os"
"testing"
)
@ -12,20 +13,20 @@ var (
)
func TestMain(m *testing.M) {
err := tenanter.LoadCloudConfigs("../../../config.yaml")
err := tenanter.LoadCloudConfigs("../../../tenanter.yaml")
if err != nil {
panic(err)
}
if aliTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider_ali); err != nil {
if aliTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider(pbecs.CloudProvider_ali)); err != nil {
panic("get aliTenant failed")
}
if tcTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider_tencent); err != nil {
if tcTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider(pbecs.CloudProvider_tencent)); err != nil {
panic("get tcTenant failed")
}
if hwTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider_huawei); err != nil {
if hwTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider(pbecs.CloudProvider_huawei)); err != nil {
panic("get hwTenant failed")
}
if k8sTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider_k8s); err != nil {
if k8sTenant, err = tenanter.GetTenanters(pbtenant.CloudProvider(pbecs.CloudProvider_k8s)); err != nil {
panic("get awsTenant failed")
}
os.Exit(m.Run())

View File

@ -1,12 +1,11 @@
package ecser
package service
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"context"
"sync"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
openapi "github.com/alibabacloud-go/darabonba-openapi/client"
string_ "github.com/alibabacloud-go/darabonba-string/client"
aliecs "github.com/alibabacloud-go/ecs-20140526/v2/client"
@ -201,7 +200,7 @@ func (ecs *AliEcs) ListDetail(ctx context.Context, req *pbecs.ListDetailReq) (*p
InnerIps = append(InnerIps, *vv)
}
ecses[k] = &pbecs.EcsInstance{
Provider: pbtenant.CloudProvider_ali,
Provider: pbecs.CloudProvider_ali,
AccountName: ecs.tenanter.AccountName(),
InstanceId: *v.InstanceId,
InstanceName: *v.InstanceName,

View File

@ -1,4 +1,4 @@
package ecser
package service
//TODO aws
//

View File

@ -1,12 +1,10 @@
package ecser
package service
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"context"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
"github.com/golang/glog"
"github.com/pkg/errors"
)
@ -25,7 +23,7 @@ type Ecser interface {
ListEcsImages(ctx context.Context, req *pbecs.ListImagesReq) (resp *pbecs.ListImagesResp, err error) //查询镜像列表
}
func NewEcsClient(provider pbtenant.CloudProvider, region tenanter.Region, tenant tenanter.Tenanter) (ecser Ecser, err error) {
func NewEcsClient(provider pbecs.CloudProvider, region tenanter.Region, tenant tenanter.Tenanter) (ecser Ecser, err error) {
// 部分sdk会在内部panic
defer func() {
if err1 := recover(); err1 != nil {
@@ -35,14 +33,13 @@ func NewEcsClient(provider pbtenant.CloudProvider, region tenanter.Region, tenan
}()
switch provider {
case pbtenant.CloudProvider_ali:
case pbecs.CloudProvider_ali:
return newAliEcsClient(region, tenant)
case pbtenant.CloudProvider_tencent:
case pbecs.CloudProvider_tencent:
return newTencentCvmClient(region, tenant)
case pbtenant.CloudProvider_huawei:
case pbecs.CloudProvider_huawei:
return newHuaweiEcsClient(region, tenant)
//TODO aws
case pbtenant.CloudProvider_harvester:
case pbecs.CloudProvider_harvester:
return newHarvesterClient(tenant)
}
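For readers tracking call sites, the factory above is consumed roughly as in the sketch below; the function name is hypothetical, the tenant value comes from tenanter.GetTenanters as in the TestMain hunk earlier, and error handling is condensed.

```go
package service

import (
	"context"

	pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
	"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
	pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
)

// Sketch only: typical consumption of NewEcsClient for the Ali provider.
// listAliDetail is an illustrative name, not part of this diff.
func listAliDetail(tenant tenanter.Tenanter) (*pbecs.ListDetailResp, error) {
	region, err := tenanter.NewRegion(pbtenant.CloudProvider_ali, int32(pbtenant.AliRegionId_ali_cn_hangzhou))
	if err != nil {
		return nil, err
	}
	client, err := NewEcsClient(pbecs.CloudProvider_ali, region, tenant)
	if err != nil {
		return nil, err
	}
	return client.ListDetail(context.Background(), &pbecs.ListDetailReq{})
}
```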

View File

@@ -1,24 +1,24 @@
package ecser
package service
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"context"
"testing"
)
func TestEcser_ListDetail(t *testing.T) {
region, _ := tenanter.NewRegion(pbtenant.CloudProvider_ali, int32(pbtenant.AliRegionId_ali_cn_hangzhou))
ali, _ := NewEcsClient(pbtenant.CloudProvider_ali, region, aliTenant[0])
aliFailed, _ := NewEcsClient(pbtenant.CloudProvider_ali, region, tenanter.NewTenantWithAccessKey("empty", "", "", "", ""))
ali, _ := NewEcsClient(pbecs.CloudProvider(pbtenant.CloudProvider_ali), region, aliTenant[0])
aliFailed, _ := NewEcsClient(pbecs.CloudProvider(pbtenant.CloudProvider_ali), region, tenanter.NewTenantWithAccessKey("empty", "", "", "", ""))
region, _ = tenanter.NewRegion(pbtenant.CloudProvider_tencent, int32(pbtenant.TencentRegionId_tc_ap_beijing))
tc, _ := NewEcsClient(pbtenant.CloudProvider_tencent, region, tcTenant[0])
tcFailed, _ := NewEcsClient(pbtenant.CloudProvider_tencent, region, tenanter.NewTenantWithAccessKey("empty", "", "", "", ""))
tc, _ := NewEcsClient(pbecs.CloudProvider(pbtenant.CloudProvider_tencent), region, tcTenant[0])
tcFailed, _ := NewEcsClient(pbecs.CloudProvider(pbtenant.CloudProvider_tencent), region, tenanter.NewTenantWithAccessKey("empty", "", "", "", ""))
region, _ = tenanter.NewRegion(pbtenant.CloudProvider_huawei, int32(pbtenant.HuaweiRegionId_hw_cn_southwest_2))
hw, _ := NewEcsClient(pbtenant.CloudProvider_huawei, region, hwTenant[0])
hw, _ := NewEcsClient(pbecs.CloudProvider(pbtenant.CloudProvider_huawei), region, hwTenant[0])
// hwFailed, _ := newHuaweiEcsClient(int32(pbtenant.HuaweiRegionId_hw_cn_north_1), tenanter.NewTenantWithAccessKey("empty", "", "", ""))
type args struct {

View File

@@ -1,4 +1,4 @@
package ecser
package service
//
// import (

View File

@@ -1,9 +1,9 @@
package ecser
package service
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
pbtenant "code.gitlink.org.cn/JCCE/PCM.git/tenant/gen/idl"
"context"
"fmt"
"github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1"
@@ -80,7 +80,7 @@ func (h *HarVMer) CreateEcs(ctx context.Context, req *pbecs.CreateEcsReq) (resp
if req.ImageId != "" {
vmImage, err = h.harvCli.HarvesterhciV1beta1().VirtualMachineImages(req.GetNamespace()).Get(context.TODO(), req.ImageId, k8smetav1.GetOptions{})
if err != nil {
return nil, errors.Wrap(err, "get vm image error")
return nil, errors.Wrap(err, "get pcm_vm image error")
}
} else {
return nil, errors.Wrap(err, "Image ID given does not exist!")
@@ -173,7 +173,7 @@ func (h *HarVMer) CreateEcs(ctx context.Context, req *pbecs.CreateEcsReq) (resp
isFinished = true
}
return &pbecs.CreateEcsResp{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider_harvester,
AccountName: h.tenanter.AccountName(),
InstanceIdSets: InstanceIds,
Finished: isFinished,
@@ -368,10 +368,10 @@ func (h *HarVMer) DeleteEcs(ctx context.Context, req *pbecs.DeleteEcsReq) (resp
}
err = h.harvCli.KubevirtV1().VirtualMachines(req.GetNamespace()).Delete(context.TODO(), req.GetInstanceName(), k8smetav1.DeleteOptions{})
if err != nil {
logrus.Errorf("delete vm error: %v", err)
logrus.Errorf("delete pcm_vm error: %v", err)
return nil, errors.Wrap(err, "VM could not be deleted successfully: %w")
}
//delete vm disk
//delete pcm_vm disk
if req.DiskName != "" {
for _, delName := range strings.Split(req.DiskName, ",") {
for _, disk := range vm.Spec.Template.Spec.Volumes {
@@ -387,7 +387,7 @@ func (h *HarVMer) DeleteEcs(ctx context.Context, req *pbecs.DeleteEcsReq) (resp
}
}
return &pbecs.DeleteEcsResp{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider_harvester,
AccountName: h.tenanter.AccountName(),
}, nil
}
@@ -440,7 +440,7 @@ func (h *HarVMer) UpdateEcs(ctx context.Context, req *pbecs.UpdateEcsReq) (resp
}
}
return &pbecs.UpdateEcsResp{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider_harvester,
AccountName: h.tenanter.AccountName(),
}, nil
}
@@ -474,7 +474,7 @@ func (h *HarVMer) ListDetail(ctx context.Context, req *pbecs.ListDetailReq) (res
IP = append(IP, vmiMap[vm.Name].Status.Interfaces[0].IP)
}
ecses[k] = &pbecs.EcsInstance{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider_harvester,
AccountName: h.tenanter.AccountName(),
Status: state,
InstanceName: vm.Name,
@@ -520,7 +520,7 @@ func (h *HarVMer) ActionEcs(ctx context.Context, req *pbecs.ActionReq) (resp *pb
status = "Running"
}
return &pbecs.ActionResp{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider_harvester,
AccountName: h.tenanter.AccountName(),
Status: status,
}, nil
@@ -580,7 +580,7 @@ func (h *HarVMer) ListEcsImages(ctx context.Context, req *pbecs.ListImagesReq) (
imageList := make([]*pbecs.Image, 0)
for _, im := range images.Items {
image := pbecs.Image{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider(pbtenant.CloudProvider_harvester),
Id: im.Name,
DisplayName: im.Spec.DisplayName,
Namespace: im.Namespace,
@@ -589,7 +589,7 @@ func (h *HarVMer) ListEcsImages(ctx context.Context, req *pbecs.ListImagesReq) (
}
return &pbecs.ListImagesResp{
Provider: pbtenant.CloudProvider_harvester,
Provider: pbecs.CloudProvider(pbtenant.CloudProvider_harvester),
Images: imageList,
}, nil
}
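On the error handling in the hunks above: with github.com/pkg/errors, the message argument to errors.Wrap is plain text, not a format string, and the cause is attached by Wrap itself; Wrapf is the variant for formatted messages (ordinary fmt verbs, no %w). A minimal sketch of the idiom, with an illustrative deleteFn:

```go
package service

import "github.com/pkg/errors"

// Sketch: pkg/errors idiom. Wrap/Wrapf keep err as the cause, so callers can
// still unwrap it; deleteFn stands in for the real delete call.
func deleteVM(name string, deleteFn func(string) error) error {
	if err := deleteFn(name); err != nil {
		return errors.Wrapf(err, "VM %s could not be deleted", name)
	}
	return nil
}
```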

View File

@@ -1,4 +1,4 @@
package ecser
package service
import (
"bytes"
@@ -368,7 +368,7 @@ func defaultListOpts(ctx *cli.Context) *ntypes.ListOpts {
return listOpts
}
//NewTrue returns a pointer to true
// NewTrue returns a pointer to true
func NewTrue() *bool {
b := true
return &b

View File

@@ -1,12 +1,11 @@
package ecser
package service
import (
pbecs "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_vm/gen/idl"
"context"
"strconv"
"code.gitlink.org.cn/JCCE/PCM.git/common/tenanter"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbecs"
"code.gitlink.org.cn/JCCE/PCM.git/lan_trans/idl/pbtenant"
string_ "github.com/alibabacloud-go/darabonba-string/client"
util "github.com/alibabacloud-go/tea-utils/service"
"github.com/alibabacloud-go/tea/tea"
@@ -229,7 +228,7 @@ func (ecs *HuaweiEcs) ListDetail(ctx context.Context, req *pbecs.ListDetailReq)
}
}
ecses[k] = &pbecs.EcsInstance{
Provider: pbtenant.CloudProvider_huawei,
Provider: pbecs.CloudProvider_huawei,
AccountName: ecs.tenanter.AccountName(),
InstanceId: v.Id,
InstanceName: v.Name,

Some files were not shown because too many files have changed in this diff.