Merge pull request 'new feature:pcm_slurm' (#22) from slurm_zqj into master

This commit is contained in:
zhouqunjie 2022-11-16 15:01:21 +08:00
commit b965aad6b1
49 changed files with 3632 additions and 1209 deletions

View File

@ -1285,7 +1285,7 @@ func (r *CreateClusterNodePoolFromExistingAsgResponse) FromJsonString(s string)
type CreateClusterNodePoolRequest struct {
*tchttp.BaseRequest
// cluster id
// cluster id
ClusterId *string `json:"ClusterId,omitempty" name:"ClusterId"`
// AutoScalingGroupPara AS组参数
@ -7604,7 +7604,7 @@ type PrometheusTemplate struct {
// 模板维度,支持以下类型
// instance 实例级别
// cluster 集群级别
// cluster 集群级别
Level *string `json:"Level,omitempty" name:"Level"`
// 模板描述
@ -8210,7 +8210,7 @@ type Tag struct {
type TagSpecification struct {
// 标签绑定的资源类型,当前支持类型:"cluster"
// 标签绑定的资源类型,当前支持类型:"cluster"
// 注意:此字段可能返回 null表示取不到有效值。
ResourceType *string `json:"ResourceType,omitempty" name:"ResourceType"`

View File

@ -0,0 +1,16 @@
package main
//
//import (
// user_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/user"
// "fmt"
//)
//
//func main() {
//
// fmt.Printf("Found user")
//
// userList := user_info.Get_all_users()
// fmt.Printf("Found %d user \n", userList.WithoutDefaults)
//
//}

View File

@ -0,0 +1,36 @@
package main
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
slurmdb_qos_rec_t Get_qos() {
slurmdb_qos_cond_t *qos_cond = NULL;
void *conn = slurmdb_connection_get();
List asdlist = slurmdb_qos_get(conn, qos_cond);
uint16_t size = slurm_list_count(asdlist);
slurmdb_qos_rec_t QosArray[size];
slurmdb_qos_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(asdlist);
int i = 0;
while ((rec = slurm_list_next(itr))) {
QosArray[i] = *rec;
i++;
}
slurm_list_destroy(asdlist);
return *QosArray;
}
*/
import "C"
import "fmt"
func main() {
a := C.Get_qos()
fmt.Println("name: ", C.GoString(a.name))
}

View File

@ -405,7 +405,7 @@ func Print_Ctl_conf(go_struct Ctl_conf) {
fmt.Printf("%s:\t %d\n", "batch start timeout", go_struct.Batch_start_timeout)
fmt.Printf("%s:\t %d\n", "boot time", go_struct.Boot_time)
fmt.Printf("%s:\t %s\n", "checkpoint type", go_struct.Checkpoint_type)
fmt.Printf("%s:\t %s\n", "cluster name", go_struct.Cluster_name)
fmt.Printf("%s:\t %s\n", "cluster name", go_struct.Cluster_name)
fmt.Printf("%s:\t %d\n", "complete wait", go_struct.Complete_wait)
fmt.Printf("%s:\t %d\n", "debug flags", go_struct.Debug_flags)
fmt.Printf("%s:\t %d\n", "def mem per cpu", go_struct.Def_mem_per_cpu)

View File

@ -0,0 +1,103 @@
package test
//
///*
//#cgo LDFLAGS: -lslurmdb
//
//#include <stdio.h>
//#include <slurm/slurm.h>
//#include <slurm/slurmdb.h>
//#include <memory.h>
//#include <malloc.h>
//
//typedef struct user_info_msg {
// uint32_t record_count;
// slurmdb_user_rec_t *user_array;
//} user_info_msg_t;
//
//typedef struct slurmdb_user_rec{
// uint16_t admin_level;
// List assoc_list;
// List coord_accts;
// char *default_acct;
// char *default_wckey;
// char *name;
// char *old_name;
// uint32_t uid;
// List wckey_list;
//} slurmdb_user_rec_t_f;
//
//struct user_info_msg get_user_info() {
// struct user_info_msg userinfo;
// List userList = NULL;
// slurmdb_user_cond_t *user_cond = NULL;
// void *db_conn;
// db_conn = slurmdb_connection_get();
// userList = slurmdb_users_get(db_conn, user_cond);
// slurmdb_connection_close(&db_conn);
//
// slurmdb_user_rec_t *rec = NULL;
// ListIterator itr = slurm_list_iterator_create(userList);
// int i = 0;
// uint32_t length;
// length = slurm_list_count(userList);
// userinfo.record_count = length;
// userinfo.user_array = malloc(length * sizeof(slurmdb_user_rec_t));
// while ((rec = slurm_list_next(itr))) {
// userinfo.user_array[i] = *rec;
// i++;
// }
// return userinfo;
//}
//
//struct slurmdb_user_rec *user_from_list(struct user_info_msg *list, int i) {
// return (struct slurmdb_user_rec *) &list->user_array[i];
//}
//
//*/
//import "C"
//
//type UserInfo struct {
// //uint16_t admin_level; /* really slurmdb_admin_level_t but for
// //packing purposes needs to be uint16_t */
// //List assoc_list; /* list of slurmdb_association_rec_t *'s */
// //List coord_accts; /* list of slurmdb_coord_rec_t *'s */
// //char *default_acct;
// //char *default_wckey;
// Name string
// OldName string
// //uint32_t uid;
// //List wckey_list; /* list of slurmdb_wckey_rec_t *'s */
//}
//
//type UserInfoMsg struct {
// Last_update int64
// Record_count uint32
// UserInfoList []UserInfo
//}
//
//func User_descriptor_convert_c_to_go(c_struct *C.struct_slurmdb_user_rec) UserInfo {
// var go_struct UserInfo
// go_struct.Name = C.GoString(c_struct.name)
// return go_struct
//}
//
//// []slurmpb.UserInfo
//func GetUserInfo() UserInfoMsg {
// var go_user_buffer UserInfoMsg
// c_user_buffer := C.get_user_info()
// go_user_buffer.Record_count = uint32(c_user_buffer.record_count)
// go_user_buffer.UserInfoList = make([]UserInfo, c_user_buffer.record_count, c_user_buffer.record_count)
//
// for i := uint32(0); i < go_user_buffer.Record_count; i++ {
// user := C.user_from_list(&c_user_buffer, C.int(i))
// go_user := User_descriptor_convert_c_to_go(user)
// go_user_buffer.UserInfoList[i] = go_user
// }
// return go_user_buffer
//}
//
//func main() {
// userInfoMsg := GetUserInfo()
// println(userInfoMsg.UserInfoList[0].Name)
//}

View File

@ -0,0 +1,90 @@
package user
//
///*
//#cgo LDFLAGS: -lslurmdb
//
//#include <stdio.h>
//#include <slurm/slurm.h>
//#include <slurm/slurmdb.h>
//#include <memory.h>
//#include <malloc.h>
//
//typedef struct user_info_msg {
// uint32_t record_count;
// slurmdb_user_rec_t *user_array;
//} user_info_msg_t;
//
//typedef struct slurmdb_user_rec{
// uint16_t admin_level;
// List assoc_list;
// List coord_accts;
// char *default_acct;
// char *default_wckey;
// char *name;
// char *old_name;
// uint32_t uid;
// List wckey_list;
//} slurmdb_user_rec_t_f;
//
//struct user_info_msg get_user_info() {
// struct user_info_msg userinfo;
// List userList = NULL;
// slurmdb_user_cond_t *user_cond = NULL;
// void *db_conn;
// db_conn = slurmdb_connection_get();
// userList = slurmdb_users_get(db_conn, user_cond);
// slurmdb_connection_close(&db_conn);
//
// slurmdb_user_rec_t *rec = NULL;
// ListIterator itr = slurm_list_iterator_create(userList);
// int i = 0;
// uint32_t length;
// length = slurm_list_count(userList);
// userinfo.record_count = length;
// userinfo.user_array = malloc(length * sizeof(slurmdb_user_rec_t));
// while ((rec = slurm_list_next(itr))) {
// userinfo.user_array[i] = *rec;
// i++;
// }
// return userinfo;
//}
//
//struct slurmdb_user_rec *user_from_list(struct user_info_msg *list, int i) {
// return (struct slurmdb_user_rec *) &list->user_array[i];
//}
//
//*/
//import "C"
//
//type UserInfo struct {
// Name string
// OldName string
//}
//
//type UserInfoMsg struct {
// Last_update int64
// Record_count uint32
// UserInfoList []UserInfo
//}
//
//func User_descriptor_convert_c_to_go(c_struct *C.struct_slurmdb_user_rec) UserInfo {
// var go_struct UserInfo
// go_struct.Name = C.GoString(c_struct.name)
// return go_struct
//}
//
//// []slurmpb.UserInfo
//func GetUserInfo() UserInfoMsg {
// var go_user_buffer UserInfoMsg
// c_user_buffer := C.get_user_info()
// go_user_buffer.Record_count = uint32(c_user_buffer.record_count)
// go_user_buffer.UserInfoList = make([]UserInfo, c_user_buffer.record_count, c_user_buffer.record_count)
//
// for i := uint32(0); i < go_user_buffer.Record_count; i++ {
// user := C.user_from_list(&c_user_buffer, C.int(i))
// go_user := User_descriptor_convert_c_to_go(user)
// go_user_buffer.UserInfoList[i] = go_user
// }
// return go_user_buffer
//}

View File

@ -0,0 +1,80 @@
package user
//
///*
//#cgo LDFLAGS: -lslurmdb
//
//#include <stdio.h>
//#include <slurm/slurm.h>
//#include <slurm/slurmdb.h>
//
//List Get_user_List() {
// __attribute__((unused)) List slurmdb_user_rec_t = NULL;
// slurmdb_user_cond_t *user_cond = NULL;
// slurmdb_user_rec_t = slurmdb_users_get(slurmdb_connection_get(), user_cond);
// return slurmdb_user_rec_t;
//}
//
//slurmdb_user_rec_t UserArray[900];
//
//struct slurmdb_user_rec_t *user_from_list() {
// __attribute__((unused)) List userList = NULL;
// slurmdb_user_cond_t *user_cond = NULL;
// userList = slurmdb_users_get(slurmdb_connection_get(), user_cond);
// slurmdb_user_rec_t *rec = NULL;
// ListIterator itr = slurm_list_iterator_create(userList);
// int i = 0;
// while ((rec = slurm_list_next(itr))) {
// UserArray[i] = *rec;
// i++;
// }
// return (struct slurmdb_user_rec_t *) UserArray;
//}
//*/
//import "C"
//import (
// "fmt"
//)
//
//type SlurmdbUsers struct {
// slurm_user []SlurmdbUser
//}
//type SlurmdbUser struct {
// admin_level uint16 /* really slurmdb_admin_level_t but for
// packing purposes needs to be uint16_t */
// //assoc_list List /* list of slurmdb_association_rec_t *'s */
// //coord_accts List /* list of slurmdb_coord_rec_t *'s */
// default_acct string
// default_wckey string
// name string
// old_name string
// uid uint32
// //wckey_list List /* list of slurmdb_wckey_rec_t *'s */
//}
//
//func User_descriptor_convert_c_to_go(c_struct *C.slurmdb_user_rec_t) SlurmdbUser {
// var go_struct SlurmdbUser
// go_struct.name = C.GoString(c_struct.name)
// return go_struct
//}
//
//func GetUserInfo() SlurmdbUsers {
// var go_struct SlurmdbUser
// var users SlurmdbUsers
// userCount := int(C.slurm_list_count(C.Get_user_List()))
// C.user_from_list()
// SlurmdbUserList := make([]SlurmdbUser, 0)
// for i := 0; i < userCount; i++ {
// C.user_from_list()
// user := C.UserArray[i]
// go_struct = User_descriptor_convert_c_to_go(&user)
// SlurmdbUserList = append(SlurmdbUserList, go_struct)
// users.slurm_user = append(users.slurm_user, go_struct)
// }
// return users
//}
//
//func main() {
// slurmdbUser := GetUserInfo()
// fmt.Println(slurmdbUser.slurm_user[0].name)
//}

View File

@ -0,0 +1,92 @@
package user
//
//import "C"
//import slurmpb "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
//
///*
//#cgo LDFLAGS: -lslurmdb
//
//#include <stdio.h>
//#include <slurm/slurm.h>
//#include <slurm/slurmdb.h>
//
//
////slurmdb_user_rec_t UserArray[10];
//
////struct slurmdb_user_rec_t *user_from_list() {
////
//// __attribute__((unused)) List userList = NULL;
//// slurmdb_user_cond_t *user_cond = NULL;
//// userList = slurmdb_users_get(slurmdb_connection_get(), user_cond);
//// //slurmdb_user_rec_t *rec = NULL;
//// //ListIterator itr = slurm_list_iterator_create(userList);
//// //int i = 0;
//// //while ((rec = slurm_list_next(itr))) {
//// // UserArray[i] = *rec;
//// // i++;
//// //}
//// //return (struct slurmdb_user_rec_t *) UserArray;
//// return
////}
//
//typedef struct user_info_msg {
// time_t last_update;
// uint32_t record_count;
// slurmdb_user_rec_t *user_array[10];
//} user_info_msg_t;
//
//
//struct user_info_msg_t *user_from_list() {
// user_info_msg_t* user_buffer;
// __attribute__((unused)) List userList = NULL;
// slurmdb_user_cond_t *user_cond = NULL;
// userList = slurmdb_users_get(slurmdb_connection_get(), user_cond);
//
// slurmdb_user_rec_t *rec = NULL;
// ListIterator itr = slurm_list_iterator_create(userList);
// int i = 0;
// int length;
// length = slurm_list_count(userList);
// slurmdb_user_rec_t UserArray[length];
// while ((rec = slurm_list_next(itr))) {
// UserArray[i]=*rec;
// user_buffer->user_array[i] = &UserArray[i];
// i++;
// }
// return user_buffer;
//}
//
//
//*/
//import "C"
//
//func ConvertCToGo(cStruct *C.slurmdb_user_rec_t) slurmpb.UserInfo {
// var goStruct slurmpb.UserInfo
// goStruct.Name = C.GoString(cStruct.name)
// return goStruct
//}
//
//func GetUserInfo() slurmpb.UserInfoList {
// var users slurmpb.UserInfoList
// var goStruct slurmpb.UserInfo
// //userCount := int(C.slurm_list_count(C.Get_user_List()))
// c_users_buffer := C.user_from_list()
// println(c_users_buffer)
// SlurmUserList := make([]slurmpb.UserInfo, 0)
// for i := 0; i < 2; i++ {
// //c_users_buffer := C.user_from_list()
// //println(c_users_buffer)
// C.user_from_list()
// //user := C.UserArray[i]
// //goStruct = ConvertCToGo(&user)
// SlurmUserList = append(SlurmUserList, goStruct)
// users.UserInfos = append(users.UserInfos, &goStruct)
// }
// return users
//}
//
//func main() {
// users := GetUserInfo()
// println(users.UserInfos[0].Name)
//}

View File

@ -1,374 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc (unknown)
// source: idl/slurm.proto
package slurmpb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// slurm version
type SlurmVersion int32
const (
// 0 - 天河 (tianhe)
SlurmVersion_tianhe SlurmVersion = 0
)
// Enum value maps for SlurmVersion.
var (
SlurmVersion_name = map[int32]string{
0: "tianhe",
}
SlurmVersion_value = map[string]int32{
"tianhe": 0,
}
)
func (x SlurmVersion) Enum() *SlurmVersion {
p := new(SlurmVersion)
*p = x
return p
}
func (x SlurmVersion) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (SlurmVersion) Descriptor() protoreflect.EnumDescriptor {
return file_idl_slurm_proto_enumTypes[0].Descriptor()
}
func (SlurmVersion) Type() protoreflect.EnumType {
return &file_idl_slurm_proto_enumTypes[0]
}
func (x SlurmVersion) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use SlurmVersion.Descriptor instead.
func (SlurmVersion) EnumDescriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{0}
}
type ListNodesReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SlurmVersion int32 `protobuf:"varint,1,opt,name=SlurmVersion,proto3" json:"SlurmVersion,omitempty"`
}
func (x *ListNodesReq) Reset() {
*x = ListNodesReq{}
if protoimpl.UnsafeEnabled {
mi := &file_idl_slurm_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListNodesReq) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListNodesReq) ProtoMessage() {}
func (x *ListNodesReq) ProtoReflect() protoreflect.Message {
mi := &file_idl_slurm_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListNodesReq.ProtoReflect.Descriptor instead.
func (*ListNodesReq) Descriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{0}
}
func (x *ListNodesReq) GetSlurmVersion() int32 {
if x != nil {
return x.SlurmVersion
}
return 0
}
type NodeInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Boards int32 `protobuf:"varint,1,opt,name=boards,proto3" json:"boards,omitempty"`
Cpus int32 `protobuf:"varint,2,opt,name=cpus,proto3" json:"cpus,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
RealMemory int32 `protobuf:"varint,4,opt,name=realMemory,proto3" json:"realMemory,omitempty"`
Sockets int32 `protobuf:"varint,5,opt,name=sockets,proto3" json:"sockets,omitempty"`
Threads int32 `protobuf:"varint,6,opt,name=threads,proto3" json:"threads,omitempty"`
}
func (x *NodeInfo) Reset() {
*x = NodeInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_idl_slurm_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *NodeInfo) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*NodeInfo) ProtoMessage() {}
func (x *NodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_idl_slurm_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use NodeInfo.ProtoReflect.Descriptor instead.
func (*NodeInfo) Descriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{1}
}
func (x *NodeInfo) GetBoards() int32 {
if x != nil {
return x.Boards
}
return 0
}
func (x *NodeInfo) GetCpus() int32 {
if x != nil {
return x.Cpus
}
return 0
}
func (x *NodeInfo) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *NodeInfo) GetRealMemory() int32 {
if x != nil {
return x.RealMemory
}
return 0
}
func (x *NodeInfo) GetSockets() int32 {
if x != nil {
return x.Sockets
}
return 0
}
func (x *NodeInfo) GetThreads() int32 {
if x != nil {
return x.Threads
}
return 0
}
type ListNodesResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeInfos []*NodeInfo `protobuf:"bytes,1,rep,name=node_infos,json=nodeInfos,proto3" json:"node_infos,omitempty"`
}
func (x *ListNodesResp) Reset() {
*x = ListNodesResp{}
if protoimpl.UnsafeEnabled {
mi := &file_idl_slurm_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ListNodesResp) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ListNodesResp) ProtoMessage() {}
func (x *ListNodesResp) ProtoReflect() protoreflect.Message {
mi := &file_idl_slurm_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ListNodesResp.ProtoReflect.Descriptor instead.
func (*ListNodesResp) Descriptor() ([]byte, []int) {
return file_idl_slurm_proto_rawDescGZIP(), []int{2}
}
func (x *ListNodesResp) GetNodeInfos() []*NodeInfo {
if x != nil {
return x.NodeInfos
}
return nil
}
var File_idl_slurm_proto protoreflect.FileDescriptor
var file_idl_slurm_proto_rawDesc = []byte{
0x0a, 0x0f, 0x69, 0x64, 0x6c, 0x2f, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x05, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x22, 0x32, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74,
0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x0c, 0x53, 0x6c, 0x75, 0x72,
0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c,
0x53, 0x6c, 0x75, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x9e, 0x01, 0x0a,
0x08, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x6f, 0x61,
0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x62, 0x6f, 0x61, 0x72, 0x64,
0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x70, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
0x04, 0x63, 0x70, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x61,
0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x72,
0x65, 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x6f, 0x63,
0x6b, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x6f, 0x63, 0x6b,
0x65, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x06,
0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x22, 0x3f, 0x0a,
0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x12, 0x2e,
0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x49,
0x6e, 0x66, 0x6f, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x2a, 0x1a,
0x0a, 0x0c, 0x53, 0x6c, 0x75, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a,
0x0a, 0x06, 0x74, 0x69, 0x61, 0x6e, 0x68, 0x65, 0x10, 0x00, 0x32, 0x46, 0x0a, 0x0c, 0x53, 0x6c,
0x75, 0x72, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x4c, 0x69,
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x13, 0x2e, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x2e,
0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x1a, 0x14, 0x2e, 0x73,
0x6c, 0x75, 0x72, 0x6d, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65,
0x73, 0x70, 0x42, 0x0a, 0x5a, 0x08, 0x2f, 0x73, 0x6c, 0x75, 0x72, 0x6d, 0x70, 0x62, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_idl_slurm_proto_rawDescOnce sync.Once
file_idl_slurm_proto_rawDescData = file_idl_slurm_proto_rawDesc
)
func file_idl_slurm_proto_rawDescGZIP() []byte {
file_idl_slurm_proto_rawDescOnce.Do(func() {
file_idl_slurm_proto_rawDescData = protoimpl.X.CompressGZIP(file_idl_slurm_proto_rawDescData)
})
return file_idl_slurm_proto_rawDescData
}
var file_idl_slurm_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_idl_slurm_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_idl_slurm_proto_goTypes = []interface{}{
(SlurmVersion)(0), // 0: slurm.SlurmVersion
(*ListNodesReq)(nil), // 1: slurm.ListNodesReq
(*NodeInfo)(nil), // 2: slurm.NodeInfo
(*ListNodesResp)(nil), // 3: slurm.ListNodesResp
}
var file_idl_slurm_proto_depIdxs = []int32{
2, // 0: slurm.ListNodesResp.node_infos:type_name -> slurm.NodeInfo
1, // 1: slurm.SlurmService.ListNodes:input_type -> slurm.ListNodesReq
3, // 2: slurm.SlurmService.ListNodes:output_type -> slurm.ListNodesResp
2, // [2:3] is the sub-list for method output_type
1, // [1:2] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_idl_slurm_proto_init() }
func file_idl_slurm_proto_init() {
if File_idl_slurm_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_idl_slurm_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNodesReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_idl_slurm_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*NodeInfo); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_idl_slurm_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ListNodesResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_idl_slurm_proto_rawDesc,
NumEnums: 1,
NumMessages: 3,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_idl_slurm_proto_goTypes,
DependencyIndexes: file_idl_slurm_proto_depIdxs,
EnumInfos: file_idl_slurm_proto_enumTypes,
MessageInfos: file_idl_slurm_proto_msgTypes,
}.Build()
File_idl_slurm_proto = out.File
file_idl_slurm_proto_rawDesc = nil
file_idl_slurm_proto_goTypes = nil
file_idl_slurm_proto_depIdxs = nil
}

View File

@ -1,173 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: idl/slurm.proto
/*
Package slurmpb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package slurmpb
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
var (
filter_SlurmService_ListNodes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_SlurmService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, client SlurmServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListNodesReq
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SlurmService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListNodes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_SlurmService_ListNodes_0(ctx context.Context, marshaler runtime.Marshaler, server SlurmServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListNodesReq
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SlurmService_ListNodes_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListNodes(ctx, &protoReq)
return msg, metadata, err
}
// RegisterSlurmServiceHandlerServer registers the http handlers for service SlurmService to "mux".
// UnaryRPC :call SlurmServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSlurmServiceHandlerFromEndpoint instead.
func RegisterSlurmServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SlurmServiceServer) error {
mux.Handle("GET", pattern_SlurmService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/slurm.SlurmService/ListNodes", runtime.WithHTTPPathPattern("/apis/slurm/listNodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_SlurmService_ListNodes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_SlurmService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterSlurmServiceHandlerFromEndpoint is same as RegisterSlurmServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterSlurmServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterSlurmServiceHandler(ctx, mux, conn)
}
// RegisterSlurmServiceHandler registers the http handlers for service SlurmService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterSlurmServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterSlurmServiceHandlerClient(ctx, mux, NewSlurmServiceClient(conn))
}
// RegisterSlurmServiceHandlerClient registers the http handlers for service SlurmService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SlurmServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SlurmServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "SlurmServiceClient" to call the correct interceptors.
func RegisterSlurmServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SlurmServiceClient) error {
mux.Handle("GET", pattern_SlurmService_ListNodes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/slurm.SlurmService/ListNodes", runtime.WithHTTPPathPattern("/apis/slurm/listNodes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_SlurmService_ListNodes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_SlurmService_ListNodes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_SlurmService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "slurm", "listNodes"}, ""))
)
var (
forward_SlurmService_ListNodes_0 = runtime.ForwardResponseMessage
)

View File

@ -1,107 +0,0 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc (unknown)
// source: idl/slurm.proto
package slurmpb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// SlurmServiceClient is the client API for SlurmService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type SlurmServiceClient interface {
// Echo 样例接口
ListNodes(ctx context.Context, in *ListNodesReq, opts ...grpc.CallOption) (*ListNodesResp, error)
}
type slurmServiceClient struct {
cc grpc.ClientConnInterface
}
func NewSlurmServiceClient(cc grpc.ClientConnInterface) SlurmServiceClient {
return &slurmServiceClient{cc}
}
func (c *slurmServiceClient) ListNodes(ctx context.Context, in *ListNodesReq, opts ...grpc.CallOption) (*ListNodesResp, error) {
out := new(ListNodesResp)
err := c.cc.Invoke(ctx, "/slurm.SlurmService/ListNodes", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// SlurmServiceServer is the server API for SlurmService service.
// All implementations must embed UnimplementedSlurmServiceServer
// for forward compatibility
type SlurmServiceServer interface {
	// ListNodes lists all slurm nodes (sample RPC).
	ListNodes(context.Context, *ListNodesReq) (*ListNodesResp, error)
	mustEmbedUnimplementedSlurmServiceServer()
}

// UnimplementedSlurmServiceServer must be embedded to have forward compatible implementations.
type UnimplementedSlurmServiceServer struct {
}

// ListNodes is the forward-compatibility stub; it always reports Unimplemented.
func (UnimplementedSlurmServiceServer) ListNodes(context.Context, *ListNodesReq) (*ListNodesResp, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented")
}
func (UnimplementedSlurmServiceServer) mustEmbedUnimplementedSlurmServiceServer() {}

// UnsafeSlurmServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to SlurmServiceServer will
// result in compilation errors.
type UnsafeSlurmServiceServer interface {
	mustEmbedUnimplementedSlurmServiceServer()
}

// RegisterSlurmServiceServer attaches srv's RPC handlers to the given registrar.
func RegisterSlurmServiceServer(s grpc.ServiceRegistrar, srv SlurmServiceServer) {
	s.RegisterService(&SlurmService_ServiceDesc, srv)
}
// _SlurmService_ListNodes_Handler decodes the wire request, then either calls
// the service method directly or routes the call through the registered unary
// server interceptor.
func _SlurmService_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ListNodesReq)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: invoke the service method directly.
		return srv.(SlurmServiceServer).ListNodes(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/slurm.SlurmService/ListNodes",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(SlurmServiceServer).ListNodes(ctx, req.(*ListNodesReq))
	}
	return interceptor(ctx, in, info, handler)
}
// SlurmService_ServiceDesc is the grpc.ServiceDesc for SlurmService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var SlurmService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "slurm.SlurmService",
	HandlerType: (*SlurmServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "ListNodes",
			Handler:    _SlurmService_ListNodes_Handler,
		},
	},
	// This service declares no streaming RPCs.
	Streams:  []grpc.StreamDesc{},
	Metadata: "idl/slurm.proto",
}

View File

@ -1,87 +0,0 @@
{
"swagger": "2.0",
"info": {
"title": "idl/slurm.proto",
"version": "version not set"
},
"tags": [
{
"name": "SlurmService"
}
],
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {},
"definitions": {
"protobufAny": {
"type": "object",
"properties": {
"@type": {
"type": "string"
}
},
"additionalProperties": {}
},
"rpcStatus": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"$ref": "#/definitions/protobufAny"
}
}
}
},
"slurmListNodesResp": {
"type": "object",
"properties": {
"nodeInfos": {
"type": "array",
"items": {
"$ref": "#/definitions/slurmNodeInfo"
}
}
}
},
"slurmNodeInfo": {
"type": "object",
"properties": {
"boards": {
"type": "integer",
"format": "int32"
},
"cpus": {
"type": "integer",
"format": "int32"
},
"name": {
"type": "string"
},
"realMemory": {
"type": "integer",
"format": "int32"
},
"sockets": {
"type": "integer",
"format": "int32"
},
"threads": {
"type": "integer",
"format": "int32"
}
}
}
}
}

View File

@ -2,32 +2,88 @@ syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "idl/slurm_node.proto";
import "idl/slurm_partition.proto";
import "idl/slurm_reservation.proto";
import "idl/slurm_ping.proto";
import "idl/slurmdb_cluster.proto";
import "idl/slurmdb_user.proto";
import "idl/slurmdb_assoc.proto";
import "idl/slurmdb_account.proto";
import "idl/slurmdb_qos.proto";
import "idl/slurmdb_wckey.proto";
// slurm version
enum SlurmVersion {
// 0 -
tianhe = 0;
}
message ListNodesReq{
int32 SlurmVersion = 1;
}
message NodeInfo{
int32 boards = 1 ;
int32 cpus = 2 ;
string name = 3 ;
int32 realMemory = 4 ;
int32 sockets = 5 ;
int32 threads = 6 ;
}
message ListNodesResp {
repeated NodeInfo node_infos =1;
}
// SlurmService exposes slurm/slurmdb management RPCs: users, associations,
// accounts, QOS, wckeys, clusters, nodes, partitions and reservations.
service SlurmService {
  // list all users from slurmdb
  rpc ListUsers(ListUsersReq) returns (ListUsersResp);
  // get specific user info from slurmdb
  rpc GetUser(GetUserReq) returns (GetUserResp);
  // add new user
  rpc AddUser(AddUserReq) returns (AddUserResp);
  // delete specific user
  rpc DeleteUser(DeleteUserReq) returns (DeleteUserResp);
  // list all associations from slurmdb
  rpc ListAssociations(ListAssociationsReq) returns (ListAssociationsResp);
  // get specific association info from slurmdb
  rpc GetAssociation(GetAssociationReq) returns (GetAssociationResp);
  // list all accounts info from slurmdb
  rpc ListAccounts(ListAccountsReq) returns (ListAccountsResp);
  // get specific account info from slurmdb
  rpc GetAccount(GetAccountReq) returns (GetAccountResp);
  // add account
  rpc AddAccount(AddAccountReq) returns (AddAccountResp);
  // delete account
  rpc DeleteAccount(DeleteAccountReq) returns (DeleteAccountResp);
  // list all qos info from slurmdb
  rpc ListQoss(ListQossReq) returns (ListQossResp);
  // list all wckeys info from slurmdb
  rpc ListWckeys(ListWckeysReq) returns (ListWckeysResp);
  // get specific wckey info from slurmdb
  rpc GetWckey(GetWckeyReq) returns (GetWckeyResp);
  // list all clusters from slurmdb
  rpc ListClusters(ListClustersReq) returns (ListClustersResp);
  // get specific cluster info from slurmdb
  rpc GetCluster(GetClusterReq) returns (GetClusterResp);
  // add new cluster
  rpc AddCluster(AddClusterReq) returns (AddClusterResp);
  // delete specific cluster
  rpc DeleteCluster(DeleteClusterReq) returns (DeleteClusterResp);
  // list all nodes from slurm
  rpc ListNodes(ListNodesReq) returns (ListNodesResp);
  // get specific node info from slurm
  rpc GetNode(GetNodeReq) returns (GetNodeResp);
  // list all partitions from slurm
  rpc ListPartitions(ListPartitionsReq) returns (ListPartitionsResp);
  // get specific partition info from slurm
  rpc GetPartition(GetPartitionReq) returns (GetPartitionResp);
  // list all reservations from slurm
  rpc ListReservations(ListReservationsReq) returns (ListReservationsResp);
  // get specific reservation info from slurm
  rpc GetReservation(GetReservationReq) returns (GetReservationResp);
}

View File

@ -3,5 +3,49 @@ config_version: 3
http:
rules:
- selector: slurm.SlurmService.ListUsers
get: "/apis/slurm/listUsers"
- selector: slurm.SlurmService.GetUser
get: "/apis/slurm/getUser"
- selector: slurm.SlurmService.AddUser
post: "/apis/slurm/addUser"
- selector: slurm.SlurmService.DeleteUser
delete: "/apis/slurm/deleteUser"
- selector: slurm.SlurmService.ListAssociations
get: "/apis/slurm/listAssociations"
- selector: slurm.SlurmService.GetAssociation
get: "/apis/slurm/getAssociation"
- selector: slurm.SlurmService.ListAccounts
get: "/apis/slurm/listAccounts"
- selector: slurm.SlurmService.GetAccount
get: "/apis/slurm/getAccount"
- selector: slurm.SlurmService.AddAccount
post: "/apis/slurm/addAccount"
- selector: slurm.SlurmService.DeleteAccount
delete: "/apis/slurm/deleteAccount"
- selector: slurm.SlurmService.ListQoss
get: "/apis/slurm/listQoss"
- selector: slurm.SlurmService.ListWckeys
get: "/apis/slurm/listWckeys"
- selector: slurm.SlurmService.GetWckey
get: "/apis/slurm/getWckey"
- selector: slurm.SlurmService.ListClusters
get: "/apis/slurm/listClusters"
- selector: slurm.SlurmService.GetCluster
get: "/apis/slurm/getCluster"
- selector: slurm.SlurmService.AddCluster
post: "/apis/slurm/addCluster"
- selector: slurm.SlurmService.DeleteCluster
delete: "/apis/slurm/deleteCluster"
- selector: slurm.SlurmService.ListNodes
get: "/apis/slurm/listNodes"
- selector: slurm.SlurmService.GetNode
get: "/apis/slurm/getNode"
- selector: slurm.SlurmService.ListPartitions
get: "/apis/slurm/listPartitions"
- selector: slurm.SlurmService.GetPartition
get: "/apis/slurm/getPartition"
- selector: slurm.SlurmService.ListReservations
get: "/apis/slurm/listReservations"
- selector: slurm.SlurmService.GetReservation
get: "/apis/slurm/getReservation"

View File

@ -0,0 +1,56 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "idl/static.proto";
message NodeInfo{
string Arch = 1 ;
uint32 Boards = 2 ;
int64 Boot_time = 3 ;
uint32 Cores = 4 ;
uint32 Cpu_load = 5 ;
uint32 Cpus = 6 ;
string Features = 7;
string Gres = 8 ;
string Name = 9 ;
string Node_addr = 10 ;
string Node_hostname = 11 ;
uint32 Node_state = 12 ;
string Os = 13 ;
uint64 Real_memory = 14 ;
string Reason = 15 ;
int64 Reason_time = 16 ;
uint32 Reason_uid = 17 ;
int64 Slurmd_start_time = 18 ;
uint32 Sockets = 19 ;
uint32 Threads = 20 ;
uint32 Tmp_disk = 21 ;
uint32 Weight = 22 ;
}
message ListNodesReq{
SlurmVersion slurm_version = 1;
}
message ListNodesResp {
repeated NodeInfo node_infos = 1;
}
message GetNodeReq{
SlurmVersion slurm_version = 1;
string node_name = 2;
}
message GetNodeResp {
repeated NodeInfo node_infos = 1;
}
/*message Nodes_info_msg{
int64 Last_update =1;
uint32 Record_count =2;
uint32 Error_code =3;
repeated NodeInfo node_infos =4;
}*/

View File

@ -0,0 +1,54 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "idl/static.proto";
message PartitionInfo{
string Allow_alloc_nodes =1;
string Allow_groups =2;
string Alternate =3;
uint32 Cr_type =4;
uint64 Def_mem_per_cpu =5;
uint32 Default_time = 6;
uint32 Flags =7;
uint32 Grace_time =8;
uint32 Max_cpus_per_node =9;
uint64 Max_mem_per_cpu =10;
uint32 Max_nodes =11;
uint32 Max_share =12;
uint32 Max_time =13;
uint32 Min_nodes =14;
string Name =15;
int32 Node_inx =16;
string Nodes =17;
uint32 Preempt_mode =18;
uint32 State_up =19;
uint32 Total_cpus =20;
uint32 Total_nodes =21;
}
message ListPartitionsReq{
SlurmVersion SlurmVersion = 1;
}
message ListPartitionsResp{
repeated PartitionInfo Partition_list =1;
}
message GetPartitionReq{
SlurmVersion slurm_version = 1;
string partition_name = 2;
}
message GetPartitionResp {
repeated PartitionInfo partition_infos = 1;
}
/*message Partition_info_msg{
int64 Last_update =1;
uint32 Record_count =2;
uint32 Error_code =3;
repeated PartitionInfo Partition_list =4;
}*/

View File

@ -0,0 +1,14 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "idl/static.proto";
message PingInfoMsgReq{
SlurmVersion SlurmVersion = 1;
int32 controller = 2;
}
message PingInfoMsgResp{
int32 Ping_result =1;
}

View File

@ -0,0 +1,46 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "idl/static.proto";
message ReservationInfo{
string Accounts = 1;
int64 End_time =2;
string Features =3;
uint32 Flags =4;
string Licenses =5;
string Name =6;
uint32 Node_cnt =7;
uint32 Core_cnt =8;
int32 Node_inx =9;
string Node_list =10;
string Partition =11;
int64 Start_time =12;
string Users =13;
}
message ListReservationsReq{
SlurmVersion SlurmVersion = 1;
}
message ListReservationsResp{
repeated ReservationInfo Reservation_list =1;
}
message GetReservationReq{
SlurmVersion slurm_version = 1;
string reservation_name = 2;
}
message GetReservationResp {
repeated ReservationInfo reservation_infos = 1;
}
/*message Reservation_info_msg{
int64 Last_update =1;
uint32 Record_count =2;
uint32 Error_code =3;
repeated ReservationInfo Reservation_list =4;
}*/

View File

@ -0,0 +1,71 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
import "idl/slurmdb_assoc.proto";
import "idl/slurmdb_coord.proto";
message AccountInfo{
repeated AssociationInfo assoc_list = 1;
repeated CoordInfo coordinators = 2;
string description = 3;
string name = 4;
string organization = 5;
}
message ListAccountsReq{
SlurmVersion slurm_version = 1;
}
message ListAccountsResp {
repeated AccountInfo account_infos = 1;
}
message GetAccountReq{
SlurmVersion slurm_version = 1;
string account_name = 2;
}
message GetAccountResp {
repeated AccountInfo account_infos = 1;
}
message AddAccountReq{
SlurmVersion slurm_version = 1;
string clusters =2;
string default_qos=3;
string description=4;
string fair_share=5;
string grp_cpu_mins=6;
string grp_cpus=7;
string grp_jobs=8;
string grp_memory=9;
string grp_nodes=10;
string grp_submit_jobs=11;
string grp_wall=12;
string max_cpu_mins=13;
string max_cpus=14;
string max_jobs=15;
string max_nodes=16;
string max_submit_jobs=17;
string max_wall=18;
string names=19;
string organization=20;
string parent=21;
string qos_level=22;
}
message AddAccountResp {
string result = 1;
}
message DeleteAccountReq{
SlurmVersion slurm_version = 1;
string names=2;
}
message DeleteAccountResp {
string result = 1;
}

View File

@ -0,0 +1,12 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
message AccountingInfo{
int64 alloc_secs = 1 ;
int32 id = 2 ;
google.protobuf.Timestamp period_start = 3;
}

View File

@ -0,0 +1,76 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
import "idl/slurmdb_accounting.proto";
import "idl/slurmdb_qos.proto";
message AssocUsageInfo{
}
message AssociationInfo{
repeated AccountingInfo accounting_list = 1 ;
string acct = 2 ;
string cluster = 3 ;
int32 def_qos_id = 4 ;
int64 grp_cpu_mins = 5 ;
int64 grp_gpu_run_mins = 6 ;
int32 grp_cpus = 7 ;
int32 grp_jobs = 8 ;
int32 grp_mem = 9 ;
int32 grp_nodes = 10 ;
int32 grp_submit_jobs = 11 ;
int32 grp_wall = 12 ;
int32 id = 13 ;
int32 is_def = 14 ;
int32 lft = 15 ;
int64 max_cpu_mins_pj = 16 ;
int64 max_cpu_run_mins = 17 ;
int32 max_cpus_pj = 18 ;
int32 max_jobs = 19 ;
int32 max_nodes_pj = 20 ;
int32 max_submit_jobs = 21 ;
int32 max_wall_pj = 22 ;
string parent_acct = 23 ;
int32 parent_id = 24 ;
string partition = 25 ;
repeated QosInfo qos_list = 26 ;
int32 rgt = 27 ;
int32 shares_raw = 28 ;
int32 uid = 29 ;
AssocUsageInfo assoc_usage = 30 ;
string user = 31 ;
}
message ListAssociationsReq{
SlurmVersion slurm_version = 1;
}
message ListAssociationsResp {
repeated AssociationInfo assoc_infos = 1;
}
message GetAssociationReq{
SlurmVersion slurm_version = 1;
string cluster = 2;
string account = 3;
string user = 4;
string partition = 5;
}
message GetAssociationResp {
repeated AssociationInfo assoc_infos = 1;
}
message SetAssociationReq{
SlurmVersion slurm_version = 1;
AssociationInfo assoc_info = 2;
}
message SetAssociationResp {
bool result = 1;
}

View File

@ -0,0 +1,69 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
import "idl/slurmdb_accounting.proto";
import "idl/slurmdb_assoc.proto";
message ClusterInfo{
repeated AccountingInfo accounting_list = 1 ;
int32 classification = 2 ;
string control_host = 3 ;
int32 control_port = 4 ;
int32 cpu_count = 5 ;
int32 dimensions = 6 ;
int32 dim_size = 7 ;
int32 flags = 8 ;
string name = 9 ;
string nodes = 10 ;
int32 plugin_id_select = 11 ;
AssociationInfo root_assoc = 12 ;
int32 rpc_version = 13 ;
}
message ListClustersReq{
SlurmVersion slurm_version = 1;
}
message ListClustersResp {
repeated ClusterInfo cluster_infos = 1;
}
message GetClusterReq{
SlurmVersion slurm_version = 1;
string cluster_name = 2;
}
message GetClusterResp {
repeated ClusterInfo cluster_infos = 1;
}
message AddClusterReq{
SlurmVersion slurm_version = 1;
string name =2;
string nodes =3;
string control_port = 4 ;
string cpu_count = 5 ;
string dimensions = 6 ;
string dim_size = 7 ;
string flags = 8 ;
string classification = 9 ;
string control_host = 10 ;
string plugin_id_select = 11 ;
string rpc_version = 13 ;
}
message AddClusterResp {
string result = 1;
}
message DeleteClusterReq{
SlurmVersion slurm_version = 1;
string names=2;
}
message DeleteClusterResp {
string result = 1;
}

View File

@ -0,0 +1,12 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
message CoordInfo{
string name = 1 ;
int32 direct = 2 ;
}

View File

@ -0,0 +1,49 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
message QosUsageInfo{
}
message QosInfo{
string description = 1 ;
int32 id = 2 ;
int32 flags = 3 ;
int32 grace_time = 4 ;
int64 grp_cpu_mins = 5;
int64 grp_cpu_run_mins = 6;
int32 grp_cpus = 7 ;
int32 grp_jobs = 8 ;
int32 grp_mem = 9 ;
int32 grp_nodes = 10 ;
int32 grp_submit_jobs = 11 ;
int32 grp_wall = 12 ;
int64 max_cpu_mins_pj = 13 ;
int64 max_cpu_run_mins_pu = 14 ;
int32 max_cpus_pj = 15 ;
int32 max_cpus_pu = 16 ;
int32 max_jobs_pu = 17 ;
int32 max_nodes_pj = 18 ;
int32 max_submit_jobs_pu = 19 ;
int32 max_wall_pj = 20 ;
string name = 21 ;
int32 preempt_bitstr = 22 ;
repeated string preempt_list = 23 ;
int32 preempt_mode = 24 ;
int32 priority = 25 ;
QosUsageInfo usage = 26 ;
double usageFactor = 27 ;
double usage_thres = 28 ;
}
message ListQossReq{
SlurmVersion slurm_version = 1;
}
message ListQossResp {
repeated QosInfo qos_infos =1;
}

View File

@ -0,0 +1,75 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
import "idl/slurmdb_coord.proto";
import "idl/slurmdb_wckey.proto";
import "idl/slurmdb_assoc.proto";
message UserInfoList{
repeated UserInfo userInfos = 1 ;
}
message UserInfo{
int32 adminLevel = 1 ;
repeated AssociationInfo association_list= 2 ;
repeated CoordInfo coord_list = 3 ;
string default_acct = 4 ;
string default_wckey = 5 ;
string name = 6 ;
string oldName = 7 ;
int32 uid = 8 ;
repeated WckeyInfo wckey_list = 9 ;
}
message ListUsersReq{
SlurmVersion slurm_version = 1;
}
message ListUsersResp {
repeated UserInfo user_infos =1;
}
message GetUserReq{
SlurmVersion slurm_version = 1;
string user_name = 2;
}
message GetUserResp {
repeated UserInfo user_info = 1;
}
message AddUserReq{
SlurmVersion slurm_version = 1;
string accounts=2;
string admin_level=3;
string clusters=4;
string default_account=5;
string default_qos=6;
string default_wckey=7;
string fair_share=8;
string max_cpu_mins=9;
string max_cpus=10;
string max_jobs=11;
string max_nodes=12;
string max_submit_jobs=13;
string max_wall=14;
string names=15;
string partitions=16;
string qos_level=17;
}
message AddUserResp {
string result =1;
}
message DeleteUserReq{
SlurmVersion slurm_version = 1;
string names=2;
}
message DeleteUserResp {
string result =1;
}

View File

@ -0,0 +1,34 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
import "google/protobuf/timestamp.proto";
import "idl/static.proto";
import "idl/slurmdb_accounting.proto";
message WckeyInfo{
repeated AccountingInfo accounting_list = 1 ;
string cluster= 2 ;
int32 id = 3 ;
int32 is_def = 4 ;
string name = 5 ;
int32 uid = 6 ;
string user = 7 ;
}
message ListWckeysReq{
SlurmVersion slurm_version = 1;
}
message ListWckeysResp {
repeated WckeyInfo wckey_infos =1;
}
message GetWckeyReq{
SlurmVersion slurm_version = 1;
string wckey = 2;
}
message GetWckeyResp {
repeated WckeyInfo wckey_info = 1;
}

View File

@ -0,0 +1,12 @@
syntax = "proto3";
package slurm;
option go_package = "/slurmpb";
// slurm version
enum SlurmVersion {
// 0 - tianhe hpc
tianhe = 0;
// 1 - shenwei hpc
shenwei = 1;
}

View File

@ -12,7 +12,187 @@ type Server struct {
slurmpb.UnimplementedSlurmServiceServer
}
// ListNodes return all slurm nodes
// ListUsers returns every slurm user known to slurmdb.
func (s *Server) ListUsers(ctx context.Context, req *slurmpb.ListUsersReq) (*slurmpb.ListUsersResp, error) {
	resp, err := ListUsers(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("ListSlurmUsers error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// GetUser returns a single slurm user.
func (s *Server) GetUser(ctx context.Context, req *slurmpb.GetUserReq) (*slurmpb.GetUserResp, error) {
	resp, err := GetUser(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("GetSlurmUser error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// AddUser creates a slurm user.
func (s *Server) AddUser(ctx context.Context, req *slurmpb.AddUserReq) (*slurmpb.AddUserResp, error) {
	resp, err := AddUser(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("AddSlurmUser error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// DeleteUser removes a slurm user.
func (s *Server) DeleteUser(ctx context.Context, req *slurmpb.DeleteUserReq) (*slurmpb.DeleteUserResp, error) {
	resp, err := DeleteUser(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("DeleteSlurmUser error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// ListAssociations returns every slurmdb association.
func (s *Server) ListAssociations(ctx context.Context, req *slurmpb.ListAssociationsReq) (*slurmpb.ListAssociationsResp, error) {
	resp, err := ListAssociations(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("ListSlurmAssociations error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// GetAssociation returns a single slurmdb association.
func (s *Server) GetAssociation(ctx context.Context, req *slurmpb.GetAssociationReq) (*slurmpb.GetAssociationResp, error) {
	resp, err := GetAssociation(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("GetSlurmAssociations error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// ListAccounts returns every slurmdb account.
func (s *Server) ListAccounts(ctx context.Context, req *slurmpb.ListAccountsReq) (*slurmpb.ListAccountsResp, error) {
	resp, err := ListAccounts(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("ListSlurmAccounts error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// GetAccount returns a single slurmdb account.
func (s *Server) GetAccount(ctx context.Context, req *slurmpb.GetAccountReq) (*slurmpb.GetAccountResp, error) {
	resp, err := GetAccount(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("GetSlurmAccount error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}
// AddAccount creates a slurmdb account and returns the textual result.
func (s *Server) AddAccount(ctx context.Context, req *slurmpb.AddAccountReq) (*slurmpb.AddAccountResp, error) {
	resp, err := AddAccount(ctx, req)
	if err != nil {
		// Name the failing operation (was just "Add") so log lines are attributable.
		glog.Errorf("AddSlurmAccount error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}

// DeleteAccount removes a slurmdb account and returns the textual result.
func (s *Server) DeleteAccount(ctx context.Context, req *slurmpb.DeleteAccountReq) (*slurmpb.DeleteAccountResp, error) {
	resp, err := DeleteAccount(ctx, req)
	if err != nil {
		// Name the failing operation (was just "Delete").
		glog.Errorf("DeleteSlurmAccount error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}
// ListQoss returns every QOS defined in slurmdb.
func (s *Server) ListQoss(ctx context.Context, req *slurmpb.ListQossReq) (*slurmpb.ListQossResp, error) {
	resp, err := ListQoss(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("ListSlurmQoss error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// ListWckeys returns every wckey defined in slurmdb.
func (s *Server) ListWckeys(ctx context.Context, req *slurmpb.ListWckeysReq) (*slurmpb.ListWckeysResp, error) {
	resp, err := ListWckeys(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("ListSlurmWckeys error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}

// GetWckey returns a single slurmdb wckey.
func (s *Server) GetWckey(ctx context.Context, req *slurmpb.GetWckeyReq) (*slurmpb.GetWckeyResp, error) {
	resp, err := GetWckey(ctx, req)
	if err == nil {
		return resp, nil
	}
	glog.Errorf("GetSlurmWckey error %+v", err)
	return nil, status.Errorf(codes.Internal, err.Error())
}
// ListClusters returns every cluster registered in slurmdb.
func (s *Server) ListClusters(ctx context.Context, req *slurmpb.ListClustersReq) (*slurmpb.ListClustersResp, error) {
	resp, err := ListClusters(ctx, req)
	if err != nil {
		// Was "ListSlurmUsers" (copy-paste): log the operation that failed.
		glog.Errorf("ListSlurmClusters error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}

// GetCluster returns a single slurmdb cluster by name.
func (s *Server) GetCluster(ctx context.Context, req *slurmpb.GetClusterReq) (*slurmpb.GetClusterResp, error) {
	resp, err := GetCluster(ctx, req)
	if err != nil {
		// Was "GetSlurmUser" (copy-paste).
		glog.Errorf("GetSlurmCluster error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}

// DeleteCluster removes a slurmdb cluster.
func (s *Server) DeleteCluster(ctx context.Context, req *slurmpb.DeleteClusterReq) (*slurmpb.DeleteClusterResp, error) {
	resp, err := DeleteCluster(ctx, req)
	if err != nil {
		// Was "DeleteSlurmUser" (copy-paste).
		glog.Errorf("DeleteSlurmCluster error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}
// AddCluster registers a new cluster in slurmdb.
func (s *Server) AddCluster(ctx context.Context, req *slurmpb.AddClusterReq) (*slurmpb.AddClusterResp, error) {
	resp, err := AddCluster(ctx, req)
	if err != nil {
		glog.Errorf("AddSlurmCluster error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}
// GetNode returns a single slurm node by name.
func (s *Server) GetNode(ctx context.Context, req *slurmpb.GetNodeReq) (*slurmpb.GetNodeResp, error) {
	resp, err := GetNode(ctx, req)
	if err != nil {
		// Was "GetSlurmUser" (copy-paste): log the operation that failed.
		glog.Errorf("GetSlurmNode error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}
// ListUsers return all slurm Clusters
func (s *Server) ListNodes(ctx context.Context, req *slurmpb.ListNodesReq) (*slurmpb.ListNodesResp, error) {
resp, err := ListNodes(ctx, req)
if err != nil {
@ -21,3 +201,43 @@ func (s *Server) ListNodes(ctx context.Context, req *slurmpb.ListNodesReq) (*slu
}
return resp, nil
}
// ListPartitions returns every slurm partition.
func (s *Server) ListPartitions(ctx context.Context, req *slurmpb.ListPartitionsReq) (*slurmpb.ListPartitionsResp, error) {
	resp, err := ListPartitions(ctx, req)
	if err != nil {
		glog.Errorf("ListPartitions error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}

// GetPartition returns a single slurm partition by name.
func (s *Server) GetPartition(ctx context.Context, req *slurmpb.GetPartitionReq) (*slurmpb.GetPartitionResp, error) {
	resp, err := GetPartition(ctx, req)
	if err != nil {
		glog.Errorf("GetPartition error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}
// GetReservation returns a single slurm reservation by name.
func (s *Server) GetReservation(ctx context.Context, req *slurmpb.GetReservationReq) (*slurmpb.GetReservationResp, error) {
	resp, err := GetReservation(ctx, req)
	if err != nil {
		// Was "GetPartition" (copy-paste): log the operation that failed.
		glog.Errorf("GetReservation error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}
// ListReservations returns every slurm reservation.
func (s *Server) ListReservations(ctx context.Context, req *slurmpb.ListReservationsReq) (*slurmpb.ListReservationsResp, error) {
	resp, err := ListReservations(ctx, req)
	if err != nil {
		glog.Errorf("ListReservations error %+v", err)
		return nil, status.Errorf(codes.Internal, err.Error())
	}
	return resp, nil
}

View File

@ -6,8 +6,209 @@ import (
"context"
)
// ListUsers routes the request to the slurm implementation selected by
// req.SlurmVersion. An unknown version yields (nil, nil), preserving the
// original best-effort contract; the implementation's error — previously
// discarded with `resp, _ :=` — is now propagated to the caller.
func ListUsers(ctx context.Context, req *pbslurm.ListUsersReq) (*pbslurm.ListUsersResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListUsers(ctx, req)
}

// GetUser routes to the selected implementation; propagates its error.
func GetUser(ctx context.Context, req *pbslurm.GetUserReq) (*pbslurm.GetUserResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetUser(ctx, req)
}

// AddUser routes to the selected implementation; propagates its error.
func AddUser(ctx context.Context, req *pbslurm.AddUserReq) (*pbslurm.AddUserResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.AddUser(ctx, req)
}

// DeleteUser routes to the selected implementation; propagates its error.
func DeleteUser(ctx context.Context, req *pbslurm.DeleteUserReq) (*pbslurm.DeleteUserResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.DeleteUser(ctx, req)
}
// ListAssociations routes to the implementation selected by req.SlurmVersion;
// unlike the original, the implementation's error is propagated, not dropped.
func ListAssociations(ctx context.Context, req *pbslurm.ListAssociationsReq) (*pbslurm.ListAssociationsResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListAssociations(ctx, req)
}

// GetAssociation routes to the selected implementation; propagates its error.
func GetAssociation(ctx context.Context, req *pbslurm.GetAssociationReq) (*pbslurm.GetAssociationResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetAssociation(ctx, req)
}

// ListAccounts routes to the selected implementation; propagates its error.
func ListAccounts(ctx context.Context, req *pbslurm.ListAccountsReq) (*pbslurm.ListAccountsResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListAccounts(ctx, req)
}

// GetAccount routes to the selected implementation; propagates its error.
func GetAccount(ctx context.Context, req *pbslurm.GetAccountReq) (*pbslurm.GetAccountResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetAccount(ctx, req)
}

// AddAccount routes to the selected implementation; propagates its error.
func AddAccount(ctx context.Context, req *pbslurm.AddAccountReq) (*pbslurm.AddAccountResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.AddAccount(ctx, req)
}

// DeleteAccount routes to the selected implementation; propagates its error.
func DeleteAccount(ctx context.Context, req *pbslurm.DeleteAccountReq) (*pbslurm.DeleteAccountResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.DeleteAccount(ctx, req)
}
// ListQoss routes to the implementation selected by req.SlurmVersion;
// unlike the original, the implementation's error is propagated, not dropped.
func ListQoss(ctx context.Context, req *pbslurm.ListQossReq) (*pbslurm.ListQossResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListQoss(ctx, req)
}

// ListWckeys routes to the selected implementation; propagates its error.
func ListWckeys(ctx context.Context, req *pbslurm.ListWckeysReq) (*pbslurm.ListWckeysResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListWckeys(ctx, req)
}

// GetWckey routes to the selected implementation; propagates its error.
func GetWckey(ctx context.Context, req *pbslurm.GetWckeyReq) (*pbslurm.GetWckeyResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetWckey(ctx, req)
}
// ListClusters routes to the implementation selected by req.SlurmVersion;
// unlike the original, the implementation's error is propagated, not dropped.
func ListClusters(ctx context.Context, req *pbslurm.ListClustersReq) (*pbslurm.ListClustersResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListClusters(ctx, req)
}

// GetCluster routes to the selected implementation; propagates its error.
func GetCluster(ctx context.Context, req *pbslurm.GetClusterReq) (*pbslurm.GetClusterResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetCluster(ctx, req)
}

// DeleteCluster routes to the selected implementation; propagates its error.
func DeleteCluster(ctx context.Context, req *pbslurm.DeleteClusterReq) (*pbslurm.DeleteClusterResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.DeleteCluster(ctx, req)
}

// AddCluster routes to the selected implementation; propagates its error.
func AddCluster(ctx context.Context, req *pbslurm.AddClusterReq) (*pbslurm.AddClusterResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.AddCluster(ctx, req)
}
// ListNodes routes to the implementation selected by req.SlurmVersion;
// unlike the original, the implementation's error is propagated, not dropped.
func ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (*pbslurm.ListNodesResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListNodes(ctx, req)
}

// GetNode routes to the selected implementation; propagates its error.
func GetNode(ctx context.Context, req *pbslurm.GetNodeReq) (*pbslurm.GetNodeResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetNode(ctx, req)
}
// ListPartitions routes to the implementation selected by req.SlurmVersion;
// unlike the original, the implementation's error is propagated, not dropped.
func ListPartitions(ctx context.Context, req *pbslurm.ListPartitionsReq) (*pbslurm.ListPartitionsResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListPartitions(ctx, req)
}

// GetPartition routes to the selected implementation; propagates its error.
func GetPartition(ctx context.Context, req *pbslurm.GetPartitionReq) (*pbslurm.GetPartitionResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetPartition(ctx, req)
}

// GetReservation routes to the selected implementation; propagates its error.
func GetReservation(ctx context.Context, req *pbslurm.GetReservationReq) (*pbslurm.GetReservationResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.GetReservation(ctx, req)
}

// ListReservations routes to the selected implementation; propagates its error.
func ListReservations(ctx context.Context, req *pbslurm.ListReservationsReq) (*pbslurm.ListReservationsResp, error) {
	slurm, _ := slurmer.SelectSlurmVersion(req.SlurmVersion)
	if slurm == nil {
		return nil, nil
	}
	return slurm.ListReservations(ctx, req)
}

View File

@ -0,0 +1,197 @@
package slurmer
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
inline uint8_t uint8_ptr(uint8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int8_t int8_ptr(int8_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
uint16_t uint16_ptr(uint16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int16_t int16_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint32_t uint32_ptr(uint32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int32_t int32_ptr(int32_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline uint64_t uint64_ptr(uint64_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
inline int64_t int64_ptr(int16_t* pointer) {
if (NULL == pointer) {
return -1;}
return *pointer;
}
struct node_info_msg *get_node_info(){
struct node_info_msg* node_buffer;
if(slurm_load_node ((time_t) NULL,
&node_buffer, SHOW_ALL))
return NULL;
return node_buffer;
}
struct node_info_msg *get_single_node_info(char* name){
struct node_info_msg* node_buffer;
if( slurm_load_node_single (&node_buffer, name, SHOW_DETAIL))
return NULL;
return node_buffer;
}
struct node_info* node_from_list(struct node_info_msg *list, int i){
return &list->node_array[i];
}
void free_node_buffer(void* buffer){
slurm_free_node_info_msg ((struct node_info_msg*)buffer);
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
"strings"
)
// NodeInfoMsg is the Go-side mirror of slurm's node_info_msg_t buffer:
// the load timestamp and record count reported by slurmctld, an errno
// captured when loading fails, and the converted per-node records.
type NodeInfoMsg struct {
	LastUpdate   int64  // last_update from the C buffer (unix seconds)
	RecordCount  uint32 // number of entries in NodeInfoList
	ErrorCode    uint32 // slurm errno when the load failed, else zero
	NodeInfoList []pbslurm.NodeInfo
}
// Node_info_convert_c_to_go maps one C node_info record onto the protobuf
// NodeInfo message. Every string is copied via C.GoString, so the result
// remains valid after the C buffer is freed.
func Node_info_convert_c_to_go(c_struct *C.struct_node_info) pbslurm.NodeInfo {
	var go_struct pbslurm.NodeInfo
	go_struct.Arch = C.GoString(c_struct.arch)
	go_struct.Boards = uint32(c_struct.boards)
	go_struct.BootTime = int64(c_struct.boot_time)
	go_struct.Cores = uint32(c_struct.cores)
	go_struct.CpuLoad = uint32(c_struct.cpu_load)
	go_struct.Cpus = uint32(c_struct.cpus)
	go_struct.Features = C.GoString(c_struct.features)
	go_struct.Gres = C.GoString(c_struct.gres)
	go_struct.Name = C.GoString(c_struct.name)
	go_struct.NodeAddr = C.GoString(c_struct.node_addr)
	go_struct.NodeHostname = C.GoString(c_struct.node_hostname)
	// node_state is a slurm state bitmask; it is passed through untouched.
	go_struct.NodeState = uint32(c_struct.node_state)
	go_struct.Os = C.GoString(c_struct.os)
	go_struct.RealMemory = uint64(c_struct.real_memory)
	go_struct.Reason = C.GoString(c_struct.reason)
	go_struct.ReasonTime = int64(c_struct.reason_time)
	go_struct.ReasonUid = uint32(c_struct.reason_uid)
	go_struct.SlurmdStartTime = int64(c_struct.slurmd_start_time)
	go_struct.Sockets = uint32(c_struct.sockets)
	go_struct.Threads = uint32(c_struct.threads)
	go_struct.TmpDisk = uint32(c_struct.tmp_disk)
	go_struct.Weight = uint32(c_struct.weight)
	return go_struct
}
// ListNodes returns every node known to slurmctld.
//
// Fix: the previous version copied only five fields (Cpus, Boards,
// RealMemory, Sockets, Threads) out of the fully converted record,
// silently dropping Name, state, addresses, etc. The whole converted
// struct is now returned.
func (slurmStruct SlurmStruct) ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (*pbslurm.ListNodesResp, error) {
	nodeList := Get_all_nodes()
	var resp = pbslurm.ListNodesResp{}
	for _, node := range nodeList.NodeInfoList {
		// Copy into a loop-local variable so each appended pointer is
		// distinct (the range variable is reused before Go 1.22).
		nodeInfoResult := node
		resp.NodeInfos = append(resp.NodeInfos, &nodeInfoResult)
	}
	return &resp, nil
}
// Get_all_nodes loads the complete node table from slurmctld and converts
// every record with Node_info_convert_c_to_go. On load failure it returns
// an empty message whose ErrorCode carries the slurm errno. The C buffer
// is freed before returning; the Go copies remain valid.
func Get_all_nodes() NodeInfoMsg {
	var go_node_buffer NodeInfoMsg
	c_node_buffer := C.get_node_info()
	// get_node_info returns NULL when slurm_load_node fails.
	if c_node_buffer == nil {
		go_node_buffer.LastUpdate = int64(0)
		go_node_buffer.RecordCount = uint32(0)
		go_node_buffer.ErrorCode = uint32(C.slurm_get_errno())
		return go_node_buffer
	}
	go_node_buffer.LastUpdate = int64(c_node_buffer.last_update)
	go_node_buffer.RecordCount = uint32(c_node_buffer.record_count)
	go_node_buffer.NodeInfoList = make([]pbslurm.NodeInfo, c_node_buffer.record_count, c_node_buffer.record_count)
	for i := uint32(0); i < go_node_buffer.RecordCount; i++ {
		node := C.node_from_list(c_node_buffer, C.int(i))
		go_node := Node_info_convert_c_to_go(node)
		go_node_buffer.NodeInfoList[i] = go_node
	}
	// Strings were copied by C.GoString, so freeing here is safe.
	C.slurm_free_node_info_msg(c_node_buffer)
	return go_node_buffer
}
// NodeDescriptorConvertCToGo builds a minimal NodeInfo from a C node_info
// record: only the node name is copied (contrast Node_info_convert_c_to_go,
// which converts every field).
func NodeDescriptorConvertCToGo(cStruct *C.struct_node_info) pbslurm.NodeInfo {
	var goStruct pbslurm.NodeInfo
	goStruct.Name = C.GoString(cStruct.name)
	return goStruct
}
// GetNodeInfo loads the node table and converts each record to a
// name-only NodeInfo (via NodeDescriptorConvertCToGo).
//
// Fixes over the previous version: get_node_info can return NULL on a
// failed slurm_load_node, which would have crashed the record_count
// dereference; the C buffer was also never freed (leak). Both now match
// the handling in Get_all_nodes.
func GetNodeInfo() NodeInfoMsg {
	var goNodeBuffer NodeInfoMsg
	cNodeBuffer := C.get_node_info()
	if cNodeBuffer == nil {
		goNodeBuffer.ErrorCode = uint32(C.slurm_get_errno())
		return goNodeBuffer
	}
	goNodeBuffer.RecordCount = uint32(cNodeBuffer.record_count)
	goNodeBuffer.NodeInfoList = make([]pbslurm.NodeInfo, cNodeBuffer.record_count)
	for i := uint32(0); i < goNodeBuffer.RecordCount; i++ {
		node := C.node_from_list(cNodeBuffer, C.int(i))
		goNodeBuffer.NodeInfoList[i] = NodeDescriptorConvertCToGo(node)
	}
	// C.GoString copied the names, so the C buffer can be released.
	C.slurm_free_node_info_msg(cNodeBuffer)
	return goNodeBuffer
}
// GetNode returns every node whose name contains req.NodeName as a
// substring (an empty NodeName therefore matches all nodes).
func (slurmStruct SlurmStruct) GetNode(ctx context.Context, req *pbslurm.GetNodeReq) (*pbslurm.GetNodeResp, error) {
	resp := pbslurm.GetNodeResp{}
	for _, node := range GetNodeInfo().NodeInfoList {
		if !strings.Contains(node.Name, req.NodeName) {
			continue
		}
		match := node // fresh copy per iteration: the response stores pointers
		resp.NodeInfos = append(resp.NodeInfos, &match)
	}
	return &resp, nil
}
/*func (slurmStruct SlurmStruct) GetNodeByName(ctx context.Context, req *pbnode.NodeInfoMsgReq) (*pbnode.NodeInfoMsgResp, error) {
node := Get_node_info(req.NodeName)
var resp = pbnode.NodeInfoMsgResp{}
for _, node := range node.Node_list {
nodeInfoResult := pbnode.Node_Info{}
nodeInfoResult.Cpus = uint32(node.Cpus)
nodeInfoResult.Boards = uint32(node.Boards)
nodeInfoResult.RealMemory = node.RealMemory
nodeInfoResult.Sockets = uint32(node.Sockets)
nodeInfoResult.Threads = uint32(node.Threads)
resp.NodeList = append(resp.NodeList, &nodeInfoResult)
}
return &resp, nil
}*/

View File

@ -0,0 +1,133 @@
package slurmer
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
struct partition_info_msg *get_partition_info(){
struct partition_info_msg* partition_buffer;
if( slurm_load_partitions ((time_t) NULL,
&partition_buffer, SHOW_ALL))
return NULL;
return partition_buffer;
}
struct partition_info* partition_from_list(struct partition_info_msg *list, int i){
return &list->partition_array[i];
}
void free_partition_buffer(void* buffer){
slurm_free_partition_info_msg ((struct partition_info_msg*)buffer);
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
"strings"
)
// Partition_info_convert_c_to_go maps one C partition_info record onto the
// protobuf PartitionInfo message. Strings are copied via C.GoString, so
// the result stays valid after the C buffer is freed.
func Partition_info_convert_c_to_go(c_struct *C.struct_partition_info) pbslurm.PartitionInfo {
	var go_struct pbslurm.PartitionInfo
	go_struct.AllowAllocNodes = C.GoString(c_struct.allow_alloc_nodes)
	go_struct.AllowGroups = C.GoString(c_struct.allow_groups)
	go_struct.Alternate = C.GoString(c_struct.alternate)
	go_struct.CrType = uint32(c_struct.cr_type)
	go_struct.DefMemPerCpu = uint64(c_struct.def_mem_per_cpu)
	go_struct.DefaultTime = uint32(c_struct.default_time)
	go_struct.Flags = uint32(c_struct.flags)
	go_struct.GraceTime = uint32(c_struct.grace_time)
	go_struct.MaxCpusPerNode = uint32(c_struct.max_cpus_per_node)
	go_struct.MaxMemPerCpu = uint64(c_struct.max_mem_per_cpu)
	go_struct.MaxNodes = uint32(c_struct.max_nodes)
	go_struct.MaxShare = uint32(c_struct.max_share)
	go_struct.MaxTime = uint32(c_struct.max_time)
	go_struct.MinNodes = uint32(c_struct.min_nodes)
	go_struct.Name = C.GoString(c_struct.name)
	// node_inx (node index ranges) is not converted yet; the commented
	// attempt below is kept for reference.
	//go_struct.Node_inx = int32(C.int32_ptr(c_struct.node_inx))
	/*t := C.find_node_inx(c_struct.node_inx)
	fmt.Printf("%d", t)
	go_struct.Node_inx = make([]int32, t, t)
	for i := int32(0); i < int32(t); i++ {
		go_struct.Node_inx[i] = int32(C.int32_ptr(c_struct.node_inx, C.int(i)))
	}*/
	go_struct.Nodes = C.GoString(c_struct.nodes)
	go_struct.PreemptMode = uint32(c_struct.preempt_mode)
	go_struct.StateUp = uint32(c_struct.state_up)
	go_struct.TotalCpus = uint32(c_struct.total_cpus)
	go_struct.TotalNodes = uint32(c_struct.total_nodes)
	return go_struct
}
// PartitionInfoMsg is the Go-side mirror of slurm's partition_info_msg_t:
// the load timestamp, the record count, and the converted partitions.
type PartitionInfoMsg struct {
	LastUpdate        int64  // last_update from the C buffer (unix seconds)
	RecordCount       uint32 // number of entries in PartitionInfoList
	PartitionInfoList []pbslurm.PartitionInfo
}
// Get_partitions loads the partition table from slurmctld and converts
// every record with Partition_info_convert_c_to_go. On load failure it
// returns an empty message (no errno is recorded here). The C buffer is
// freed before returning.
func Get_partitions() PartitionInfoMsg {
	var go_partition_buffer PartitionInfoMsg
	c_partition_buffer := C.get_partition_info()
	// get_partition_info returns NULL when slurm_load_partitions fails.
	if c_partition_buffer == nil {
		go_partition_buffer.LastUpdate = int64(0)
		go_partition_buffer.RecordCount = uint32(0)
		return go_partition_buffer
	}
	go_partition_buffer.LastUpdate = int64(c_partition_buffer.last_update)
	go_partition_buffer.RecordCount = uint32(c_partition_buffer.record_count)
	go_partition_buffer.PartitionInfoList = make([]pbslurm.PartitionInfo, c_partition_buffer.record_count, c_partition_buffer.record_count)
	for i := uint32(0); i < go_partition_buffer.RecordCount; i++ {
		partition := C.partition_from_list(c_partition_buffer, C.int(i))
		go_partition := Partition_info_convert_c_to_go(partition)
		go_partition_buffer.PartitionInfoList[i] = go_partition
	}
	// Strings were copied by C.GoString, so freeing here is safe.
	C.slurm_free_partition_info_msg(c_partition_buffer)
	return go_partition_buffer
}
// ListPartitions returns every partition currently known to slurmctld.
func (slurmStruct SlurmStruct) ListPartitions(ctx context.Context, req *pbslurm.ListPartitionsReq) (*pbslurm.ListPartitionsResp, error) {
	resp := pbslurm.ListPartitionsResp{}
	for _, p := range Get_partitions().PartitionInfoList {
		entry := p // fresh copy per iteration: the response stores pointers
		resp.PartitionList = append(resp.PartitionList, &entry)
	}
	return &resp, nil
}
// GetPartitionsInfo loads the partition table and converts each record to
// a name-only PartitionInfo (via PartitionDescriptorConvertCToGo).
//
// Fixes over the previous version: get_partition_info can return NULL on
// a failed slurm_load_partitions, which would have crashed the
// record_count dereference; the C buffer was also never freed (leak).
// Both now match the handling in Get_partitions.
func GetPartitionsInfo() PartitionInfoMsg {
	var goPartitionBuffer PartitionInfoMsg
	cPartitionBuffer := C.get_partition_info()
	if cPartitionBuffer == nil {
		return goPartitionBuffer
	}
	goPartitionBuffer.RecordCount = uint32(cPartitionBuffer.record_count)
	goPartitionBuffer.PartitionInfoList = make([]pbslurm.PartitionInfo, cPartitionBuffer.record_count)
	for i := uint32(0); i < goPartitionBuffer.RecordCount; i++ {
		partition := C.partition_from_list(cPartitionBuffer, C.int(i))
		goPartitionBuffer.PartitionInfoList[i] = PartitionDescriptorConvertCToGo(partition)
	}
	// C.GoString copied the names, so the C buffer can be released.
	C.slurm_free_partition_info_msg(cPartitionBuffer)
	return goPartitionBuffer
}
// PartitionDescriptorConvertCToGo builds a minimal PartitionInfo from a C
// partition_info record: only the partition name is copied (contrast
// Partition_info_convert_c_to_go, which converts every field).
func PartitionDescriptorConvertCToGo(cStruct *C.struct_partition_info) pbslurm.PartitionInfo {
	var goStruct pbslurm.PartitionInfo
	goStruct.Name = C.GoString(cStruct.name)
	return goStruct
}
// GetPartition returns every partition whose name contains
// req.PartitionName as a substring (an empty name matches all).
func (slurmStruct SlurmStruct) GetPartition(ctx context.Context, req *pbslurm.GetPartitionReq) (*pbslurm.GetPartitionResp, error) {
	resp := pbslurm.GetPartitionResp{}
	for _, p := range GetPartitionsInfo().PartitionInfoList {
		if !strings.Contains(p.Name, req.PartitionName) {
			continue
		}
		match := p // fresh copy per iteration: the response stores pointers
		resp.PartitionInfos = append(resp.PartitionInfos, &match)
	}
	return &resp, nil
}

View File

@ -0,0 +1,31 @@
package slurmer
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
// int ping_result;
// ping_result = slurm_ping(int ping)
int slurm_ping(int controller){
int ping_result;
ping_result = slurm_ping(controller);
return ping_result;
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
)
func (slurmStruct SlurmStruct) GetPingResult(ctx context.Context, req *pbslurm.PingInfoMsgReq) (*pbslurm.PingInfoMsgResp, error) {
cgoIntA := C.int(req.Controller)
Ping_result := C.slurm_ping(cgoIntA)
//C.free(unsafe.Pointer(cgoIntA))
var resp = pbslurm.PingInfoMsgResp{}
resp.PingResult = int32(Ping_result)
return &resp, nil
}

View File

@ -0,0 +1,161 @@
package slurmer
/*
#cgo LDFLAGS: -lslurm
#include<stdlib.h>
#include<slurm/slurm.h>
#include<slurm/slurm_errno.h>
struct reserve_info_msg *get_reserve_info(){
struct reserve_info_msg* reservation_buffer;
if(slurm_load_reservations ((time_t) NULL,
&reservation_buffer))
return NULL;
return reservation_buffer;
}
struct reserve_info* reservation_from_list(struct reserve_info_msg *list, int i){
return &list->reservation_array[i];
}
void free_reservation_buffer(void* buffer){
slurm_free_reservation_info_msg ((struct reserve_info_msg*)buffer);
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
"strings"
)
// ReservationInfoMsg is the Go-side mirror of slurm's reserve_info_msg_t:
// the load timestamp, record count, converted reservations, and an errno
// captured when loading fails.
type ReservationInfoMsg struct {
	LastUpdate      int64  // last_update from the C buffer (unix seconds)
	RecordCount     uint32 // number of entries in ReservationList
	ReservationList []pbslurm.ReservationInfo
	ErrorCode       uint32 // slurm errno when the load failed, else zero
}
// Reservation_info_convert_c_to_go maps one C reserve_info record onto the
// protobuf ReservationInfo message. Strings are copied via C.GoString, so
// the result stays valid after the C buffer is freed.
//
// Fix: Features was assigned twice; the redundant second assignment is
// removed (no behavior change, same value both times).
func Reservation_info_convert_c_to_go(c_struct *C.struct_reserve_info) pbslurm.ReservationInfo {
	var go_struct pbslurm.ReservationInfo
	go_struct.Accounts = C.GoString(c_struct.accounts)
	go_struct.EndTime = int64(c_struct.end_time)
	go_struct.Features = C.GoString(c_struct.features)
	go_struct.Flags = uint32(c_struct.flags)
	go_struct.Licenses = C.GoString(c_struct.licenses)
	go_struct.Name = C.GoString(c_struct.name)
	go_struct.NodeCnt = uint32(c_struct.node_cnt)
	go_struct.CoreCnt = uint32(c_struct.core_cnt)
	// node_inx (node index ranges) is not converted yet; the commented
	// attempt below is kept for reference.
	/*t := C.find_node_inx(c_struct.node_inx)
	fmt.Printf("%d", t)
	go_struct.Node_inx = make([]int32, t, t)
	for i := int32(0); i < int32(t); i++ {
		go_struct.Node_inx[i] = int32(C.int32_ptr(c_struct.node_inx, C.int(i)))
	}*/
	go_struct.NodeList = C.GoString(c_struct.node_list)
	go_struct.Partition = C.GoString(c_struct.partition)
	go_struct.StartTime = int64(c_struct.start_time)
	return go_struct
}
// Get_all_reservation loads every reservation from slurmctld and converts
// each record with Reservation_info_convert_c_to_go. On load failure it
// returns an empty message whose ErrorCode carries the slurm errno. The
// C buffer is freed before returning.
func Get_all_reservation() ReservationInfoMsg {
	var go_reservation_buffer ReservationInfoMsg
	c_reservation_buffer := C.get_reserve_info()
	// get_reserve_info returns NULL when slurm_load_reservations fails.
	if c_reservation_buffer == nil {
		go_reservation_buffer.LastUpdate = int64(0)
		go_reservation_buffer.RecordCount = uint32(0)
		go_reservation_buffer.ErrorCode = uint32(C.slurm_get_errno())
		return go_reservation_buffer
	}
	go_reservation_buffer.LastUpdate = int64(c_reservation_buffer.last_update)
	go_reservation_buffer.RecordCount = uint32(c_reservation_buffer.record_count)
	go_reservation_buffer.ReservationList = make([]pbslurm.ReservationInfo, c_reservation_buffer.record_count, c_reservation_buffer.record_count)
	for i := uint32(0); i < go_reservation_buffer.RecordCount; i++ {
		reservation := C.reservation_from_list(c_reservation_buffer, C.int(i))
		go_reservation := Reservation_info_convert_c_to_go(reservation)
		go_reservation_buffer.ReservationList[i] = go_reservation
	}
	// Strings were copied by C.GoString, so freeing here is safe.
	C.slurm_free_reservation_info_msg(c_reservation_buffer)
	return go_reservation_buffer
}
// ListReservations returns every reservation known to slurmctld.
func (slurmStruct SlurmStruct) ListReservations(ctx context.Context, req *pbslurm.ListReservationsReq) (*pbslurm.ListReservationsResp, error) {
	resp := pbslurm.ListReservationsResp{}
	for _, r := range Get_all_reservation().ReservationList {
		entry := r // fresh copy per iteration: the response stores pointers
		resp.ReservationList = append(resp.ReservationList, &entry)
	}
	return &resp, nil
}
// GetReservationsInfo loads all reservations and converts each record to
// a name-only ReservationInfo (via ReservationDescriptorConvertCToGo).
//
// Fixes over the previous version: get_reserve_info can return NULL on a
// failed slurm_load_reservations, which would have crashed the
// record_count dereference; the C buffer was also never freed (leak).
// Both now match the handling in Get_all_reservation.
func GetReservationsInfo() ReservationInfoMsg {
	var goReservationBuffer ReservationInfoMsg
	cReservationBuffer := C.get_reserve_info()
	if cReservationBuffer == nil {
		goReservationBuffer.ErrorCode = uint32(C.slurm_get_errno())
		return goReservationBuffer
	}
	goReservationBuffer.RecordCount = uint32(cReservationBuffer.record_count)
	goReservationBuffer.ReservationList = make([]pbslurm.ReservationInfo, cReservationBuffer.record_count)
	for i := uint32(0); i < goReservationBuffer.RecordCount; i++ {
		rec := C.reservation_from_list(cReservationBuffer, C.int(i))
		goReservationBuffer.ReservationList[i] = ReservationDescriptorConvertCToGo(rec)
	}
	// C.GoString copied the names, so the C buffer can be released.
	C.slurm_free_reservation_info_msg(cReservationBuffer)
	return goReservationBuffer
}
// ReservationDescriptorConvertCToGo builds a minimal ReservationInfo from
// a C reserve_info record: only the reservation name is copied (contrast
// Reservation_info_convert_c_to_go, which converts every field).
func ReservationDescriptorConvertCToGo(cStruct *C.struct_reserve_info) pbslurm.ReservationInfo {
	var goStruct pbslurm.ReservationInfo
	goStruct.Name = C.GoString(cStruct.name)
	return goStruct
}
// GetReservation returns every reservation whose name contains
// req.ReservationName as a substring (an empty name matches all).
func (slurmStruct SlurmStruct) GetReservation(ctx context.Context, req *pbslurm.GetReservationReq) (*pbslurm.GetReservationResp, error) {
	resp := pbslurm.GetReservationResp{}
	for _, r := range GetReservationsInfo().ReservationList {
		if !strings.Contains(r.Name, req.ReservationName) {
			continue
		}
		match := r // fresh copy per iteration: the response stores pointers
		resp.ReservationInfos = append(resp.ReservationInfos, &match)
	}
	return &resp, nil
}
/*func (slurmStruct SlurmStruct) GetReservationByName(ctx context.Context, req *pbslurm.ReservationInfoMsgReq) (*pbslurm.ReservationInfoMsgResp, error) {
reservation := Get_reservation_info(req.ReservationName)
var resp = pbslurm.ReservationInfoMsgResp{}
for _, reservation := range reservation.Reservation_list {
reservationInfoResult := pbslurm.Reservation_Info{}
reservationInfoResult.Name = reservation.Name
/*nodeInfoResult.Boards = uint32(node.Boards)
nodeInfoResult.RealMemory = node.Real_memory
nodeInfoResult.Sockets = uint32(node.Sockets)
nodeInfoResult.Threads = uint32(node.Threads)*
resp.ReservationList = append(resp.ReservationList, &reservationInfoResult)
}
return &resp, nil
}
func Get_reservation_info(name string) ReserveInfoMsg {
go_reservation_buffer := Get_all_reservation()
//partitionList := list.New()
for i := range go_reservation_buffer.ReservationList {
//遍历nodeList集合
reservation := go_reservation_buffer.ReservationList[i]
fmt.Println("reservationName: " + reservation.Name)
//当集合中的名字等于传过来的值
if name == reservation.Name {
/*partitionList.PushFront(partition.Name)
partitionList.PushFront(partition.Nodes)*
} else {
}
//fmt.Println(partition.Node_inx)
}
return go_reservation_buffer
}*/

View File

@ -0,0 +1,206 @@
package slurmer
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include <memory.h>
#include <malloc.h>
typedef struct account_info_msg {
uint32_t record_count;
slurmdb_account_rec_t *account_array;
} account_info_msg_t;
typedef struct slurmdb_account_rec{
List assoc_list;
List coord_accts;
char *description;
char *name;
char *organization;
} slurmdb_account_rec_pcm;
struct account_info_msg get_account_info() {
struct account_info_msg accountinfo;
List accountList = NULL;
slurmdb_account_cond_t *account_cond = NULL;
void *db_conn;
db_conn = slurmdb_connection_get();
accountList = slurmdb_accounts_get(db_conn, account_cond);
slurmdb_connection_close(&db_conn);
slurmdb_account_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(accountList);
int i = 0;
uint32_t length;
length = slurm_list_count(accountList);
accountinfo.record_count = length;
accountinfo.account_array = malloc(length * sizeof(slurmdb_account_rec_t));
while ((rec = slurm_list_next(itr))) {
accountinfo.account_array[i] = *rec;
i++;
}
return accountinfo;
}
struct slurmdb_account_rec *account_from_list(struct account_info_msg *list, int i) {
return (struct slurmdb_account_rec *) &list->account_array[i];
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/ssh"
"context"
"strings"
)
// AcctInfoMsg holds the accounts fetched from slurmdbd: a record count
// and the converted per-account records.
type AcctInfoMsg struct {
	LastUpdate   int64  // reserved; not populated by GetAcctInfo
	RecordCount  uint32 // number of entries in AcctInfoList
	AcctInfoList []pbslurm.AccountInfo
}
// AcctDescriptorConvertCToGo builds a minimal AccountInfo from a C
// slurmdb account record: only the account name is copied.
func AcctDescriptorConvertCToGo(cStruct *C.struct_slurmdb_account_rec) pbslurm.AccountInfo {
	var goStruct pbslurm.AccountInfo
	goStruct.Name = C.GoString(cStruct.name)
	return goStruct
}
// GetAcctInfo fetches every account from slurmdbd and converts each
// record (name only) to its protobuf form.
//
// NOTE(review): the C helper mallocs account_array and it is never freed
// here (leak); a failed/empty fetch is also unguarded. Fixing requires
// C.free via unsafe — confirm and address separately.
func GetAcctInfo() AcctInfoMsg {
	var goAcctBuffer AcctInfoMsg
	cAcctBuffer := C.get_account_info()
	goAcctBuffer.RecordCount = uint32(cAcctBuffer.record_count)
	goAcctBuffer.AcctInfoList = make([]pbslurm.AccountInfo, cAcctBuffer.record_count, cAcctBuffer.record_count)
	for i := uint32(0); i < goAcctBuffer.RecordCount; i++ {
		Acct := C.account_from_list(&cAcctBuffer, C.int(i))
		goAcct := AcctDescriptorConvertCToGo(Acct)
		goAcctBuffer.AcctInfoList[i] = goAcct
	}
	return goAcctBuffer
}
// ListAccounts returns every account in the slurm accounting database.
func (slurmStruct SlurmStruct) ListAccounts(ctx context.Context, req *pbslurm.ListAccountsReq) (*pbslurm.ListAccountsResp, error) {
	resp := pbslurm.ListAccountsResp{}
	for _, acct := range GetAcctInfo().AcctInfoList {
		entry := acct // fresh copy per iteration: the response stores pointers
		resp.AccountInfos = append(resp.AccountInfos, &entry)
	}
	return &resp, nil
}
// GetAccount returns every account whose name contains req.AccountName
// as a substring (an empty name matches all).
func (slurmStruct SlurmStruct) GetAccount(ctx context.Context, req *pbslurm.GetAccountReq) (*pbslurm.GetAccountResp, error) {
	resp := pbslurm.GetAccountResp{}
	for _, acct := range GetAcctInfo().AcctInfoList {
		if !strings.Contains(acct.Name, req.AccountName) {
			continue
		}
		match := acct // fresh copy per iteration: the response stores pointers
		resp.AccountInfos = append(resp.AccountInfos, &match)
	}
	return &resp, nil
}
// AddAccount creates a slurm account through sacctmgr, translating every
// populated request field into the corresponding command-line option and
// running non-interactively (-i). The raw command output is returned.
func (slurmStruct SlurmStruct) AddAccount(ctx context.Context, req *pbslurm.AddAccountReq) (*pbslurm.AddAccountResp, error) {
	var b strings.Builder
	b.WriteString("/usr/local/bin/sacctmgr add account ")
	b.WriteString(req.Names)
	// Option name / request value pairs, emitted in the same order as
	// before; `quote` wraps the value in double quotes.
	options := []struct {
		flag  string
		value string
		quote bool
	}{
		{" Clusters=", req.Clusters, false},
		{" DefaultQOS=", req.DefaultQos, false},
		{" Description=", req.Description, true},
		{" Fairshare=", req.FairShare, false},
		{" GrpCPUMins=", req.GrpCpuMins, false},
		{" GrpCPUs=", req.GrpCpus, false},
		{" GrpJobs=", req.GrpJobs, false},
		{" GrpMemory=", req.GrpMemory, false},
		{" GrpNodes=", req.GrpNodes, false},
		{" GrpSubmitJob=", req.GrpSubmitJobs, false},
		{" GrpWall=", req.GrpWall, false},
		{" MaxCPUMins=", req.MaxCpuMins, false},
		{" MaxCPUs=", req.MaxCpus, false},
		{" MaxJobs=", req.MaxJobs, false},
		{" MaxNodes=", req.MaxNodes, false},
		{" MaxSubmitJobs=", req.MaxSubmitJobs, false},
		{" MaxWall=", req.MaxWall, false},
		{" Organization=", req.Organization, false},
		{" Parent=", req.Parent, false},
		{" QosLevel=", req.QosLevel, false},
	}
	for _, opt := range options {
		if len(opt.value) == 0 {
			continue
		}
		b.WriteString(opt.flag)
		if opt.quote {
			b.WriteString("\"" + opt.value + "\"")
		} else {
			b.WriteString(opt.value)
		}
	}
	b.WriteString(" -i")
	result := ssh.ExecCommand(b.String())
	resp := pbslurm.AddAccountResp{}
	resp.Result = result
	return &resp, nil
}
// DeleteAccount removes the named account(s) via sacctmgr in
// non-interactive mode (-i) and returns the command output.
func (slurmStruct SlurmStruct) DeleteAccount(ctx context.Context, req *pbslurm.DeleteAccountReq) (*pbslurm.DeleteAccountResp, error) {
	cmd := "/usr/local/bin/sacctmgr delete account " + req.Names + " -i"
	result := ssh.ExecCommand(cmd)
	resp := pbslurm.DeleteAccountResp{}
	resp.Result = result
	return &resp, nil
}

View File

@ -0,0 +1,204 @@
package slurmer
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
"strings"
)
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include <memory.h>
#include <malloc.h>
typedef struct assoc_info_msg {
uint32_t record_count;
slurmdb_association_rec_t *assoc_array;
} assoc_info_msg_t;
typedef struct list_assoc_req {
uint32_t slurm_version;
char *cluster;
char *account;
char *user;
char *partition;
} list_assoc_req_t;
typedef struct slurmdb_association_rec{
List accounting_list;
char *acct;
char *cluster;
uint32_t def_qos_id;
uint64_t grp_cpu_mins;
uint64_t grp_cpu_run_mins;
uint32_t grp_cpus;
uint32_t grp_jobs;
uint32_t grp_mem;
uint32_t grp_nodes;
uint32_t grp_submit_jobs;
uint32_t grp_wall;
uint32_t id;
uint16_t is_def;
uint32_t lft;
uint64_t max_cpu_mins_pj;
uint64_t max_cpu_run_mins;
uint32_t max_cpus_pj;
uint32_t max_jobs;
uint32_t max_nodes_pj;
uint32_t max_submit_jobs;
uint32_t max_wall_pj;
char *parent_acct;
uint32_t parent_id;
char *partition;
List qos_list;
uint32_t rgt;
uint32_t shares_raw;
uint32_t uid;
assoc_mgr_association_usage_t *usage;
char *user;
} slurmdb_association_rec_pcm;
struct assoc_info_msg get_assoc_infos() {
struct assoc_info_msg associnfo;
List assocList = NULL;
slurmdb_association_cond_t *assoc_cond = NULL;
void *db_conn;
db_conn = slurmdb_connection_get();
assocList = slurmdb_associations_get(db_conn, assoc_cond);
slurmdb_connection_close(&db_conn);
slurmdb_association_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(assocList);
int i = 0;
uint32_t length;
length = slurm_list_count(assocList);
associnfo.record_count = length;
associnfo.assoc_array = malloc(length * sizeof(slurmdb_association_rec_t));
while ((rec = slurm_list_next(itr))) {
associnfo.assoc_array[i] = *rec;
i++;
}
return associnfo;
}
struct slurmdb_association_rec *assoc_from_list(struct assoc_info_msg *list, int i) {
return (struct slurmdb_association_rec *) &list->assoc_array[i];
}
*/
import "C"
// AssocInfoMsg holds the associations fetched from slurmdbd: a record
// count and the converted per-association records.
type AssocInfoMsg struct {
	LastUpdate    int64  // reserved; not populated by GetAssociationInfos
	RecordCount   uint32 // number of entries in AssocInfoList
	AssocInfoList []pbslurm.AssociationInfo
}
// AssocDescriptorConvertCToGo maps one C slurmdb association record onto
// the protobuf AssociationInfo message. List-typed and usage fields are
// not converted (see the commented lines).
func AssocDescriptorConvertCToGo(cStruct *C.struct_slurmdb_association_rec) pbslurm.AssociationInfo {
	var goStruct pbslurm.AssociationInfo
	//goStruct.AccountingList = cStruct.accounting_list
	goStruct.Acct = C.GoString(cStruct.acct)
	goStruct.Cluster = C.GoString(cStruct.cluster)
	goStruct.DefQosId = int32(cStruct.def_qos_id)
	goStruct.GrpCpuMins = int64(cStruct.grp_cpu_mins)
	// NOTE(review): the proto field is named GrpGpuRunMins but the source
	// is grp_cpu_run_mins — looks like a CPU/GPU naming typo; confirm.
	goStruct.GrpGpuRunMins = int64(cStruct.grp_cpu_run_mins)
	goStruct.GrpCpus = int32(cStruct.grp_cpus)
	goStruct.GrpJobs = int32(cStruct.grp_jobs)
	goStruct.GrpMem = int32(cStruct.grp_mem)
	goStruct.GrpNodes = int32(cStruct.grp_nodes)
	goStruct.GrpSubmitJobs = int32(cStruct.grp_submit_jobs)
	goStruct.GrpWall = int32(cStruct.grp_wall)
	goStruct.Id = int32(cStruct.id)
	goStruct.IsDef = int32(cStruct.is_def)
	goStruct.Lft = int32(cStruct.lft)
	goStruct.MaxCpuMinsPj = int64(cStruct.max_cpu_mins_pj)
	goStruct.MaxCpuRunMins = int64(cStruct.max_cpu_run_mins)
	goStruct.MaxCpusPj = int32(cStruct.max_cpus_pj)
	goStruct.MaxJobs = int32(cStruct.max_jobs)
	goStruct.MaxNodesPj = int32(cStruct.max_nodes_pj)
	goStruct.MaxSubmitJobs = int32(cStruct.max_submit_jobs)
	goStruct.MaxWallPj = int32(cStruct.max_wall_pj)
	goStruct.ParentAcct = C.GoString(cStruct.parent_acct)
	goStruct.ParentId = int32(cStruct.parent_id)
	goStruct.Partition = C.GoString(cStruct.partition)
	//goStruct.QosList = cStruct.qos_list
	goStruct.Rgt = int32(cStruct.rgt)
	goStruct.SharesRaw = int32(cStruct.shares_raw)
	goStruct.Uid = int32(cStruct.uid)
	//goStruct.AssocUsage = cStruct.usage
	goStruct.User = C.GoString(cStruct.user)
	return goStruct
}
// GetAssociationInfos lists info for all associations fetched from
// slurmdbd, converting each record with AssocDescriptorConvertCToGo.
//
// NOTE(review): the C helper mallocs assoc_array and it is never freed
// here (leak); a failed/empty fetch is also unguarded — confirm and fix
// separately (requires C.free via unsafe).
func GetAssociationInfos() AssocInfoMsg {
	var goAssocBuffer AssocInfoMsg
	cAssocBuffer := C.get_assoc_infos()
	goAssocBuffer.RecordCount = uint32(cAssocBuffer.record_count)
	goAssocBuffer.AssocInfoList = make([]pbslurm.AssociationInfo, cAssocBuffer.record_count, cAssocBuffer.record_count)
	for i := uint32(0); i < goAssocBuffer.RecordCount; i++ {
		assoc := C.assoc_from_list(&cAssocBuffer, C.int(i))
		goAssociation := AssocDescriptorConvertCToGo(assoc)
		goAssocBuffer.AssocInfoList[i] = goAssociation
	}
	return goAssocBuffer
}
// SetAssociation set association info.
//
// NOTE(review): despite its name, this function performs no update: it
// fetches and converts the association list exactly like
// GetAssociationInfos, ignores accocInfo entirely, and always returns
// true. Callers therefore receive a success result without any change
// being applied — confirm intent before relying on it.
func SetAssociation(accocInfo pbslurm.AssociationInfo) bool {
	var goAssocBuffer AssocInfoMsg
	cAssocBuffer := C.get_assoc_infos()
	goAssocBuffer.RecordCount = uint32(cAssocBuffer.record_count)
	goAssocBuffer.AssocInfoList = make([]pbslurm.AssociationInfo, cAssocBuffer.record_count, cAssocBuffer.record_count)
	for i := uint32(0); i < goAssocBuffer.RecordCount; i++ {
		assoc := C.assoc_from_list(&cAssocBuffer, C.int(i))
		goAssociation := AssocDescriptorConvertCToGo(assoc)
		goAssocBuffer.AssocInfoList[i] = goAssociation
	}
	return true
}
// ListAssociations returns every association in the accounting database.
func (slurmStruct SlurmStruct) ListAssociations(ctx context.Context, req *pbslurm.ListAssociationsReq) (*pbslurm.ListAssociationsResp, error) {
	resp := pbslurm.ListAssociationsResp{}
	for _, a := range GetAssociationInfos().AssocInfoList {
		entry := a // fresh copy per iteration: the response stores pointers
		resp.AssocInfos = append(resp.AssocInfos, &entry)
	}
	return &resp, nil
}
// SetAssociation is the RPC wrapper around the package-level
// SetAssociation helper.
// NOTE(review): that helper currently applies no change and always
// reports true, so Result here is unconditionally true — confirm intent.
func (slurmStruct SlurmStruct) SetAssociation(ctx context.Context, req *pbslurm.SetAssociationReq) (*pbslurm.SetAssociationResp, error) {
	result := SetAssociation(*req.AssocInfo)
	resp := pbslurm.SetAssociationResp{}
	resp.Result = result
	return &resp, nil
}
// GetAssociation filters all associations by substring match on the
// cluster, account, user and partition criteria in the request. At least
// one criterion must be non-empty; otherwise nothing is returned (an
// empty criterion matches everything for that field).
func (slurmStruct SlurmStruct) GetAssociation(ctx context.Context, req *pbslurm.GetAssociationReq) (*pbslurm.GetAssociationResp, error) {
	assocList := GetAssociationInfos()
	resp := pbslurm.GetAssociationResp{}
	hasFilter := len(req.Cluster) != 0 || len(req.User) != 0 || len(req.Account) != 0 || len(req.Partition) != 0
	if hasFilter {
		for _, assoc := range assocList.AssocInfoList {
			if !strings.Contains(assoc.Cluster, req.Cluster) ||
				!strings.Contains(assoc.Acct, req.Account) ||
				!strings.Contains(assoc.User, req.User) ||
				!strings.Contains(assoc.Partition, req.Partition) {
				continue
			}
			match := assoc // fresh copy: the response stores pointers
			resp.AssocInfos = append(resp.AssocInfos, &match)
		}
	}
	return &resp, nil
}

View File

@ -0,0 +1,169 @@
package slurmer
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include <memory.h>
#include <malloc.h>
typedef struct cluster_info_msg {
uint32_t record_count;
slurmdb_cluster_rec_t *cluster_array;
} cluster_info_msg_t;
typedef struct slurmdb_cluster_rec{
char *name;
} slurmdb_cluster_rec_pcm;
struct cluster_info_msg get_cluster_info() {
struct cluster_info_msg clusterinfo;
List clusterList = NULL;
slurmdb_cluster_cond_t *cluster_cond = NULL;
void *db_conn;
db_conn = slurmdb_connection_get();
clusterList = slurmdb_clusters_get(db_conn, cluster_cond);
slurmdb_connection_close(&db_conn);
slurmdb_cluster_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(clusterList);
int i = 0;
uint32_t length;
length = slurm_list_count(clusterList);
clusterinfo.record_count = length;
clusterinfo.cluster_array = malloc(length * sizeof(slurmdb_cluster_rec_t));
while ((rec = slurm_list_next(itr))) {
clusterinfo.cluster_array[i] = *rec;
i++;
}
return clusterinfo;
}
struct slurmdb_cluster_rec *cluster_from_list(struct cluster_info_msg *list, int i) {
return (struct slurmdb_cluster_rec *) &list->cluster_array[i];
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"code.gitlink.org.cn/JCCE/PCM.git/common/ssh"
"context"
"strings"
)
// ClusterInfoMsg holds the clusters fetched from slurmdbd: a record count
// and the converted per-cluster records.
type ClusterInfoMsg struct {
	Last_update     int64  // reserved; not populated by GetClusterInfo
	Record_count    uint32 // number of entries in ClusterInfoList
	ClusterInfoList []pbslurm.ClusterInfo
}
// Cluster_descriptor_convert_c_to_go builds a minimal ClusterInfo from a
// C slurmdb cluster record: only the cluster name is copied.
func Cluster_descriptor_convert_c_to_go(c_struct *C.struct_slurmdb_cluster_rec) pbslurm.ClusterInfo {
	var go_struct pbslurm.ClusterInfo
	go_struct.Name = C.GoString(c_struct.name)
	return go_struct
}
// GetClusterInfo fetches every cluster from slurmdbd and converts each
// record (name only) to its protobuf form.
//
// NOTE(review): the C helper mallocs cluster_array and it is never freed
// here (leak); a failed/empty fetch is also unguarded. Fixing requires
// C.free via unsafe — confirm and address separately.
func GetClusterInfo() ClusterInfoMsg {
	var go_cluster_buffer ClusterInfoMsg
	c_cluster_buffer := C.get_cluster_info()
	go_cluster_buffer.Record_count = uint32(c_cluster_buffer.record_count)
	go_cluster_buffer.ClusterInfoList = make([]pbslurm.ClusterInfo, c_cluster_buffer.record_count, c_cluster_buffer.record_count)
	for i := uint32(0); i < go_cluster_buffer.Record_count; i++ {
		cluster := C.cluster_from_list(&c_cluster_buffer, C.int(i))
		go_cluster := Cluster_descriptor_convert_c_to_go(cluster)
		go_cluster_buffer.ClusterInfoList[i] = go_cluster
	}
	return go_cluster_buffer
}
// ListClusters returns every cluster registered in the accounting
// database.
func (slurmStruct SlurmStruct) ListClusters(ctx context.Context, req *pbslurm.ListClustersReq) (*pbslurm.ListClustersResp, error) {
	resp := pbslurm.ListClustersResp{}
	for _, c := range GetClusterInfo().ClusterInfoList {
		entry := c // fresh copy per iteration: the response stores pointers
		resp.ClusterInfos = append(resp.ClusterInfos, &entry)
	}
	return &resp, nil
}
// DeleteCluster removes the named cluster(s) from the slurm accounting
// database by shelling out to sacctmgr in non-interactive mode (-i).
//
// Fix: the command was built as
// "usr/local/bin/sacctmgr delete cluster<names>-i" — missing the leading
// "/" on the binary path and the spaces before the names and the -i
// flag, so it could never run. It now matches DeleteAccount's form.
func (slurmStruct SlurmStruct) DeleteCluster(ctx context.Context, req *pbslurm.DeleteClusterReq) (*pbslurm.DeleteClusterResp, error) {
	cmd := "/usr/local/bin/sacctmgr delete cluster "
	cmd = cmd + req.Names
	cmd = cmd + " -i"
	result := ssh.ExecCommand(cmd)
	resp := pbslurm.DeleteClusterResp{}
	resp.Result = result
	return &resp, nil
}
// AddCluster registers a cluster in the slurm accounting database via
// sacctmgr, mapping each populated request field to a command option and
// running non-interactively (-i). The raw command output is returned.
//
// Fix: the PluginIdSelect field was emitted as " GrpNodes=" (copy-paste
// from the account code); it now emits " PluginIDSelect=".
func (slurmStruct SlurmStruct) AddCluster(ctx context.Context, req *pbslurm.AddClusterReq) (*pbslurm.AddClusterResp, error) {
	cmd := "/usr/local/bin/sacctmgr add cluster "
	cmd = cmd + req.Name
	if len(req.Nodes) != 0 {
		cmd = cmd + " Nodes=" + req.Nodes
	}
	if len(req.Classification) != 0 {
		cmd = cmd + " Classification=" + req.Classification
	}
	if len(req.ControlHost) != 0 {
		cmd = cmd + " ControlHost=\"" + req.ControlHost + "\""
	}
	if len(req.ControlPort) != 0 {
		cmd = cmd + " ControlPort=" + req.ControlPort
	}
	if len(req.CpuCount) != 0 {
		cmd = cmd + " CpuCount=" + req.CpuCount
	}
	if len(req.Dimensions) != 0 {
		cmd = cmd + " Dimensions=" + req.Dimensions
	}
	if len(req.DimSize) != 0 {
		cmd = cmd + " DimSize=" + req.DimSize
	}
	if len(req.Flags) != 0 {
		cmd = cmd + " Flags=" + req.Flags
	}
	if len(req.PluginIdSelect) != 0 {
		cmd = cmd + " PluginIDSelect=" + req.PluginIdSelect
	}
	if len(req.RpcVersion) != 0 {
		cmd = cmd + " RpcVersion=" + req.RpcVersion
	}
	cmd = cmd + " -i"
	result := ssh.ExecCommand(cmd)
	resp := pbslurm.AddClusterResp{}
	resp.Result = result
	return &resp, nil
}
// GetCluster returns every cluster whose name contains req.ClusterName
// (substring match), drawn from the accounting database.
func (slurmStruct SlurmStruct) GetCluster(ctx context.Context, req *pbslurm.GetClusterReq) (*pbslurm.GetClusterResp, error) {
	all := GetClusterInfo().ClusterInfoList
	resp := pbslurm.GetClusterResp{}
	for i := range all {
		if !strings.Contains(all[i].Name, req.ClusterName) {
			continue
		}
		match := all[i] // copy before taking the address
		resp.ClusterInfos = append(resp.ClusterInfos, &match)
	}
	return &resp, nil
}

View File

@ -0,0 +1,158 @@
package slurmer
import "C"
import pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include<slurm/slurm_errno.h>
#include <memory.h>
#include <malloc.h>
typedef struct slurmdb_qos_rec {
char *description;
uint32_t id;
uint32_t flags;
uint32_t grace_time;
uint64_t grp_cpu_mins;
uint64_t grp_cpu_run_mins;
uint32_t grp_cpus;
uint32_t grp_jobs;
uint32_t grp_mem;
uint32_t grp_nodes;
uint32_t grp_submit_jobs;
uint32_t grp_wall;
uint64_t max_cpu_mins_pj;
uint64_t max_cpu_run_mins_pu;
uint32_t max_cpus_pj;
uint32_t max_cpus_pu;
uint32_t max_jobs_pu;
uint32_t max_nodes_pj;
uint32_t max_nodes_pu;
uint32_t max_submit_jobs_pu;
uint32_t max_wall_pj;
char *name;
bitstr_t *preempt_bitstr;
List preempt_list;
uint16_t preempt_mode;
uint32_t priority;
assoc_mgr_qos_usage_t *usage;
double usage_factor;
double usage_thres;
} slurmdb_qos_rec_a;
typedef struct qos_info {
uint32_t record_count;
slurmdb_qos_rec_t *array;
} qos_info_t;
struct qos_info get_qos_list() {
struct qos_info qosinfo;
slurmdb_qos_cond_t *qos_cond = NULL;
void *conn = slurmdb_connection_get();
List qoslist = slurmdb_qos_get(conn, qos_cond);
uint16_t size = slurm_list_count(qoslist);
qosinfo.record_count = size;
qosinfo.array = malloc(size * sizeof(slurmdb_qos_rec_t));
//slurmdb_qos_rec_t qosArray[size];
slurmdb_qos_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(qoslist);
int i = 0;
while ((rec = slurm_list_next(itr))) {
qosinfo.array[i] = *rec;
i++;
}
slurmdb_connection_close(&conn);
slurm_list_destroy(qoslist);
return qosinfo;
}
struct slurmdb_qos_rec *qos_from_list(struct qos_info *qos_rec_t, int i) {
return (struct slurmdb_qos_rec *) &qos_rec_t->array[i];
}
*/
import "C"
import (
"context"
)
// Slurmdb_qos_rec is the Go mirror of the cgo struct slurmdb_qos_rec
// declared in the preamble above: one QOS (quality-of-service) record
// from the slurm accounting database.
// Naming follows the C members: Grp* are limits on the QOS as a whole,
// Max*Pj are per-job limits, Max*Pu are per-user limits.
// NOTE(review): the last five fields are unexported while the rest are
// exported — confirm whether they are meant to be visible outside this
// package.
type Slurmdb_qos_rec struct {
Description string
Id uint32
Flags uint32
GraceTime uint32
GrpCpuMins uint64
GrpCpuRunMins uint64
GrpCpus uint32
GrpJobs uint32
GrpMem uint32
GrpNodes uint32
GrpSubmitJobs uint32
MaxCpuMinsPj uint64
MaxCpuRunMinsPu uint64
MaxCpusPj uint32
MaxCpusPu uint32
MaxJobsPu uint32
MaxNodesPj uint32
MaxNodesPu uint32
MaxSubmitJobsPu uint32
MaxWallPj uint32
Name string
preemptList []string
preempt_mode uint16
priority uint32
usage_factor float64
usage_thres float64
}
// QosInfoMsg is the Go-side container for a batch of QOS records:
// RecordCount mirrors the C record_count and QosList holds the
// converted protobuf entries.
type QosInfoMsg struct {
RecordCount uint32
QosList []pbslurm.QosInfo
}
// QosDescriptorConvertCToGo maps one C slurmdb_qos_rec onto the
// protobuf QosInfo. Only the name is copied at present; all other C
// fields are dropped.
func QosDescriptorConvertCToGo(cStruct *C.struct_slurmdb_qos_rec) pbslurm.QosInfo {
var goStruct pbslurm.QosInfo
goStruct.Name = C.GoString(cStruct.name)
return goStruct
}
// GetQosInfo fetches all QOS records from slurmdbd via the cgo helper
// get_qos_list and converts each one into a protobuf QosInfo.
// NOTE(review): get_qos_list malloc()s qosinfo.array (see the preamble)
// and nothing frees it here, so every call leaks the C array — consider
// releasing it with C.free once the records have been copied.
func GetQosInfo() QosInfoMsg {
var goQosBuffer QosInfoMsg
cQosBuffer := C.get_qos_list()
goQosBuffer.RecordCount = uint32(cQosBuffer.record_count)
// Pre-size the slice to the record count reported by the C side.
goQosBuffer.QosList = make([]pbslurm.QosInfo, cQosBuffer.record_count, cQosBuffer.record_count)
for i := uint32(0); i < goQosBuffer.RecordCount; i++ {
qos := C.qos_from_list(&cQosBuffer, C.int(i))
goQos := QosDescriptorConvertCToGo(qos)
goQosBuffer.QosList[i] = goQos
}
return goQosBuffer
}
// ListQoss returns every QOS record currently known to the slurm
// accounting database.
func (slurmStruct SlurmStruct) ListQoss(ctx context.Context, req *pbslurm.ListQossReq) (*pbslurm.ListQossResp, error) {
	resp := pbslurm.ListQossResp{}
	for _, q := range GetQosInfo().QosList {
		item := q // copy: append a pointer to a fresh value, not the range variable
		resp.QosInfos = append(resp.QosInfos, &item)
	}
	return &resp, nil
}

View File

@ -0,0 +1,195 @@
package slurmer
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include <memory.h>
#include <malloc.h>
typedef struct user_info_msg {
uint32_t record_count;
slurmdb_user_rec_t *user_array;
} user_info_msg_t;
typedef struct slurmdb_user_rec{
uint16_t admin_level;
List assoc_list;
List coord_accts;
char *default_acct;
char *default_wckey;
char *name;
char *old_name;
uint32_t uid;
List wckey_list;
} slurmdb_user_rec_pcm;
struct user_info_msg get_user_info() {
struct user_info_msg userinfo;
List userList = NULL;
slurmdb_user_cond_t *user_cond = NULL;
void *db_conn;
db_conn = slurmdb_connection_get();
userList = slurmdb_users_get(db_conn, user_cond);
slurmdb_connection_close(&db_conn);
slurmdb_user_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(userList);
int i = 0;
uint32_t length;
length = slurm_list_count(userList);
userinfo.record_count = length;
userinfo.user_array = malloc(length * sizeof(slurmdb_user_rec_t));
while ((rec = slurm_list_next(itr))) {
userinfo.user_array[i] = *rec;
i++;
}
return userinfo;
}
struct slurmdb_user_rec *user_from_list(struct user_info_msg *list, int i) {
return (struct slurmdb_user_rec *) &list->user_array[i];
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
ssh "code.gitlink.org.cn/JCCE/PCM.git/common/ssh"
"context"
"strings"
)
// UserInfoMsg is the Go-side container for a batch of slurm accounting
// user records. RecordCount mirrors the C record_count; LastUpdate is
// declared but not populated by GetUserInfo below.
type UserInfoMsg struct {
LastUpdate int64
RecordCount uint32
UserInfoList []pbslurm.UserInfo
}
// UserDescriptorConvertCToGo maps one C slurmdb_user_rec onto the
// protobuf UserInfo. Only the name is copied at present; all other C
// fields (uid, default account, etc.) are dropped.
func UserDescriptorConvertCToGo(cStruct *C.struct_slurmdb_user_rec) pbslurm.UserInfo {
var goStruct pbslurm.UserInfo
goStruct.Name = C.GoString(cStruct.name)
return goStruct
}
// GetUserInfo fetches all user records from slurmdbd via the cgo helper
// get_user_info and converts each one into a protobuf UserInfo.
// NOTE(review): get_user_info malloc()s userinfo.user_array and never
// destroys the slurm List or iterator it creates, and nothing frees the
// array here — every call leaks C memory; consider cleaning up in the C
// helper and freeing the array with C.free after the copy.
func GetUserInfo() UserInfoMsg {
var goUserBuffer UserInfoMsg
cUserBuffer := C.get_user_info()
goUserBuffer.RecordCount = uint32(cUserBuffer.record_count)
// Pre-size the slice to the record count reported by the C side.
goUserBuffer.UserInfoList = make([]pbslurm.UserInfo, cUserBuffer.record_count, cUserBuffer.record_count)
for i := uint32(0); i < goUserBuffer.RecordCount; i++ {
user := C.user_from_list(&cUserBuffer, C.int(i))
goUser := UserDescriptorConvertCToGo(user)
goUserBuffer.UserInfoList[i] = goUser
}
return goUserBuffer
}
// ListUsers returns every user record currently known to the slurm
// accounting database.
func (slurmStruct SlurmStruct) ListUsers(ctx context.Context, req *pbslurm.ListUsersReq) (*pbslurm.ListUsersResp, error) {
	resp := pbslurm.ListUsersResp{}
	for _, u := range GetUserInfo().UserInfoList {
		item := u // copy: append a pointer to a fresh value, not the range variable
		resp.UserInfos = append(resp.UserInfos, &item)
	}
	return &resp, nil
}
// GetUser returns every user whose name contains req.UserName
// (substring match), drawn from the accounting database.
func (slurmStruct SlurmStruct) GetUser(ctx context.Context, req *pbslurm.GetUserReq) (*pbslurm.GetUserResp, error) {
	resp := pbslurm.GetUserResp{}
	for _, u := range GetUserInfo().UserInfoList {
		if !strings.Contains(u.Name, req.UserName) {
			continue
		}
		match := u // copy before taking the address
		resp.UserInfo = append(resp.UserInfo, &match)
	}
	return &resp, nil
}
// AddUser creates one or more slurm accounting users via sacctmgr.
// req.Names is required; every other request field is optional and is
// appended as a "Key=Value" pair only when non-empty. The command runs
// with -i so sacctmgr does not prompt for confirmation.
//
// This revision folds fifteen copy-pasted if-blocks into one helper;
// the generated command line is byte-for-byte identical to before.
func (slurmStruct SlurmStruct) AddUser(ctx context.Context, req *pbslurm.AddUserReq) (*pbslurm.AddUserResp, error) {
	cmd := "/usr/local/bin/sacctmgr add user " + req.Names
	// appendOpt appends " key=value" to cmd when value is non-empty.
	appendOpt := func(key, value string) {
		if len(value) != 0 {
			cmd = cmd + " " + key + "=" + value
		}
	}
	appendOpt("Accounts", req.Accounts)
	appendOpt("AdminLevel", req.AdminLevel)
	appendOpt("Clusters", req.Clusters)
	appendOpt("DefaultAccount", req.DefaultAccount)
	appendOpt("DefaultQOS", req.DefaultQos)
	appendOpt("DefaultWCKey", req.DefaultWckey)
	appendOpt("Fairshare", req.FairShare)
	appendOpt("MaxCPUMins", req.MaxCpuMins)
	appendOpt("MaxCPUs", req.MaxCpus)
	appendOpt("MaxJobs", req.MaxJobs)
	appendOpt("MaxNodes", req.MaxNodes)
	appendOpt("MaxSubmitJobs", req.MaxSubmitJobs)
	appendOpt("MaxWall", req.MaxWall)
	appendOpt("Partitions", req.Partitions)
	appendOpt("QosLevel", req.QosLevel)
	cmd = cmd + " -i" // -i: assume "yes" to prompts
	result := ssh.ExecCommand(cmd)
	resp := pbslurm.AddUserResp{}
	resp.Result = result
	return &resp, nil
}
// DeleteUser removes the named user(s) from the slurm accounting
// database via sacctmgr; -i suppresses the confirmation prompt.
func (slurmStruct SlurmStruct) DeleteUser(ctx context.Context, req *pbslurm.DeleteUserReq) (*pbslurm.DeleteUserResp, error) {
	result := ssh.ExecCommand("/usr/local/bin/sacctmgr delete user " + req.Names + " -i")
	resp := pbslurm.DeleteUserResp{Result: result}
	return &resp, nil
}

View File

@ -0,0 +1,119 @@
package slurmer
/*
#cgo LDFLAGS: -lslurmdb
#include <stdio.h>
#include <slurm/slurm.h>
#include <slurm/slurmdb.h>
#include <memory.h>
#include <malloc.h>
typedef struct wckey_info_msg {
uint32_t record_count;
slurmdb_wckey_rec_t *wckey_array;
} wckey_info_msg_t;
typedef struct slurmdb_wckey_rec{
uint16_t admin_level;
List assoc_list;
List coord_accts;
char *default_acct;
char *default_wckey;
char *name;
char *old_name;
uint32_t uid;
List wckey_list;
} slurmdb_wckey_rec_pcm;
struct wckey_info_msg get_wckey_info() {
struct wckey_info_msg wckeyinfo;
List wckeyList = NULL;
slurmdb_wckey_cond_t *wckey_cond = NULL;
void *db_conn;
db_conn = slurmdb_connection_get();
wckeyList = slurmdb_wckeys_get(db_conn, wckey_cond);
slurmdb_connection_close(&db_conn);
slurmdb_wckey_rec_t *rec = NULL;
ListIterator itr = slurm_list_iterator_create(wckeyList);
int i = 0;
uint32_t length;
length = slurm_list_count(wckeyList);
wckeyinfo.record_count = length;
wckeyinfo.wckey_array = malloc(length * sizeof(slurmdb_wckey_rec_t));
while ((rec = slurm_list_next(itr))) {
wckeyinfo.wckey_array[i] = *rec;
i++;
}
return wckeyinfo;
}
struct slurmdb_wckey_rec *wckey_from_list(struct wckey_info_msg *list, int i) {
return (struct slurmdb_wckey_rec *) &list->wckey_array[i];
}
*/
import "C"
import (
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
)
// WckeyInfoMsg is the Go-side container for a batch of slurm wckey
// records. RecordCount mirrors the C record_count; LastUpdate is
// declared but not populated by GetWckeyInfo below.
type WckeyInfoMsg struct {
LastUpdate int64
RecordCount uint32
WckeyInfoList []pbslurm.WckeyInfo
}
// WckeyDescriptorConvertCToGo maps one C slurmdb_wckey_rec onto the
// protobuf WckeyInfo. Only the name is copied at present; all other C
// fields are dropped.
func WckeyDescriptorConvertCToGo(cStruct *C.struct_slurmdb_wckey_rec) pbslurm.WckeyInfo {
var goStruct pbslurm.WckeyInfo
goStruct.Name = C.GoString(cStruct.name)
return goStruct
}
// GetWckeyInfo fetches all wckey records from slurmdbd via the cgo
// helper get_wckey_info and converts each one into a protobuf WckeyInfo.
// NOTE(review): get_wckey_info malloc()s wckeyinfo.wckey_array and never
// destroys the slurm List or iterator it creates, and nothing frees the
// array here — every call leaks C memory; consider cleaning up in the C
// helper and freeing the array with C.free after the copy.
func GetWckeyInfo() WckeyInfoMsg {
var goWckeyBuffer WckeyInfoMsg
cWckeyBuffer := C.get_wckey_info()
goWckeyBuffer.RecordCount = uint32(cWckeyBuffer.record_count)
// Pre-size the slice to the record count reported by the C side.
goWckeyBuffer.WckeyInfoList = make([]pbslurm.WckeyInfo, cWckeyBuffer.record_count, cWckeyBuffer.record_count)
for i := uint32(0); i < goWckeyBuffer.RecordCount; i++ {
wckey := C.wckey_from_list(&cWckeyBuffer, C.int(i))
goWckey := WckeyDescriptorConvertCToGo(wckey)
goWckeyBuffer.WckeyInfoList[i] = goWckey
}
return goWckeyBuffer
}
// ListWckeys returns every wckey record currently known to the slurm
// accounting database.
func (slurmStruct SlurmStruct) ListWckeys(ctx context.Context, req *pbslurm.ListWckeysReq) (*pbslurm.ListWckeysResp, error) {
	resp := pbslurm.ListWckeysResp{}
	for _, w := range GetWckeyInfo().WckeyInfoList {
		item := w // copy: append a pointer to a fresh value, not the range variable
		resp.WckeyInfos = append(resp.WckeyInfos, &item)
	}
	return &resp, nil
}
// GetWckey is meant to return the wckey(s) matching the request, but
// the filter is not implemented yet: every wckey is returned and req is
// ignored.
// TODO(review): filter by the request's wckey name (mirroring GetUser's
// strings.Contains match) before this endpoint is relied upon.
func (slurmStruct SlurmStruct) GetWckey(ctx context.Context, req *pbslurm.GetWckeyReq) (*pbslurm.GetWckeyResp, error) {
wckeyList := GetWckeyInfo()
resp := pbslurm.GetWckeyResp{}
for _, wckey := range wckeyList.WckeyInfoList {
//wckeyInfoResult := pbslurm.WckeyInfo{}
//todo add filter logic
wckeyInfoResult := wckey // copy before taking the address
//wckeyInfoResult.Name = wckey.Name
resp.WckeyInfo = append(resp.WckeyInfo, &wckeyInfoResult)
}
return &resp, nil
}

View File

@ -6,14 +6,38 @@ import (
)
type Slurmer interface {
ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (resp *pbslurm.ListNodesResp, err error) //list slurm nodes
ListUsers(ctx context.Context, req *pbslurm.ListUsersReq) (resp *pbslurm.ListUsersResp, err error) //list slurm users
GetUser(ctx context.Context, req *pbslurm.GetUserReq) (resp *pbslurm.GetUserResp, err error) //get specific slurm user
AddUser(ctx context.Context, req *pbslurm.AddUserReq) (resp *pbslurm.AddUserResp, err error) //add slurm user
DeleteUser(ctx context.Context, req *pbslurm.DeleteUserReq) (resp *pbslurm.DeleteUserResp, err error) //delete slurm user
ListAssociations(ctx context.Context, req *pbslurm.ListAssociationsReq) (resp *pbslurm.ListAssociationsResp, err error) //list slurm associations
GetAssociation(ctx context.Context, req *pbslurm.GetAssociationReq) (resp *pbslurm.GetAssociationResp, err error) //get specific slurm associations
ListAccounts(ctx context.Context, req *pbslurm.ListAccountsReq) (resp *pbslurm.ListAccountsResp, err error) //list slurm accounts
GetAccount(ctx context.Context, req *pbslurm.GetAccountReq) (resp *pbslurm.GetAccountResp, err error) //get specific slurm account
AddAccount(ctx context.Context, req *pbslurm.AddAccountReq) (resp *pbslurm.AddAccountResp, err error) //add slurm account
DeleteAccount(ctx context.Context, req *pbslurm.DeleteAccountReq) (resp *pbslurm.DeleteAccountResp, err error) //delete slurm account
ListQoss(ctx context.Context, req *pbslurm.ListQossReq) (resp *pbslurm.ListQossResp, err error) //list slurm qoss
ListWckeys(ctx context.Context, req *pbslurm.ListWckeysReq) (resp *pbslurm.ListWckeysResp, err error) //list slurm wckeys
GetWckey(ctx context.Context, req *pbslurm.GetWckeyReq) (resp *pbslurm.GetWckeyResp, err error) //list slurm wckeys
ListClusters(ctx context.Context, req *pbslurm.ListClustersReq) (resp *pbslurm.ListClustersResp, err error)
GetCluster(ctx context.Context, req *pbslurm.GetClusterReq) (resp *pbslurm.GetClusterResp, err error)
DeleteCluster(ctx context.Context, req *pbslurm.DeleteClusterReq) (resp *pbslurm.DeleteClusterResp, err error)
AddCluster(ctx context.Context, req *pbslurm.AddClusterReq) (resp *pbslurm.AddClusterResp, err error)
ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (resp *pbslurm.ListNodesResp, err error)
GetNode(ctx context.Context, req *pbslurm.GetNodeReq) (resp *pbslurm.GetNodeResp, err error)
ListPartitions(ctx context.Context, req *pbslurm.ListPartitionsReq) (resp *pbslurm.ListPartitionsResp, err error)
GetPartition(ctx context.Context, req *pbslurm.GetPartitionReq) (resp *pbslurm.GetPartitionResp, err error)
ListReservations(ctx context.Context, req *pbslurm.ListReservationsReq) (resp *pbslurm.ListReservationsResp, err error)
GetReservation(ctx context.Context, req *pbslurm.GetReservationReq) (resp *pbslurm.GetReservationResp, err error)
}
func SelectSlurmVersion(slurmVersion int32) (slurmer Slurmer, err error) {
func SelectSlurmVersion(slurmVersion pbslurm.SlurmVersion) (slurmer Slurmer, err error) {
switch slurmVersion {
case int32(pbslurm.SlurmVersion_tianhe):
case pbslurm.SlurmVersion_tianhe:
return SelectTianhe()
case pbslurm.SlurmVersion_shenwei:
print("no adaptor for shenwei right now")
}
return
}

View File

@ -1,11 +1,5 @@
package slurmer
import (
nodeinfo "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/nodeinfo"
pbslurm "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/gen/idl"
"context"
)
type SlurmStruct struct {
version string
}
@ -16,20 +10,3 @@ func SelectTianhe() (Slurmer, error) {
version: "2.6.9.1",
}, nil
}
func (slurmStruct SlurmStruct) ListNodes(ctx context.Context, req *pbslurm.ListNodesReq) (*pbslurm.ListNodesResp, error) {
nodeList := nodeinfo.Get_all_nodes()
var resp = pbslurm.ListNodesResp{}
for _, node := range nodeList.Node_list {
nodeInfoResult := pbslurm.NodeInfo{}
nodeInfoResult.Cpus = int32(node.Cpus)
nodeInfoResult.Boards = int32(node.Boards)
nodeInfoResult.RealMemory = int32(node.Real_memory)
nodeInfoResult.Sockets = int32(node.Sockets)
nodeInfoResult.Threads = int32(node.Threads)
resp.NodeInfos = append(resp.NodeInfos, &nodeInfoResult)
}
return &resp, nil
}

View File

@ -37,8 +37,8 @@ const (
var (
// ManagementResourceTypes lists the types we use the management client for
ManagementResourceTypes = []string{"cluster", "node", "project"}
// ProjectResourceTypes lists the types we use the cluster client for
ManagementResourceTypes = []string{"user", "node", "project"}
// ProjectResourceTypes lists the types we use the user client for
ProjectResourceTypes = []string{"secret", "namespacedSecret", "workload"}
// ClusterResourceTypes lists the types we use the project client for
ClusterResourceTypes = []string{"persistentVolume", "storageClass", "namespace"}
@ -322,7 +322,7 @@ func parseClusterAndProjectID(id string) (string, string, error) {
return "", "", fmt.Errorf("unable to extract clusterid and projectid from [%s]", id)
}
// getClusterNames maps cluster ID to name and defaults to ID if name is blank
// getClusterNames maps user ID to name and defaults to ID if name is blank
func getClusterNames(ctx *cli.Context, c *cliclient.MasterClient) (map[string]string, error) {
clusterNames := make(map[string]string)
clusterCollection, err := c.ManagementClient.Cluster.List(defaultListOpts(ctx))

70
common/ssh/ssh_util.go Normal file
View File

@ -0,0 +1,70 @@
package ssh
import (
"fmt"
gossh "golang.org/x/crypto/ssh"
"io"
"log"
"net"
"os/exec"
)
// Cli holds the connection settings and state for one SSH target.
type Cli struct {
User string
Pwd string
Addr string
Client *gossh.Client
Session *gossh.Session
LastResult string
}
// Connect dials the SSH server at c.Addr using password authentication
// and stores the resulting client on c. It returns c for chaining.
// NOTE(review): the HostKeyCallback accepts any host key, so the
// connection is open to man-in-the-middle attacks — confirm this is
// acceptable for the deployment, or pin known host keys.
func (c *Cli) Connect() (*Cli, error) {
config := &gossh.ClientConfig{}
config.SetDefaults()
config.User = c.User
config.Auth = []gossh.AuthMethod{gossh.Password(c.Pwd)}
config.HostKeyCallback = func(hostname string, remote net.Addr, key gossh.PublicKey) error { return nil }
client, err := gossh.Dial("tcp", c.Addr, config)
if nil != err {
return c, err
}
c.Client = client
return c, nil
}
// Run executes the given shell command on the remote host, connecting
// first if no client exists yet, and returns the combined output.
// NOTE(review): the receiver is a value, so the assignment to
// c.LastResult below is lost when Run returns — callers only see the
// returned string. A pointer receiver was probably intended; left
// unchanged here to avoid altering the type's method set.
func (c Cli) Run(shell string) (string, error) {
if c.Client == nil {
if _, err := c.Connect(); err != nil {
return "", err
}
}
session, err := c.Client.NewSession()
if err != nil {
return "", err
}
// Close the session when done.
defer session.Close()
buf, err := session.CombinedOutput(shell)
c.LastResult = string(buf)
return c.LastResult, err
}
// ExecCommand runs strCommand through "/bin/bash -c" on the local host
// and returns everything the command wrote to stdout and stderr,
// interleaved. Errors are logged and an empty string is returned; a
// non-zero exit status is reported on stdout but the captured output is
// still returned.
//
// Fixes in this revision:
//   - pipe-creation errors are no longer silently discarded;
//   - log.Fatal no longer kills the whole process when the command
//     fails to start — the error is logged and "" is returned;
//   - the redundant "defer stdout.Close()" is gone (cmd.Wait closes
//     the pipes itself).
func ExecCommand(strCommand string) string {
	cmd := exec.Command("/bin/bash", "-c", strCommand)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		log.Println("err", err.Error())
		return ""
	}
	errReader, err := cmd.StderrPipe()
	if err != nil {
		log.Println("err", err.Error())
		return ""
	}
	if err := cmd.Start(); err != nil {
		log.Println("err", err.Error())
		return ""
	}
	cmdReader := io.MultiReader(stdout, errReader)
	outBytes, _ := io.ReadAll(cmdReader)
	if err := cmd.Wait(); err != nil {
		fmt.Println("err", err.Error())
	}
	return string(outBytes)
}

View File

@ -1,19 +1,20 @@
package main
import "slurm/partitioninfo"
import "fmt"
import (
partition_info "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/partitioninfo"
"fmt"
)
func main(){
func main() {
partition_list := partition_info.Get_partitions()
fmt.Printf("Found %d partions \n", partition_list.Record_count)
/* a little bit nicer */
/* a little bit nicer */
fmt.Printf("Name\t Nodes\t\t\t Max_time(min)\t\t Tres\n")
fmt.Printf("________________________________________\n")
for i := range partition_list.Partition_list {
partition := partition_list.Partition_list[i]
fmt.Printf("%s\t %s\t %d\t %d\n", partition.Name, partition.Nodes, partition.Max_time, partition.Node_inx )
fmt.Printf("%s\t %s\t %d\t %d\n", partition.Name, partition.Nodes, partition.Max_time, partition.Node_inx)
}

View File

@ -1,127 +1,128 @@
package main
import "slurm/submitjob"
import "slurm"
import "os"
import "strconv"
import "fmt"
import "os/exec"
import "path/filepath"
import "slurm/jobinfo"
import "time"
import "os/user"
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func build_container(file_name,container_name string){
cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build",container_name, file_name)
fmt.Print("Now build new container")
fmt.Printf("%s\n", cmd.String())
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
fmt.Printf("error in creating container %s \n", err);
//
//
//import "slurm/submitjob"
//import "slurm"
//import "os"
//import "strconv"
//import "fmt"
//import "os/exec"
//import "path/filepath"
//import "slurm/jobinfo"
//import "time"
//import "os/user"
//
//
//
//func fileExists(filename string) bool {
// info, err := os.Stat(filename)
// if os.IsNotExist(err) {
// return false
// }
// return !info.IsDir()
//}
//func build_container(file_name,container_name string){
//
// cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build",container_name, file_name)
// fmt.Print("Now build new container")
// fmt.Printf("%s\n", cmd.String())
// stdoutStderr, err := cmd.CombinedOutput()
// if err != nil {
// fmt.Printf("error in creating container %s \n", err);
//// return
// }
// fmt.Printf("%s\n", stdoutStderr)
//}
//
//func main(){
// job_desc := submit_job.Job_descriptor{}
//
// dir, _ := os.Getwd()
// container := filepath.Join(dir, "mpi_container.img")
// definition := filepath.Join(dir, "mpi_container.def")
// if !fileExists(container){
// build_container(definition,container)
// }
//
// if !fileExists(container){
// return
}
fmt.Printf("%s\n", stdoutStderr)
}
func main(){
job_desc := submit_job.Job_descriptor{}
dir, _ := os.Getwd()
container := filepath.Join(dir, "mpi_container.img")
definition := filepath.Join(dir, "mpi_container.def")
if !fileExists(container){
build_container(definition,container)
}
if !fileExists(container){
return
}
/* use Cmd to create our script */
job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n srun hostname \n"
cmd := exec.Command( "/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec",container, "/opt/mpi_pingpong" )
job_desc.Script+= cmd.String()
fmt.Printf("cmd %s\n", job_desc.Script)
user, _:= user.Current()
userid , _ := strconv.Atoi(user.Uid)
job_desc.User_id= uint32(userid)
groupid , _ := strconv.Atoi(user.Gid)
job_desc.Group_id= uint32(groupid)
job_desc.Name = "flex_mpi_job"
job_desc.Partition="long"
job_desc.Time_limit = uint32(60)
job_desc.Ntasks_per_node = uint16(1)
job_desc.Num_tasks = uint32(2)
job_desc.Std_out = ("./%j-out.txt")
job_desc.Std_err = ("./%j-err.txt")
job_desc.Work_dir = dir
time.Sleep(3 * time.Second)
answer := submit_job.Submit_job(&job_desc)
if(answer.Error_code != 0) {
msg := slurm.GetErrorString(answer.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
fmt.Printf("Submitted Job %d\n", answer.Job_id)
time.Sleep(5 * time.Second)
job_list := job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job := job_list.Job_list[0]
fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
state := job.Job_stateS
if state == "Pending" {
fmt.Printf("Move job %d to another partition \n", answer.Job_id)
var ops submit_job.Update_job_options
ops.Qos = "shortjobs"
ops.Partition = "short"
err2 := submit_job.Update_job(ops, uint32(answer.Job_id))
if err2!= uint32(0) {
fmt.Printf("error %s \n", slurm.GetErrorString(err2))
}
}
for state == "Pending" || state == "Running" {
time.Sleep(2 * time.Second)
job_list = job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job = job_list.Job_list[0]
state = job.Job_stateS
fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS)
}
fmt.Printf("Total runtime Job %d %s\n",job.Job_id, job_info.Get_job_runtime(job).String() )
}
// }
// /* use Cmd to create our script */
//
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n srun hostname \n"
// cmd := exec.Command( "/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec",container, "/opt/mpi_pingpong" )
// job_desc.Script+= cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// user, _:= user.Current()
// userid , _ := strconv.Atoi(user.Uid)
// job_desc.User_id= uint32(userid)
// groupid , _ := strconv.Atoi(user.Gid)
//
// job_desc.Group_id= uint32(groupid)
// job_desc.Name = "flex_mpi_job"
// job_desc.Partition="long"
// job_desc.Time_limit = uint32(60)
// job_desc.Ntasks_per_node = uint16(1)
// job_desc.Num_tasks = uint32(2)
// job_desc.Std_out = ("./%j-out.txt")
// job_desc.Std_err = ("./%j-err.txt")
// job_desc.Work_dir = dir
//
// time.Sleep(3 * time.Second)
// answer := submit_job.Submit_job(&job_desc)
// if(answer.Error_code != 0) {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer.Job_id)
//
// time.Sleep(5 * time.Second)
//
// job_list := job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job := job_list.Job_list[0]
//
// fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
// state := job.Job_stateS
// if state == "Pending" {
// fmt.Printf("Move job %d to another partition \n", answer.Job_id)
// var ops submit_job.Update_job_options
//
// ops.Qos = "shortjobs"
// ops.Partition = "short"
// err2 := submit_job.Update_job(ops, uint32(answer.Job_id))
// if err2!= uint32(0) {
// fmt.Printf("error %s \n", slurm.GetErrorString(err2))
// }
// }
//
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS)
//
//
// }
//
// fmt.Printf("Total runtime Job %d %s\n",job.Job_id, job_info.Get_job_runtime(job).String() )
//}
//
//
//

View File

@ -1,111 +1,112 @@
package main
import "slurm/submitjob"
import "slurm"
import "os/user"
import "os"
import "strconv"
import "fmt"
import "os/exec"
import "path/filepath"
import "slurm/jobinfo"
import "time"
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func build_container(file_name,container_name string){
cmd := exec.Command("sudo","/usr/local/bin/singularity", "build",container_name, file_name)
fmt.Print("Now build new container")
fmt.Printf("%s\n", cmd.String())
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
fmt.Printf("error in creating container %s \n", err)
fmt.Printf("%s\n", stdoutStderr)
//
//import "slurm/submitjob"
//import "slurm"
//import "os/user"
//import "os"
//import "strconv"
//import "fmt"
//import "os/exec"
//import "path/filepath"
//import "slurm/jobinfo"
//import "time"
//
//
//func fileExists(filename string) bool {
// info, err := os.Stat(filename)
// if os.IsNotExist(err) {
// return false
// }
// return !info.IsDir()
//}
//func build_container(file_name,container_name string){
//
// cmd := exec.Command("sudo","/usr/local/bin/singularity", "build",container_name, file_name)
// fmt.Print("Now build new container")
// fmt.Printf("%s\n", cmd.String())
// stdoutStderr, err := cmd.CombinedOutput()
// if err != nil {
// fmt.Printf("error in creating container %s \n", err)
//
// fmt.Printf("%s\n", stdoutStderr)
//// return
// }
// fmt.Printf("%s\n", stdoutStderr)
//}
//
//func main(){
// job_desc := submit_job.Job_descriptor{}
//
// dir, _ := os.Getwd()
// container := filepath.Join(dir, "mpi_container.img")
// definition := filepath.Join(dir, "mpi_container.def")
// if !fileExists(container){
// build_container(definition,container)
// }
//
// if !fileExists(container){
// return
}
fmt.Printf("%s\n", stdoutStderr)
}
func main(){
job_desc := submit_job.Job_descriptor{}
dir, _ := os.Getwd()
container := filepath.Join(dir, "mpi_container.img")
definition := filepath.Join(dir, "mpi_container.def")
if !fileExists(container){
build_container(definition,container)
}
if !fileExists(container){
return
}
/* use Cmd to create our script */
job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
cmd := exec.Command( "/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec",container, "/opt/mpi_pingpong" )
job_desc.Script+= cmd.String()
fmt.Printf("cmd %s\n", job_desc.Script)
user, _:= user.Current()
userid , _ := strconv.Atoi(user.Uid)
job_desc.User_id= uint32(userid)
groupid , _ := strconv.Atoi(user.Gid)
job_desc.Group_id= uint32(groupid)
job_desc.Name = "mpi_job"
job_desc.Partition="long"
job_desc.Time_limit = uint32(60)
job_desc.Min_nodes =uint32(2)
job_desc.Num_tasks = uint32(2)
job_desc.Std_out = ("./%j-out.txt")
job_desc.Std_err = ("./%j-err.txt")
job_desc.Work_dir = dir
answer := submit_job.Submit_job(&job_desc)
if(answer.Error_code != 0) {
msg := slurm.GetErrorString(answer.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
fmt.Printf("Submitted Job %d\n", answer.Job_id)
job_list := job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job := job_list.Job_list[0]
fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS)
state := job.Job_stateS
for state == "Pending" || state == "Running" {
time.Sleep(2 * time.Second)
job_list = job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job = job_list.Job_list[0]
state = job.Job_stateS
fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS)
}
fmt.Printf("Total runtime Job %d: %s\n",job.Job_id, job_info.Get_job_runtime(job).String() )
}
// }
// /* use Cmd to create our script */
//
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
// cmd := exec.Command( "/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec",container, "/opt/mpi_pingpong" )
// job_desc.Script+= cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// user, _:= user.Current()
// userid , _ := strconv.Atoi(user.Uid)
// job_desc.User_id= uint32(userid)
// groupid , _ := strconv.Atoi(user.Gid)
//
// job_desc.Group_id= uint32(groupid)
// job_desc.Name = "mpi_job"
// job_desc.Partition="long"
// job_desc.Time_limit = uint32(60)
// job_desc.Min_nodes =uint32(2)
// job_desc.Num_tasks = uint32(2)
// job_desc.Std_out = ("./%j-out.txt")
// job_desc.Std_err = ("./%j-err.txt")
// job_desc.Work_dir = dir
//
// answer := submit_job.Submit_job(&job_desc)
// if(answer.Error_code != 0) {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer.Job_id)
//
//
// job_list := job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job := job_list.Job_list[0]
//
// fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS)
// state := job.Job_stateS
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS)
//
//
// }
//
// fmt.Printf("Total runtime Job %d: %s\n",job.Job_id, job_info.Get_job_runtime(job).String() )
//
//}

View File

@ -1,162 +1,162 @@
package main
import "slurm/submitjob"
import "slurm"
import "os/user"
import "os"
import "strconv"
import "fmt"
import "os/exec"
import "path/filepath"
import "slurm/jobinfo"
import "time"
func fileExists(filename string) bool {
info, err := os.Stat(filename)
if os.IsNotExist(err) {
return false
}
return !info.IsDir()
}
func build_container(file_name,container_name string){
cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build",container_name, file_name)
fmt.Print("Now build new container")
fmt.Printf("%s\n", cmd.String())
stdoutStderr, err := cmd.CombinedOutput()
if err != nil {
fmt.Printf("error in creating container %s \n", err)
fmt.Printf("%s\n", stdoutStderr)
//import "slurm/submitjob"
//import "slurm"
//import "os/user"
//import "os"
//import "strconv"
//import "fmt"
//import "os/exec"
//import "path/filepath"
//import "slurm/jobinfo"
//import "time"
//
//
//func fileExists(filename string) bool {
// info, err := os.Stat(filename)
// if os.IsNotExist(err) {
// return false
// }
// return !info.IsDir()
//}
//func build_container(file_name,container_name string){
//
// cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build",container_name, file_name)
// fmt.Print("Now build new container")
// fmt.Printf("%s\n", cmd.String())
// stdoutStderr, err := cmd.CombinedOutput()
// if err != nil {
// fmt.Printf("error in creating container %s \n", err)
//
// fmt.Printf("%s\n", stdoutStderr)
//// return
// }
// fmt.Printf("%s\n", stdoutStderr)
//}
//
//func main(){
// job_desc := submit_job.Job_descriptor{}
//
// dir, _ := os.Getwd()
// container := filepath.Join(dir, "openmp_container.img")
// definition := filepath.Join(dir, "openmp_container.def")
// if !fileExists(container){
// build_container(definition,container)
// }
//
// if !fileExists(container){
// return
}
fmt.Printf("%s\n", stdoutStderr)
}
func main(){
job_desc := submit_job.Job_descriptor{}
dir, _ := os.Getwd()
container := filepath.Join(dir, "openmp_container.img")
definition := filepath.Join(dir, "openmp_container.def")
if !fileExists(container){
build_container(definition,container)
}
if !fileExists(container){
return
}
/* use Cmd to create our script */
job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
job_desc.Script+= "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
cmd := exec.Command( "/usr/local/bin/singularity", "exec",container, "/opt/openmp_example" )
job_desc.Script+= cmd.String()
fmt.Printf("cmd %s\n", job_desc.Script)
user, _:= user.Current()
userid , _ := strconv.Atoi(user.Uid)
job_desc.User_id= uint32(userid)
groupid , _ := strconv.Atoi(user.Gid)
job_desc.Group_id= uint32(groupid)
job_desc.Name = "test_job"
job_desc.Partition="long"
job_desc.Time_limit = uint32(60)
job_desc.Min_nodes =uint32(1)
job_desc.Num_tasks = uint32(1)
job_desc.Cpus_per_task = uint16(2)
job_desc.Std_out = ("./%j-out.txt")
job_desc.Std_err = ("./%j-err.txt")
job_desc.Work_dir = dir
answer := submit_job.Submit_job(&job_desc)
if(answer.Error_code != 0) {
msg := slurm.GetErrorString(answer.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
fmt.Printf("Submitted Job %d\n", answer.Job_id)
/*Now, we submit the same jon again, ut with some oversubsciption */
job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
job_desc.Script+= "export OMP_NUM_THREADS=4\n"
job_desc.Script+= cmd.String()
fmt.Printf("cmd %s\n", job_desc.Script)
answer2 := submit_job.Submit_job(&job_desc)
if(answer2.Error_code != 0) {
msg := slurm.GetErrorString(answer.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
fmt.Printf("Submitted Job %d\n", answer2.Job_id)
job_list := job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job := job_list.Job_list[0]
fmt.Printf("job is %s\n",job.Job_stateS)
state := job.Job_stateS
for state == "Pending" || state == "Running" {
time.Sleep(2 * time.Second)
job_list = job_info.Get_job(answer.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job = job_list.Job_list[0]
state = job.Job_stateS
fmt.Printf("job is %s\n",job.Job_stateS)
}
fmt.Printf("Total runtime first job %s\n",job_info.Get_job_runtime(job).String() )
/*wait for second job */
job_list = job_info.Get_job(answer2.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job = job_list.Job_list[0]
fmt.Printf("job is %s\n",job.Job_stateS)
state = job.Job_stateS
for state == "Pending" || state == "Running" {
time.Sleep(2 * time.Second)
job_list = job_info.Get_job(answer2.Job_id)
if job_list.Error_code != 0 {
msg := slurm.GetErrorString(job_list.Error_code)
fmt.Printf("Error: %s\n" ,msg)
return
}
job = job_list.Job_list[0]
state = job.Job_stateS
fmt.Printf("job is %s\n",job.Job_stateS)
}
fmt.Printf("Total runtime second job %s\n",job_info.Get_job_runtime(job).String() )
}
// }
// /* use Cmd to create our script */
//
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
// job_desc.Script+= "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
// cmd := exec.Command( "/usr/local/bin/singularity", "exec",container, "/opt/openmp_example" )
//
// job_desc.Script+= cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// user, _:= user.Current()
// userid , _ := strconv.Atoi(user.Uid)
// job_desc.User_id= uint32(userid)
// groupid , _ := strconv.Atoi(user.Gid)
//
// job_desc.Group_id= uint32(groupid)
// job_desc.Name = "test_job"
// job_desc.Partition="long"
// job_desc.Time_limit = uint32(60)
// job_desc.Min_nodes =uint32(1)
// job_desc.Num_tasks = uint32(1)
//
// job_desc.Cpus_per_task = uint16(2)
// job_desc.Std_out = ("./%j-out.txt")
// job_desc.Std_err = ("./%j-err.txt")
// job_desc.Work_dir = dir
//
// answer := submit_job.Submit_job(&job_desc)
// if(answer.Error_code != 0) {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer.Job_id)
//
// /*Now, we submit the same jon again, ut with some oversubsciption */
// job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
// job_desc.Script+= "export OMP_NUM_THREADS=4\n"
//
// job_desc.Script+= cmd.String()
// fmt.Printf("cmd %s\n", job_desc.Script)
// answer2 := submit_job.Submit_job(&job_desc)
// if(answer2.Error_code != 0) {
// msg := slurm.GetErrorString(answer.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
// }
// fmt.Printf("Submitted Job %d\n", answer2.Job_id)
//
//
//
// job_list := job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job := job_list.Job_list[0]
//
// fmt.Printf("job is %s\n",job.Job_stateS)
// state := job.Job_stateS
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job is %s\n",job.Job_stateS)
//
//
// }
//
// fmt.Printf("Total runtime first job %s\n",job_info.Get_job_runtime(job).String() )
// /*wait for second job */
// job_list = job_info.Get_job(answer2.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// fmt.Printf("job is %s\n",job.Job_stateS)
// state = job.Job_stateS
// for state == "Pending" || state == "Running" {
// time.Sleep(2 * time.Second)
// job_list = job_info.Get_job(answer2.Job_id)
// if job_list.Error_code != 0 {
// msg := slurm.GetErrorString(job_list.Error_code)
// fmt.Printf("Error: %s\n" ,msg)
// return
//
// }
// job = job_list.Job_list[0]
//
// state = job.Job_stateS
//
// fmt.Printf("job is %s\n",job.Job_stateS)
//
//
// }
//
//
// fmt.Printf("Total runtime second job %s\n",job_info.Get_job_runtime(job).String() )
//
//}

View File

@ -1,20 +1,22 @@
package main
import "slurm/submitjob"
import "slurm"
import (
"code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm"
submit_job "code.gitlink.org.cn/JCCE/PCM.git/adaptor/pcm_slurm/cgo_tianhe/src/slurm/submitjob"
)
import "os"
import "strconv"
import "fmt"
func main(){
if len(os.Args)<4 {
func main() {
if len(os.Args) < 4 {
fmt.Printf("Synthax specify JobID, qos and partition \n")
return
}
var ops submit_job.Update_job_options
id,err := strconv.Atoi(os.Args[1])
if err != nil {
fmt.Printf("Invalid job id (no int) %s\n", os.Args[1] )
id, err := strconv.Atoi(os.Args[1])
if err != nil {
fmt.Printf("Invalid job id (no int) %s\n", os.Args[1])
return
}
@ -22,7 +24,7 @@ func main(){
ops.Partition = os.Args[3]
err2 := submit_job.Update_job(ops, uint32(id))
if err2!= uint32(0) {
if err2 != uint32(0) {
fmt.Printf("error %s \n", slurm.GetErrorString(err2))
}
}

View File

@ -15,14 +15,11 @@
"protobufAny": {
"type": "object",
"properties": {
"typeUrl": {
"@type": {
"type": "string"
},
"value": {
"type": "string",
"format": "byte"
}
}
},
"additionalProperties": {}
},
"rpcStatus": {
"type": "object",