diff --git a/adaptor/slurm/extra/slurm_extra.go b/adaptor/slurm/extra/slurm_extra.go
new file mode 100644
index 00000000..667107db
--- /dev/null
+++ b/adaptor/slurm/extra/slurm_extra.go
@@ -0,0 +1,121 @@
+/* These are some extra functions for working with Slurm from Go.
+** They are kept separate since they do not use the Slurm API,
+** but wrap the Slurm command line tools. */
+package extra
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"slurm"
+	"slurm/jobinfo"
+)
+
+var slurm_path string
+
+func find_slurm_path() {
+	var err error
+	var path string
+	path = os.Getenv("SLURM_PATH")
+	if path == "" {
+		path, err = exec.LookPath("sinfo")
+		if err != nil {
+			fmt.Printf("could not find slurm executables\nEither add the slurm binaries to your PATH or define SLURM_PATH\n")
+		} else {
+			slurm_path = strings.TrimSuffix(path, "bin/sinfo")
+		}
+	} else {
+		test_path := filepath.Join(path, "bin/sinfo")
+		_, err := os.Stat(test_path)
+		if os.IsNotExist(err) {
+			fmt.Printf("Slurm executable sinfo does not exist at %s\n", test_path)
+		} else {
+			slurm_path = path
+		}
+	}
+}
+
+func Cancel_job(JobId uint32) error {
+	find_slurm_path()
+	if slurm_path == "" {
+		return errors.New("Cannot find slurm executable")
+	}
+	job_list := job_info.Get_job(JobId)
+	if job_list.Error_code != 0 {
+		msg := slurm.GetErrorString(job_list.Error_code)
+		fmt.Print(msg)
+		return errors.New(msg)
+	}
+	path := filepath.Join(slurm_path, "bin", "scancel")
+	cmd := exec.Command(path, strconv.FormatInt(int64(JobId), 10))
+	fmt.Print(cmd.String())
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		msg := string(out) + err.Error()
+		return errors.New(msg)
+	}
+	return nil
+}
+
+type Acc_Job_info struct {
+	JobId   uint32
+	User    string
+	Account string
+	State   string
+	JobName string
+}
+
+var sacct_format_string string
+
+func parse_sacct_output(input string) []Acc_Job_info {
+	var job_list []Acc_Job_info
+	lines := strings.Split(input, "\n")
+	for l := range lines {
+		var job_info Acc_Job_info
+		elements := strings.Split(lines[l], "|")
+		if len(elements) < 5 {
+			break // Incomplete record, e.g. the trailing empty line; nothing more to parse.
+		}
+		id, ierr := strconv.Atoi(elements[0])
+		if ierr != nil {
+			continue // Not a primary job record but a step entry such as "323.batch"; ignore these for now.
+		}
+		job_info.JobId = uint32(id)
+		job_info.User = elements[1]
+		job_info.Account = elements[2]
+		job_info.State = elements[3]
+		job_info.JobName = elements[4]
+		job_list = append(job_list, job_info)
+	}
+	return job_list
+}
+
+func Get_job_info_accounting(JobId uint32) ([]Acc_Job_info, error) {
+	sacct_format_string = "JobId,user,account,state,JobName"
+	find_slurm_path()
+	if slurm_path == "" {
+		return nil, errors.New("Cannot find slurm executable")
+	}
+	path := filepath.Join(slurm_path, "bin", "sacct")
+	cmd := exec.Command(path, "-j", strconv.FormatInt(int64(JobId), 10), "--format", sacct_format_string, "-p", "-n")
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		msg := string(out) + err.Error()
+		return nil, errors.New(msg)
+	}
+	list := parse_sacct_output(string(out))
+	return list, nil
+}
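+
+// A minimal usage sketch (hypothetical caller code; the import path and the
+// job ID 1234 are assumptions, not part of this patch):
+//
+//	jobs, err := extra.Get_job_info_accounting(1234)
+//	if err != nil {
+//		fmt.Println(err)
+//		return
+//	}
+//	for _, j := range jobs {
+//		fmt.Printf("%d %s %s %s %s\n", j.JobId, j.User, j.Account, j.State, j.JobName)
+//	}
+//	if err := extra.Cancel_job(1234); err != nil {
+//		fmt.Println(err) // scancel failed or the job is unknown
+//	}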
diff --git a/adaptor/slurm/jobinfo/slurm_job_info.go b/adaptor/slurm/jobinfo/slurm_job_info.go
new file mode 100644
index 00000000..9a28f4f0
--- /dev/null
+++ b/adaptor/slurm/jobinfo/slurm_job_info.go
@@ -0,0 +1,1035 @@
+package job_info
+
+/*
+#cgo LDFLAGS: -lslurm
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
+
+inline uint8_t uint8_ptr(uint8_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int8_t int8_ptr(int8_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline uint16_t uint16_ptr(uint16_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int16_t int16_ptr(int16_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline uint32_t uint32_ptr(uint32_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int32_t int32_ptr(int32_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline uint64_t uint64_ptr(uint64_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int64_t int64_ptr(int64_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+struct job_info_msg *get_job_info(){
+	struct job_info_msg* job_buffer;
+	if (slurm_load_jobs((time_t) NULL, &job_buffer, SHOW_ALL)) {
+		return NULL;
+	}
+	return job_buffer;
+}
+struct job_info_msg *get_single_job_info(uint32_t id){
+	struct job_info_msg* job_buffer;
+	if (slurm_load_job(&job_buffer, id, SHOW_DETAIL)) {
+		return NULL;
+	}
+	return job_buffer;
+}
+struct job_info* job_from_list(struct job_info_msg *list, int i){
+	return &list->job_array[i];
+}
+void free_job_buffer(void* buffer){
+	slurm_free_job_info_msg((struct job_info_msg*)buffer);
+}
+struct job_info_msg *get_user_job_info(uint32_t id){
+	struct job_info_msg* job_buffer;
+	if (slurm_load_job_user(&job_buffer, id, SHOW_DETAIL))
+		return NULL;
+	return job_buffer;
+}
+int64_t get_job_endtime(int32_t jobid){
+	time_t end_time;
+	if (slurm_get_end_time(jobid, &end_time))
+		return -1;
+	else
+		return end_time;
+}
+int char_len(char* c) {
+	unsigned int i = 0;
+	while (c[i] != '\0') i++;
+	return i;
+}
+*/
+import "C"
+
+import "fmt"
+import "os/user"
+import "strconv"
+import "time"
+import "unsafe"
+
+func Reason_to_string(state uint16) string {
+	switch s := C.uint16_t(state); s {
+	case C.WAIT_NO_REASON:
+		return "wait no reason"
+	case C.WAIT_PRIORITY:
+		return "wait priority"
+	case C.WAIT_DEPENDENCY:
+		return "wait dependency"
+	case C.WAIT_RESOURCES:
+		return "wait resources"
+	case C.WAIT_PART_NODE_LIMIT:
+		return "wait part node limit"
+	case C.WAIT_PART_TIME_LIMIT:
+		return "wait part time limit"
+	case C.WAIT_PART_DOWN:
+		return
"wait part down" + case C.WAIT_PART_INACTIVE: + return "wait part inactive" + case C.WAIT_HELD: + return "wait held" + case C.WAIT_TIME: + return "wait time" + case C.WAIT_LICENSES: + return "wait licenses" + case C.WAIT_ASSOC_JOB_LIMIT: + return "wait assoc job limit" + case C.WAIT_ASSOC_RESOURCE_LIMIT: + return "wait assoc resource limit" + case C.WAIT_ASSOC_TIME_LIMIT: + return "wait assoc time limit" + case C.WAIT_RESERVATION: + return "wait reservation" + case C.WAIT_NODE_NOT_AVAIL: + return "wait node not avail" + case C.WAIT_HELD_USER: + return "wait held user" + case C.WAIT_FRONT_END: + return "wait front end" + case C.FAIL_DOWN_PARTITION: + return "fail down partition" + case C.FAIL_DOWN_NODE: + return "fail down node" + case C.FAIL_BAD_CONSTRAINTS: + return "fail bad constraints" + case C.FAIL_SYSTEM: + return "fail system" + case C.FAIL_LAUNCH: + return "fail launch" + case C.FAIL_EXIT_CODE: + return "fail exit code" + case C.FAIL_TIMEOUT: + return "fail timeout" + case C.FAIL_INACTIVE_LIMIT: + return "fail inactive limit" + case C.FAIL_ACCOUNT: + return "fail account" + case C.FAIL_QOS: + return "fail qos" + case C.WAIT_QOS_THRES: + return "wait qos thres" + case C.WAIT_QOS_JOB_LIMIT: + return "wait qos job limit" + case C.WAIT_QOS_RESOURCE_LIMIT: + return "wait qos resource limit" + case C.WAIT_QOS_TIME_LIMIT: + return "wait qos time limit" + case C.WAIT_BLOCK_MAX_ERR: + return "wait block max err" + case C.WAIT_BLOCK_D_ACTION: + return "wait block d action" + case C.WAIT_CLEANING: + return "wait cleaning" + case C.WAIT_PROLOG: + return "wait prolog" + case C.WAIT_QOS: + return "wait qos" + case C.WAIT_ACCOUNT: + return "wait account" + case C.WAIT_DEP_INVALID: + return "wait dep invalid" + case C.WAIT_QOS_GRP_CPU: + return "wait qos grp cpu" + case C.WAIT_QOS_GRP_CPU_MIN: + return "wait qos grp cpu min" + case C.WAIT_QOS_GRP_CPU_RUN_MIN: + return "wait qos grp cpu run min" + case C.WAIT_QOS_GRP_JOB: + return "wait qos grp job" + case C.WAIT_QOS_GRP_MEM: + return "wait qos grp mem" + case C.WAIT_QOS_GRP_NODE: + return "wait qos grp node" + case C.WAIT_QOS_GRP_SUB_JOB: + return "wait qos grp sub job" + case C.WAIT_QOS_GRP_WALL: + return "wait qos grp wall" + case C.WAIT_QOS_MAX_CPU_PER_JOB: + return "wait qos max cpu per job" + case C.WAIT_QOS_MAX_CPU_MINS_PER_JOB: + return "wait qos max cpu mins per job" + case C.WAIT_QOS_MAX_NODE_PER_JOB: + return "wait qos max node per job" + case C.WAIT_QOS_MAX_WALL_PER_JOB: + return "wait qos max wall per job" + case C.WAIT_QOS_MAX_CPU_PER_USER: + return "wait qos max cpu per user" + case C.WAIT_QOS_MAX_JOB_PER_USER: + return "wait qos max job per user" + case C.WAIT_QOS_MAX_NODE_PER_USER: + return "wait qos max node per user" + case C.WAIT_QOS_MAX_SUB_JOB: + return "wait qos max sub job" + case C.WAIT_QOS_MIN_CPU: + return "wait qos min cpu" + case C.WAIT_ASSOC_GRP_CPU: + return "wait assoc grp cpu" + case C.WAIT_ASSOC_GRP_CPU_MIN: + return "wait assoc grp cpu min" + case C.WAIT_ASSOC_GRP_CPU_RUN_MIN: + return "wait assoc grp cpu run min" + case C.WAIT_ASSOC_GRP_JOB: + return "wait assoc grp job" + case C.WAIT_ASSOC_GRP_MEM: + return "wait assoc grp mem" + case C.WAIT_ASSOC_GRP_NODE: + return "wait assoc grp node" + case C.WAIT_ASSOC_GRP_SUB_JOB: + return "wait assoc grp sub job" + case C.WAIT_ASSOC_GRP_WALL: + return "wait assoc grp wall" + case C.WAIT_ASSOC_MAX_JOBS: + return "wait assoc max jobs" + case C.WAIT_ASSOC_MAX_CPU_PER_JOB: + return "wait assoc max cpu per job" + case C.WAIT_ASSOC_MAX_CPU_MINS_PER_JOB: + return "wait assoc max 
cpu mins per job" + case C.WAIT_ASSOC_MAX_NODE_PER_JOB: + return "wait assoc max node per job" + case C.WAIT_ASSOC_MAX_WALL_PER_JOB: + return "wait assoc max wall per job" + case C.WAIT_ASSOC_MAX_SUB_JOB: + return "wait assoc max sub job" + case C.WAIT_MAX_REQUEUE: + return "wait max requeue" + case C.WAIT_ARRAY_TASK_LIMIT: + return "wait array task limit" + case C.WAIT_BURST_BUFFER_RESOURCE: + return "wait burst buffer resource" + case C.WAIT_BURST_BUFFER_STAGING: + return "wait burst buffer staging" + case C.FAIL_BURST_BUFFER_OP: + return "fail burst buffer op" + case C.WAIT_POWER_NOT_AVAIL: + return "wait power not avail" + case C.WAIT_POWER_RESERVED: + return "wait power reserved" + case C.WAIT_ASSOC_GRP_UNK: + return "wait assoc grp unk" + case C.WAIT_ASSOC_GRP_UNK_MIN: + return "wait assoc grp unk min" + case C.WAIT_ASSOC_GRP_UNK_RUN_MIN: + return "wait assoc grp unk run min" + case C.WAIT_ASSOC_MAX_UNK_PER_JOB: + return "wait assoc max unk per job" + case C.WAIT_ASSOC_MAX_UNK_PER_NODE: + return "wait assoc max unk per node" + case C.WAIT_ASSOC_MAX_UNK_MINS_PER_JOB: + return "wait assoc max unk mins per job" + case C.WAIT_ASSOC_MAX_CPU_PER_NODE: + return "wait assoc max cpu per node" + case C.WAIT_ASSOC_GRP_MEM_MIN: + return "wait assoc grp mem min" + case C.WAIT_ASSOC_GRP_MEM_RUN_MIN: + return "wait assoc grp mem run min" + case C.WAIT_ASSOC_MAX_MEM_PER_JOB: + return "wait assoc max mem per job" + case C.WAIT_ASSOC_MAX_MEM_PER_NODE: + return "wait assoc max mem per node" + case C.WAIT_ASSOC_MAX_MEM_MINS_PER_JOB: + return "wait assoc max mem mins per job" + case C.WAIT_ASSOC_GRP_NODE_MIN: + return "wait assoc grp node min" + case C.WAIT_ASSOC_GRP_NODE_RUN_MIN: + return "wait assoc grp node run min" + case C.WAIT_ASSOC_MAX_NODE_MINS_PER_JOB: + return "wait assoc max node mins per job" + case C.WAIT_ASSOC_GRP_ENERGY: + return "wait assoc grp energy" + case C.WAIT_ASSOC_GRP_ENERGY_MIN: + return "wait assoc grp energy min" + case C.WAIT_ASSOC_GRP_ENERGY_RUN_MIN: + return "wait assoc grp energy run min" + case C.WAIT_ASSOC_MAX_ENERGY_PER_JOB: + return "wait assoc max energy per job" + case C.WAIT_ASSOC_MAX_ENERGY_PER_NODE: + return "wait assoc max energy per node" + case C.WAIT_ASSOC_MAX_ENERGY_MINS_PER_JOB: + return "wait assoc max energy mins per job" + case C.WAIT_ASSOC_GRP_GRES: + return "wait assoc grp gres" + case C.WAIT_ASSOC_GRP_GRES_MIN: + return "wait assoc grp gres min" + case C.WAIT_ASSOC_GRP_GRES_RUN_MIN: + return "wait assoc grp gres run min" + case C.WAIT_ASSOC_MAX_GRES_PER_JOB: + return "wait assoc max gres per job" + case C.WAIT_ASSOC_MAX_GRES_PER_NODE: + return "wait assoc max gres per node" + case C.WAIT_ASSOC_MAX_GRES_MINS_PER_JOB: + return "wait assoc max gres mins per job" + case C.WAIT_ASSOC_GRP_LIC: + return "wait assoc grp lic" + case C.WAIT_ASSOC_GRP_LIC_MIN: + return "wait assoc grp lic min" + case C.WAIT_ASSOC_GRP_LIC_RUN_MIN: + return "wait assoc grp lic run min" + case C.WAIT_ASSOC_MAX_LIC_PER_JOB: + return "wait assoc max lic per job" + case C.WAIT_ASSOC_MAX_LIC_MINS_PER_JOB: + return "wait assoc max lic mins per job" + case C.WAIT_ASSOC_GRP_BB: + return "wait assoc grp bb" + case C.WAIT_ASSOC_GRP_BB_MIN: + return "wait assoc grp bb min" + case C.WAIT_ASSOC_GRP_BB_RUN_MIN: + return "wait assoc grp bb run min" + case C.WAIT_ASSOC_MAX_BB_PER_JOB: + return "wait assoc max bb per job" + case C.WAIT_ASSOC_MAX_BB_PER_NODE: + return "wait assoc max bb per node" + case C.WAIT_ASSOC_MAX_BB_MINS_PER_JOB: + return "wait assoc max bb mins per job" + case 
C.WAIT_QOS_GRP_UNK: + return "wait qos grp unk" + case C.WAIT_QOS_GRP_UNK_MIN: + return "wait qos grp unk min" + case C.WAIT_QOS_GRP_UNK_RUN_MIN: + return "wait qos grp unk run min" + case C.WAIT_QOS_MAX_UNK_PER_JOB: + return "wait qos max unk per job" + case C.WAIT_QOS_MAX_UNK_PER_NODE: + return "wait qos max unk per node" + case C.WAIT_QOS_MAX_UNK_PER_USER: + return "wait qos max unk per user" + case C.WAIT_QOS_MAX_UNK_MINS_PER_JOB: + return "wait qos max unk mins per job" + case C.WAIT_QOS_MIN_UNK: + return "wait qos min unk" + case C.WAIT_QOS_MAX_CPU_PER_NODE: + return "wait qos max cpu per node" + case C.WAIT_QOS_GRP_MEM_MIN: + return "wait qos grp mem min" + case C.WAIT_QOS_GRP_MEM_RUN_MIN: + return "wait qos grp mem run min" + case C.WAIT_QOS_MAX_MEM_MINS_PER_JOB: + return "wait qos max mem mins per job" + case C.WAIT_QOS_MAX_MEM_PER_JOB: + return "wait qos max mem per job" + case C.WAIT_QOS_MAX_MEM_PER_NODE: + return "wait qos max mem per node" + case C.WAIT_QOS_MAX_MEM_PER_USER: + return "wait qos max mem per user" + case C.WAIT_QOS_MIN_MEM: + return "wait qos min mem" + case C.WAIT_QOS_GRP_ENERGY: + return "wait qos grp energy" + case C.WAIT_QOS_GRP_ENERGY_MIN: + return "wait qos grp energy min" + case C.WAIT_QOS_GRP_ENERGY_RUN_MIN: + return "wait qos grp energy run min" + case C.WAIT_QOS_MAX_ENERGY_PER_JOB: + return "wait qos max energy per job" + case C.WAIT_QOS_MAX_ENERGY_PER_NODE: + return "wait qos max energy per node" + case C.WAIT_QOS_MAX_ENERGY_PER_USER: + return "wait qos max energy per user" + case C.WAIT_QOS_MAX_ENERGY_MINS_PER_JOB: + return "wait qos max energy mins per job" + case C.WAIT_QOS_MIN_ENERGY: + return "wait qos min energy" + case C.WAIT_QOS_GRP_NODE_MIN: + return "wait qos grp node min" + case C.WAIT_QOS_GRP_NODE_RUN_MIN: + return "wait qos grp node run min" + case C.WAIT_QOS_MAX_NODE_MINS_PER_JOB: + return "wait qos max node mins per job" + case C.WAIT_QOS_MIN_NODE: + return "wait qos min node" + case C.WAIT_QOS_GRP_GRES: + return "wait qos grp gres" + case C.WAIT_QOS_GRP_GRES_MIN: + return "wait qos grp gres min" + case C.WAIT_QOS_GRP_GRES_RUN_MIN: + return "wait qos grp gres run min" + case C.WAIT_QOS_MAX_GRES_PER_JOB: + return "wait qos max gres per job" + case C.WAIT_QOS_MAX_GRES_PER_NODE: + return "wait qos max gres per node" + case C.WAIT_QOS_MAX_GRES_PER_USER: + return "wait qos max gres per user" + case C.WAIT_QOS_MAX_GRES_MINS_PER_JOB: + return "wait qos max gres mins per job" + case C.WAIT_QOS_MIN_GRES: + return "wait qos min gres" + case C.WAIT_QOS_GRP_LIC: + return "wait qos grp lic" + case C.WAIT_QOS_GRP_LIC_MIN: + return "wait qos grp lic min" + case C.WAIT_QOS_GRP_LIC_RUN_MIN: + return "wait qos grp lic run min" + case C.WAIT_QOS_MAX_LIC_PER_JOB: + return "wait qos max lic per job" + case C.WAIT_QOS_MAX_LIC_PER_USER: + return "wait qos max lic per user" + case C.WAIT_QOS_MAX_LIC_MINS_PER_JOB: + return "wait qos max lic mins per job" + case C.WAIT_QOS_MIN_LIC: + return "wait qos min lic" + case C.WAIT_QOS_GRP_BB: + return "wait qos grp bb" + case C.WAIT_QOS_GRP_BB_MIN: + return "wait qos grp bb min" + case C.WAIT_QOS_GRP_BB_RUN_MIN: + return "wait qos grp bb run min" + case C.WAIT_QOS_MAX_BB_PER_JOB: + return "wait qos max bb per job" + case C.WAIT_QOS_MAX_BB_PER_NODE: + return "wait qos max bb per node" + case C.WAIT_QOS_MAX_BB_PER_USER: + return "wait qos max bb per user" + case C.WAIT_QOS_MAX_BB_MINS_PER_JOB: + return "wait qos max bb mins per job" + case C.WAIT_QOS_MIN_BB: + return "wait qos min bb" + case C.FAIL_DEADLINE: + return "fail 
deadline" + case C.WAIT_QOS_MAX_BB_PER_ACCT: + return "wait qos max bb per acct" + case C.WAIT_QOS_MAX_CPU_PER_ACCT: + return "wait qos max cpu per acct" + case C.WAIT_QOS_MAX_ENERGY_PER_ACCT: + return "wait qos max energy per acct" + case C.WAIT_QOS_MAX_GRES_PER_ACCT: + return "wait qos max gres per acct" + case C.WAIT_QOS_MAX_NODE_PER_ACCT: + return "wait qos max node per acct" + case C.WAIT_QOS_MAX_LIC_PER_ACCT: + return "wait qos max lic per acct" + case C.WAIT_QOS_MAX_MEM_PER_ACCT: + return "wait qos max mem per acct" + case C.WAIT_QOS_MAX_UNK_PER_ACCT: + return "wait qos max unk per acct" + case C.WAIT_QOS_MAX_JOB_PER_ACCT: + return "wait qos max job per acct" + case C.WAIT_QOS_MAX_SUB_JOB_PER_ACCT: + return "wait qos max sub job per acct" + case C.WAIT_PART_CONFIG: + return "wait part config" + case C.WAIT_ACCOUNT_POLICY: + return "wait account policy" + case C.WAIT_FED_JOB_LOCK: + return "wait fed job lock" + case C.FAIL_OOM: + return "fail oom" + case C.WAIT_PN_MEM_LIMIT: + return "wait pn mem limit" + case C.WAIT_ASSOC_GRP_BILLING: + return "wait assoc grp billing" + case C.WAIT_ASSOC_GRP_BILLING_MIN: + return "wait assoc grp billing min" + case C.WAIT_ASSOC_GRP_BILLING_RUN_MIN: + return "wait assoc grp billing run min" + case C.WAIT_ASSOC_MAX_BILLING_PER_JOB: + return "wait assoc max billing per job" + case C.WAIT_ASSOC_MAX_BILLING_PER_NODE: + return "wait assoc max billing per node" + case C.WAIT_ASSOC_MAX_BILLING_MINS_PER_JOB: + return "wait assoc max billing mins per job" + case C.WAIT_QOS_GRP_BILLING: + return "wait qos grp billing" + case C.WAIT_QOS_GRP_BILLING_MIN: + return "wait qos grp billing min" + case C.WAIT_QOS_GRP_BILLING_RUN_MIN: + return "wait qos grp billing run min" + case C.WAIT_QOS_MAX_BILLING_PER_JOB: + return "wait qos max billing per job" + case C.WAIT_QOS_MAX_BILLING_PER_NODE: + return "wait qos max billing per node" + case C.WAIT_QOS_MAX_BILLING_PER_USER: + return "wait qos max billing per user" + case C.WAIT_QOS_MAX_BILLING_MINS_PER_JOB: + return "wait qos max billing mins per job" + case C.WAIT_QOS_MAX_BILLING_PER_ACCT: + return "wait qos max billing per acct" + case C.WAIT_QOS_MIN_BILLING: + return "wait qos min billing" + case C.WAIT_RESV_DELETED: + return "wait resv deleted" + } + return "unkown reason" + +} +func state_to_string(state uint32) string{ + switch s := C.uint32_t(state); s { + case C.JOB_PENDING: + return "Pending" /* queued waiting for initiation */ + case C.JOB_RUNNING: + return "Running" /* allocated resources and executing */ + case C.JOB_SUSPENDED: + return "Suspended" /* allocated resources, execution suspended */ + case C.JOB_COMPLETE: + return "Complete" /* completed execution successfully */ + case C.JOB_CANCELLED: + return "Cancelled" /* cancelled by user */ + case C.JOB_FAILED: + return "Failed" /* completed execution unsuccessfully */ + case C.JOB_TIMEOUT: + return "Timeout" /* terminated on reaching time limit */ + case C.JOB_NODE_FAIL: + return "Node Fail" /* terminated on node failure */ + case C.JOB_PREEMPTED: + return "Preempted" /* terminated due to preemption */ + case C.JOB_BOOT_FAIL: + return "Boot Fail" /* terminated due to node boot failure */ + case C.JOB_DEADLINE: + return "Term. 
Deadline" /* terminated on deadline */ + case C.JOB_OOM: + return "Out of memory" /* experienced out of memory error */ + } + + return "Unknow status"; /* not a real state, last entry in table */ +} + + +type Job_info struct { + Account string; + Accrue_time int64; + Admin_comment string; + Alloc_node string; + Alloc_sid uint32; + //array_bitmap void; + Array_job_id uint32; + Array_task_id uint32; + Array_max_tasks uint32; + Array_task_str string; + Assoc_id uint32; + Batch_features string; + Batch_flag uint16; + Batch_host string; + Bitflags uint32; + Boards_per_node uint16; + Burst_buffer string; + Burst_buffer_state string; + Cluster string; + Cluster_features string; + Command string; + Comment string; + Contiguous uint16; + Core_spec uint16; + Cores_per_socket uint16; + Cpus_per_task uint16; + Cpu_freq_min uint32; + Cpu_freq_max uint32; + Cpu_freq_gov uint32; + Cpus_per_tres string; + Deadline int64; + Delay_boot uint32; + Dependency string; + Derived_ec uint32; + Eligible_time int64; + End_time int64; + Exc_nodes string; + Exc_node_inx int32; + Exit_code uint32; + Features string; + Fed_origin_str string; + Fed_siblings_active uint64; + Fed_siblings_active_str string; + Fed_siblings_viable uint64; + Fed_siblings_viable_str string; + Gres_detail_cnt uint32; + //gres_detail_str char**; + Group_id uint32; + Job_id uint32; + Job_state uint32; + Job_stateS string; + Last_sched_eval int64; + Licenses string; + Max_cpus uint32; + Max_nodes uint32; + Mcs_label string; + Mem_per_tres string; + Name string; + Network string; + Nodes string; + Nice uint32; + Node_inx int32; + Ntasks_per_core uint16; + Ntasks_per_node uint16; + Ntasks_per_socket uint16; + Ntasks_per_board uint16; + Num_cpus uint32; + Num_nodes uint32; + Num_tasks uint32; + Pack_job_id uint32; + Pack_job_id_set string; + Pack_job_offset uint32; + Partition string; + Pn_min_memory uint64; + Pn_min_cpus uint16; + Pn_min_tmp_disk uint32; + Power_flags uint8; + Preempt_time int64; + Preemptable_time int64; + Pre_sus_time int64; + Priority uint32; + Profile uint32; + Qos string; + Reboot uint8; + Req_nodes string; + Req_node_inx int32; + Req_switch uint32; + Requeue uint16; + Resize_time int64; + Restart_cnt uint16; + Resv_name string; + Sched_nodes string; + Shared uint16; + Show_flags uint16; + Site_factor uint32; + Sockets_per_board uint16; + Sockets_per_node uint16; + Start_time int64; + Start_protocol_ver uint16; + State_desc string; + State_reason uint16; + Std_err string; + Std_in string; + Std_out string; + Submit_time int64; + Suspend_time int64; + System_comment string; + Time_limit uint32; + Time_min uint32; + Threads_per_core uint16; + Tres_bind string; + Tres_freq string; + Tres_per_job string; + Tres_per_node string; + Tres_per_socket string; + Tres_per_task string; + Tres_req_str string; + Tres_alloc_str string; + User_id uint32; + User_name string; + //accurate void; + Wait4switch uint32; + Wckey string; + Work_dir string; +} + +func Job_info_convert_c_to_go(c_struct *C.struct_job_info) Job_info{ + var go_struct Job_info + + go_struct.Account = C.GoString(c_struct.account) + go_struct.Accrue_time = int64(c_struct.accrue_time) + go_struct.Admin_comment = C.GoString(c_struct.admin_comment) + go_struct.Alloc_node = C.GoString(c_struct.alloc_node) + go_struct.Alloc_sid = uint32(c_struct.alloc_sid) + go_struct.Array_job_id = uint32(c_struct.array_job_id) + go_struct.Array_task_id = uint32(c_struct.array_task_id) + go_struct.Array_max_tasks = uint32(c_struct.array_max_tasks) + go_struct.Array_task_str = 
C.GoString(c_struct.array_task_str) + go_struct.Assoc_id = uint32(c_struct.assoc_id) + go_struct.Batch_features = C.GoString(c_struct.batch_features) + go_struct.Batch_flag = uint16(c_struct.batch_flag) + go_struct.Batch_host = C.GoString(c_struct.batch_host) + go_struct.Bitflags = uint32(c_struct.bitflags) + go_struct.Boards_per_node = uint16(c_struct.boards_per_node) + go_struct.Burst_buffer = C.GoString(c_struct.burst_buffer) + go_struct.Burst_buffer_state = C.GoString(c_struct.burst_buffer_state) + go_struct.Cluster = C.GoString(c_struct.cluster) + go_struct.Cluster_features = C.GoString(c_struct.cluster_features) + go_struct.Command = C.GoString(c_struct.command) + go_struct.Comment = C.GoString(c_struct.comment) + go_struct.Contiguous = uint16(c_struct.contiguous) + go_struct.Core_spec = uint16(c_struct.core_spec) + go_struct.Cores_per_socket = uint16(c_struct.cores_per_socket) + go_struct.Cpus_per_task = uint16(c_struct.cpus_per_task) + go_struct.Cpu_freq_min = uint32(c_struct.cpu_freq_min) + go_struct.Cpu_freq_max = uint32(c_struct.cpu_freq_max) + go_struct.Cpu_freq_gov = uint32(c_struct.cpu_freq_gov) + go_struct.Cpus_per_tres = C.GoString(c_struct.cpus_per_tres) + go_struct.Deadline = int64(c_struct.deadline) + go_struct.Delay_boot = uint32(c_struct.delay_boot) + go_struct.Dependency = C.GoString(c_struct.dependency) + go_struct.Derived_ec = uint32(c_struct.derived_ec) + go_struct.Eligible_time = int64(c_struct.eligible_time) + go_struct.End_time = int64(c_struct.end_time) + go_struct.Exc_nodes = C.GoString(c_struct.exc_nodes) + go_struct.Exc_node_inx = int32(C.int32_ptr(c_struct.exc_node_inx)) + go_struct.Exit_code = uint32(c_struct.exit_code) + go_struct.Features = C.GoString(c_struct.features) + go_struct.Fed_origin_str = C.GoString(c_struct.fed_origin_str) + go_struct.Fed_siblings_active = uint64(c_struct.fed_siblings_active) + go_struct.Fed_siblings_active_str = C.GoString(c_struct.fed_siblings_active_str) + go_struct.Fed_siblings_viable = uint64(c_struct.fed_siblings_viable) + go_struct.Fed_siblings_viable_str = C.GoString(c_struct.fed_siblings_viable_str) + go_struct.Gres_detail_cnt = uint32(c_struct.gres_detail_cnt) + go_struct.Group_id = uint32(c_struct.group_id) + go_struct.Job_id = uint32(c_struct.job_id) + go_struct.Job_state = uint32(c_struct.job_state) + go_struct.Job_stateS = state_to_string(uint32(c_struct.job_state)) + go_struct.Last_sched_eval = int64(c_struct.last_sched_eval) + go_struct.Licenses = C.GoString(c_struct.licenses) + go_struct.Max_cpus = uint32(c_struct.max_cpus) + go_struct.Max_nodes = uint32(c_struct.max_nodes) + go_struct.Mcs_label = C.GoString(c_struct.mcs_label) + go_struct.Mem_per_tres = C.GoString(c_struct.mem_per_tres) + go_struct.Name = C.GoString(c_struct.name) + go_struct.Network = C.GoString(c_struct.network) + go_struct.Nodes = C.GoString(c_struct.nodes) + go_struct.Nice = uint32(c_struct.nice) + go_struct.Node_inx = int32(C.int32_ptr(c_struct.node_inx)) + go_struct.Ntasks_per_core = uint16(c_struct.ntasks_per_core) + go_struct.Ntasks_per_node = uint16(c_struct.ntasks_per_node) + go_struct.Ntasks_per_socket = uint16(c_struct.ntasks_per_socket) + go_struct.Ntasks_per_board = uint16(c_struct.ntasks_per_board) + go_struct.Num_cpus = uint32(c_struct.num_cpus) + go_struct.Num_nodes = uint32(c_struct.num_nodes) + go_struct.Num_tasks = uint32(c_struct.num_tasks) + go_struct.Pack_job_id = uint32(c_struct.pack_job_id) + go_struct.Pack_job_id_set = C.GoString(c_struct.pack_job_id_set) + go_struct.Pack_job_offset = uint32(c_struct.pack_job_offset) 
+ go_struct.Partition = C.GoString(c_struct.partition) + go_struct.Pn_min_memory = uint64(c_struct.pn_min_memory) + go_struct.Pn_min_cpus = uint16(c_struct.pn_min_cpus) + go_struct.Pn_min_tmp_disk = uint32(c_struct.pn_min_tmp_disk) + go_struct.Power_flags = uint8(c_struct.power_flags) + go_struct.Preempt_time = int64(c_struct.preempt_time) + go_struct.Preemptable_time = int64(c_struct.preemptable_time) + go_struct.Pre_sus_time = int64(c_struct.pre_sus_time) + go_struct.Priority = uint32(c_struct.priority) + go_struct.Profile = uint32(c_struct.profile) + go_struct.Qos = C.GoString(c_struct.qos) + go_struct.Reboot = uint8(c_struct.reboot) + go_struct.Req_nodes = C.GoString(c_struct.req_nodes) + go_struct.Req_node_inx = int32(C.int32_ptr(c_struct.req_node_inx)) + go_struct.Req_switch = uint32(c_struct.req_switch) + go_struct.Requeue = uint16(c_struct.requeue) + go_struct.Resize_time = int64(c_struct.resize_time) + go_struct.Restart_cnt = uint16(c_struct.restart_cnt) + go_struct.Resv_name = C.GoString(c_struct.resv_name) + go_struct.Sched_nodes = C.GoString(c_struct.sched_nodes) + go_struct.Shared = uint16(c_struct.shared) + go_struct.Show_flags = uint16(c_struct.show_flags) + go_struct.Site_factor = uint32(c_struct.site_factor) + go_struct.Sockets_per_board = uint16(c_struct.sockets_per_board) + go_struct.Sockets_per_node = uint16(c_struct.sockets_per_node) + go_struct.Start_time = int64(c_struct.start_time) + go_struct.Start_protocol_ver = uint16(c_struct.start_protocol_ver) + go_struct.State_desc = C.GoString(c_struct.state_desc) + go_struct.State_reason = uint16(c_struct.state_reason) + go_struct.Std_err = C.GoString(c_struct.std_err) + go_struct.Std_in = C.GoString(c_struct.std_in) + go_struct.Std_out = C.GoString(c_struct.std_out) + go_struct.Submit_time = int64(c_struct.submit_time) + go_struct.Suspend_time = int64(c_struct.suspend_time) + go_struct.System_comment = C.GoString(c_struct.system_comment) + go_struct.Time_limit = uint32(c_struct.time_limit) + go_struct.Time_min = uint32(c_struct.time_min) + go_struct.Threads_per_core = uint16(c_struct.threads_per_core) + go_struct.Tres_bind = C.GoString(c_struct.tres_bind) + go_struct.Tres_freq = C.GoString(c_struct.tres_freq) + go_struct.Tres_per_job = C.GoString(c_struct.tres_per_job) + go_struct.Tres_per_node = C.GoString(c_struct.tres_per_node) + go_struct.Tres_per_socket = C.GoString(c_struct.tres_per_socket) + go_struct.Tres_per_task = C.GoString(c_struct.tres_per_task) + go_struct.Tres_req_str = C.GoString(c_struct.tres_req_str) + go_struct.Tres_alloc_str = C.GoString(c_struct.tres_alloc_str) + go_struct.User_id = uint32(c_struct.user_id) + go_struct.User_name = C.GoString(c_struct.user_name) + if len(go_struct.User_name) == 0 { + tmp_user,_ := user.LookupId(strconv.Itoa(int(go_struct.User_id))) + go_struct.User_name= tmp_user.Username + } + go_struct.Wait4switch = uint32(c_struct.wait4switch) + go_struct.Wckey = C.GoString(c_struct.wckey) + go_struct.Work_dir = C.GoString(c_struct.work_dir) + return go_struct + } + + func Print_Job_info(go_struct Job_info){ + fmt.Printf("%s:\t %s\n","account", go_struct.Account) + fmt.Printf("%s:\t %d\n","accrue time", go_struct.Accrue_time) + fmt.Printf("%s:\t %s\n","admin comment", go_struct.Admin_comment) + fmt.Printf("%s:\t %s\n","alloc node", go_struct.Alloc_node) + fmt.Printf("%s:\t %d\n","alloc sid", go_struct.Alloc_sid) + fmt.Printf("%s:\t %d\n","array job id", go_struct.Array_job_id) + fmt.Printf("%s:\t %d\n","array task id", go_struct.Array_task_id) + fmt.Printf("%s:\t %d\n","array max 
tasks", go_struct.Array_max_tasks) + fmt.Printf("%s:\t %s\n","array task str", go_struct.Array_task_str) + fmt.Printf("%s:\t %d\n","assoc id", go_struct.Assoc_id) + fmt.Printf("%s:\t %s\n","batch features", go_struct.Batch_features) + fmt.Printf("%s:\t %d\n","batch flag", go_struct.Batch_flag) + fmt.Printf("%s:\t %s\n","batch host", go_struct.Batch_host) + fmt.Printf("%s:\t %d\n","bitflags", go_struct.Bitflags) + fmt.Printf("%s:\t %d\n","boards per node", go_struct.Boards_per_node) + fmt.Printf("%s:\t %s\n","burst buffer", go_struct.Burst_buffer) + fmt.Printf("%s:\t %s\n","burst buffer state", go_struct.Burst_buffer_state) + fmt.Printf("%s:\t %s\n","cluster", go_struct.Cluster) + fmt.Printf("%s:\t %s\n","cluster features", go_struct.Cluster_features) + fmt.Printf("%s:\t %s\n","command", go_struct.Command) + fmt.Printf("%s:\t %s\n","comment", go_struct.Comment) + fmt.Printf("%s:\t %d\n","contiguous", go_struct.Contiguous) + fmt.Printf("%s:\t %d\n","core spec", go_struct.Core_spec) + fmt.Printf("%s:\t %d\n","cores per socket", go_struct.Cores_per_socket) + fmt.Printf("%s:\t %d\n","cpus per task", go_struct.Cpus_per_task) + fmt.Printf("%s:\t %d\n","cpu freq min", go_struct.Cpu_freq_min) + fmt.Printf("%s:\t %d\n","cpu freq max", go_struct.Cpu_freq_max) + fmt.Printf("%s:\t %d\n","cpu freq gov", go_struct.Cpu_freq_gov) + fmt.Printf("%s:\t %s\n","cpus per tres", go_struct.Cpus_per_tres) + fmt.Printf("%s:\t %d\n","deadline", go_struct.Deadline) + fmt.Printf("%s:\t %d\n","delay boot", go_struct.Delay_boot) + fmt.Printf("%s:\t %s\n","dependency", go_struct.Dependency) + fmt.Printf("%s:\t %d\n","derived ec", go_struct.Derived_ec) + fmt.Printf("%s:\t %d\n","eligible time", go_struct.Eligible_time) + fmt.Printf("%s:\t %d\n","end time", go_struct.End_time) + fmt.Printf("%s:\t %s\n","exc nodes", go_struct.Exc_nodes) + fmt.Printf("%s:\t %d\n","exc node inx", go_struct.Exc_node_inx) + fmt.Printf("%s:\t %d\n","exit code", go_struct.Exit_code) + fmt.Printf("%s:\t %s\n","features", go_struct.Features) + fmt.Printf("%s:\t %s\n","fed origin str", go_struct.Fed_origin_str) + fmt.Printf("%s:\t %d\n","fed siblings active", go_struct.Fed_siblings_active) + fmt.Printf("%s:\t %s\n","fed siblings active str", go_struct.Fed_siblings_active_str) + fmt.Printf("%s:\t %d\n","fed siblings viable", go_struct.Fed_siblings_viable) + fmt.Printf("%s:\t %s\n","fed siblings viable str", go_struct.Fed_siblings_viable_str) + fmt.Printf("%s:\t %d\n","gres detail cnt", go_struct.Gres_detail_cnt) + fmt.Printf("%s:\t %d\n","group id", go_struct.Group_id) + fmt.Printf("%s:\t %d\n","job id", go_struct.Job_id) + fmt.Printf("%s:\t %d\n","job state", go_struct.Job_state) + fmt.Printf("%s:\t %d\n","last sched eval", go_struct.Last_sched_eval) + fmt.Printf("%s:\t %s\n","licenses", go_struct.Licenses) + fmt.Printf("%s:\t %d\n","max cpus", go_struct.Max_cpus) + fmt.Printf("%s:\t %d\n","max nodes", go_struct.Max_nodes) + fmt.Printf("%s:\t %s\n","mcs label", go_struct.Mcs_label) + fmt.Printf("%s:\t %s\n","mem per tres", go_struct.Mem_per_tres) + fmt.Printf("%s:\t %s\n","name", go_struct.Name) + fmt.Printf("%s:\t %s\n","network", go_struct.Network) + fmt.Printf("%s:\t %s\n","nodes", go_struct.Nodes) + fmt.Printf("%s:\t %d\n","nice", go_struct.Nice) + fmt.Printf("%s:\t %d\n","node inx", go_struct.Node_inx) + fmt.Printf("%s:\t %d\n","ntasks per core", go_struct.Ntasks_per_core) + fmt.Printf("%s:\t %d\n","ntasks per node", go_struct.Ntasks_per_node) + fmt.Printf("%s:\t %d\n","ntasks per socket", go_struct.Ntasks_per_socket) + fmt.Printf("%s:\t 
%d\n","ntasks per board", go_struct.Ntasks_per_board) + fmt.Printf("%s:\t %d\n","num cpus", go_struct.Num_cpus) + fmt.Printf("%s:\t %d\n","num nodes", go_struct.Num_nodes) + fmt.Printf("%s:\t %d\n","num tasks", go_struct.Num_tasks) + fmt.Printf("%s:\t %d\n","pack job id", go_struct.Pack_job_id) + fmt.Printf("%s:\t %s\n","pack job id set", go_struct.Pack_job_id_set) + fmt.Printf("%s:\t %d\n","pack job offset", go_struct.Pack_job_offset) + fmt.Printf("%s:\t %s\n","partition", go_struct.Partition) + fmt.Printf("%s:\t %d\n","pn min memory", go_struct.Pn_min_memory) + fmt.Printf("%s:\t %d\n","pn min cpus", go_struct.Pn_min_cpus) + fmt.Printf("%s:\t %d\n","pn min tmp disk", go_struct.Pn_min_tmp_disk) + fmt.Printf("%s:\t %d\n","power flags", go_struct.Power_flags) + fmt.Printf("%s:\t %d\n","preempt time", go_struct.Preempt_time) + fmt.Printf("%s:\t %d\n","preemptable time", go_struct.Preemptable_time) + fmt.Printf("%s:\t %d\n","pre sus time", go_struct.Pre_sus_time) + fmt.Printf("%s:\t %d\n","priority", go_struct.Priority) + fmt.Printf("%s:\t %d\n","profile", go_struct.Profile) + fmt.Printf("%s:\t %s\n","qos", go_struct.Qos) + fmt.Printf("%s:\t %d\n","reboot", go_struct.Reboot) + fmt.Printf("%s:\t %s\n","req nodes", go_struct.Req_nodes) + fmt.Printf("%s:\t %d\n","req node inx", go_struct.Req_node_inx) + fmt.Printf("%s:\t %d\n","req switch", go_struct.Req_switch) + fmt.Printf("%s:\t %d\n","requeue", go_struct.Requeue) + fmt.Printf("%s:\t %d\n","resize time", go_struct.Resize_time) + fmt.Printf("%s:\t %d\n","restart cnt", go_struct.Restart_cnt) + fmt.Printf("%s:\t %s\n","resv name", go_struct.Resv_name) + fmt.Printf("%s:\t %s\n","sched nodes", go_struct.Sched_nodes) + fmt.Printf("%s:\t %d\n","shared", go_struct.Shared) + fmt.Printf("%s:\t %d\n","show flags", go_struct.Show_flags) + fmt.Printf("%s:\t %d\n","site factor", go_struct.Site_factor) + fmt.Printf("%s:\t %d\n","sockets per board", go_struct.Sockets_per_board) + fmt.Printf("%s:\t %d\n","sockets per node", go_struct.Sockets_per_node) + fmt.Printf("%s:\t %d\n","start time", go_struct.Start_time) + fmt.Printf("%s:\t %d\n","start protocol ver", go_struct.Start_protocol_ver) + fmt.Printf("%s:\t %s\n","state desc", go_struct.State_desc) + fmt.Printf("%s:\t %d\n","state reason", go_struct.State_reason) + fmt.Printf("%s:\t %s\n","std err", go_struct.Std_err) + fmt.Printf("%s:\t %s\n","std in", go_struct.Std_in) + fmt.Printf("%s:\t %s\n","std out", go_struct.Std_out) + fmt.Printf("%s:\t %d\n","submit time", go_struct.Submit_time) + fmt.Printf("%s:\t %d\n","suspend time", go_struct.Suspend_time) + fmt.Printf("%s:\t %s\n","system comment", go_struct.System_comment) + fmt.Printf("%s:\t %d\n","time limit", go_struct.Time_limit) + fmt.Printf("%s:\t %d\n","time min", go_struct.Time_min) + fmt.Printf("%s:\t %d\n","threads per core", go_struct.Threads_per_core) + fmt.Printf("%s:\t %s\n","tres bind", go_struct.Tres_bind) + fmt.Printf("%s:\t %s\n","tres freq", go_struct.Tres_freq) + fmt.Printf("%s:\t %s\n","tres per job", go_struct.Tres_per_job) + fmt.Printf("%s:\t %s\n","tres per node", go_struct.Tres_per_node) + fmt.Printf("%s:\t %s\n","tres per socket", go_struct.Tres_per_socket) + fmt.Printf("%s:\t %s\n","tres per task", go_struct.Tres_per_task) + fmt.Printf("%s:\t %s\n","tres req str", go_struct.Tres_req_str) + fmt.Printf("%s:\t %s\n","tres alloc str", go_struct.Tres_alloc_str) + fmt.Printf("%s:\t %d\n","user id", go_struct.User_id) + fmt.Printf("%s:\t %s\n","user name", go_struct.User_name) + fmt.Printf("%s:\t %d\n","wait4switch", go_struct.Wait4switch) 
+ fmt.Printf("%s:\t %s\n","wckey", go_struct.Wckey) + fmt.Printf("%s:\t %s\n","work dir", go_struct.Work_dir) +} + +type Job_info_msg struct { + Last_update int64; + Record_count uint32; + Error_code uint32; + Job_list []Job_info; +} + +func Get_job_runtime(job Job_info) time.Duration{ + + start_time := time.Unix(job.Start_time,0) + current_time := time.Now() + diff := current_time.Sub(start_time).Round(time.Second) + if int64(diff)<0 { + return 0 + } + end_time := time.Unix(job.End_time,0) + diff2 := current_time.Sub(end_time) + if int64(diff2)<0 { + return diff + } + return end_time.Sub(start_time) +} + +func Get_job_endtime (id uint32 ) time.Time { + c_time := C.get_job_endtime(C.int32_t(id)) + return time.Unix(int64(c_time),0) + +} + +func Get_all_jobs() Job_info_msg { + var go_job_buffer Job_info_msg + c_job_buffer := C.get_job_info() + if c_job_buffer == nil { + go_job_buffer.Last_update = int64(0) + go_job_buffer.Record_count = uint32(0) + go_job_buffer.Error_code = uint32(C.slurm_get_errno()) + return go_job_buffer + } + + go_job_buffer.Last_update = int64(c_job_buffer.last_update) + go_job_buffer.Record_count = uint32(c_job_buffer.record_count) + go_job_buffer.Job_list =make([]Job_info,c_job_buffer.record_count, c_job_buffer.record_count) + for i:=uint32(0); i +#include +#include +inline uint8_t uint8_ptr(uint8_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +inline int8_t int8_ptr(int8_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +uint16_t uint16_ptr(uint16_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +inline int16_t int16_ptr(int16_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +inline uint32_t uint32_ptr(uint32_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +inline int32_t int32_ptr(int32_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +inline uint64_t uint64_ptr(uint64_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +inline int64_t int64_ptr(int16_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} + struct node_info_msg *get_node_info(){ + struct node_info_msg* node_buffer; + if(slurm_load_node ((time_t) NULL, + &node_buffer, SHOW_ALL)) + return NULL; + return node_buffer; + } + struct node_info_msg *get_single_node_info(char* name){ + struct node_info_msg* node_buffer; + if( slurm_load_node_single (&node_buffer, name, SHOW_DETAIL)) + return NULL; + return node_buffer; + } + + struct node_info* node_from_list(struct node_info_msg *list, int i){ + return &list->node_array[i]; +} + void free_node_buffer(void* buffer){ + + slurm_free_node_info_msg ((struct node_info_msg*)buffer); + } + + +*/ +import "C" + +import "fmt" +import "unsafe" + +type Node_info struct { + Arch string; + Boards uint16; + Boot_time int64; + Cluster_name string; + Cores uint16; + Core_spec_cnt uint16; + Cpu_bind uint32; + Cpu_load uint32; + Free_mem uint64; + Cpus uint16; + Cpu_spec_list string; + Features string; + Features_act string; + Gres string; + Gres_drain string; + Gres_used string; + Mcs_label string; + Mem_spec_limit uint64; + Name string; + Next_state uint32; + Node_addr string; + Node_hostname string; + Node_state uint32; + Os string; + Owner uint32; + Partitions string; + Port uint16; + Real_memory uint64; + Reason string; + Reason_time int64; + Reason_uid uint32; + Slurmd_start_time int64; + Sockets uint16; + Threads uint16; + Tmp_disk uint32; + Weight uint32; + Tres_fmt_str 
+func Get_all_jobs() Job_info_msg {
+	var go_job_buffer Job_info_msg
+	c_job_buffer := C.get_job_info()
+	if c_job_buffer == nil {
+		go_job_buffer.Last_update = int64(0)
+		go_job_buffer.Record_count = uint32(0)
+		go_job_buffer.Error_code = uint32(C.slurm_get_errno())
+		return go_job_buffer
+	}
+	go_job_buffer.Last_update = int64(c_job_buffer.last_update)
+	go_job_buffer.Record_count = uint32(c_job_buffer.record_count)
+	go_job_buffer.Job_list = make([]Job_info, c_job_buffer.record_count, c_job_buffer.record_count)
+	for i := uint32(0); i < uint32(c_job_buffer.record_count); i++ {
+		go_job_buffer.Job_list[i] = Job_info_convert_c_to_go(C.job_from_list(c_job_buffer, C.int(i)))
+	}
+	C.free_job_buffer(unsafe.Pointer(c_job_buffer))
+	return go_job_buffer
+}
+#include <stdint.h>
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
+inline uint8_t uint8_ptr(uint8_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int8_t int8_ptr(int8_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+uint16_t uint16_ptr(uint16_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int16_t int16_ptr(int16_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline uint32_t uint32_ptr(uint32_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int32_t int32_ptr(int32_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline uint64_t uint64_ptr(uint64_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+inline int64_t int64_ptr(int64_t* pointer) {
+	if (NULL == pointer) {
+		return -1;}
+	return *pointer;
+}
+struct node_info_msg *get_node_info(){
+	struct node_info_msg* node_buffer;
+	if (slurm_load_node((time_t) NULL, &node_buffer, SHOW_ALL))
+		return NULL;
+	return node_buffer;
+}
+struct node_info_msg *get_single_node_info(char* name){
+	struct node_info_msg* node_buffer;
+	if (slurm_load_node_single(&node_buffer, name, SHOW_DETAIL))
+		return NULL;
+	return node_buffer;
+}
+struct node_info* node_from_list(struct node_info_msg *list, int i){
+	return &list->node_array[i];
+}
+void free_node_buffer(void* buffer){
+	slurm_free_node_info_msg((struct node_info_msg*)buffer);
+}
+*/
+import "C"
+
+import "fmt"
+import "unsafe"
+
+type Node_info struct {
+	Arch string
+	Boards uint16
+	Boot_time int64
+	Cluster_name string
+	Cores uint16
+	Core_spec_cnt uint16
+	Cpu_bind uint32
+	Cpu_load uint32
+	Free_mem uint64
+	Cpus uint16
+	Cpu_spec_list string
+	Features string
+	Features_act string
+	Gres string
+	Gres_drain string
+	Gres_used string
+	Mcs_label string
+	Mem_spec_limit uint64
+	Name string
+	Next_state uint32
+	Node_addr string
+	Node_hostname string
+	Node_state uint32
+	Os string
+	Owner uint32
+	Partitions string
+	Port uint16
+	Real_memory uint64
+	Reason string
+	Reason_time int64
+	Reason_uid uint32
+	Slurmd_start_time int64
+	Sockets uint16
+	Threads uint16
+	Tmp_disk uint32
+	Weight uint32
+	Tres_fmt_str string
+	Version string
+}
+
+func Node_info_convert_c_to_go(c_struct *C.struct_node_info) Node_info {
+	var go_struct Node_info
+
+	go_struct.Arch = C.GoString(c_struct.arch)
+	go_struct.Boards = uint16(c_struct.boards)
+	go_struct.Boot_time = int64(c_struct.boot_time)
+	go_struct.Cluster_name = C.GoString(c_struct.cluster_name)
+	go_struct.Cores = uint16(c_struct.cores)
+	go_struct.Core_spec_cnt = uint16(c_struct.core_spec_cnt)
+	go_struct.Cpu_bind = uint32(c_struct.cpu_bind)
+	go_struct.Cpu_load = uint32(c_struct.cpu_load)
+	go_struct.Free_mem = uint64(c_struct.free_mem)
+	go_struct.Cpus = uint16(c_struct.cpus)
+	go_struct.Cpu_spec_list = C.GoString(c_struct.cpu_spec_list)
+	go_struct.Features = C.GoString(c_struct.features)
+	go_struct.Features_act = C.GoString(c_struct.features_act)
+	go_struct.Gres = C.GoString(c_struct.gres)
+	go_struct.Gres_drain = C.GoString(c_struct.gres_drain)
+	go_struct.Gres_used = C.GoString(c_struct.gres_used)
+	go_struct.Mcs_label = C.GoString(c_struct.mcs_label)
+	go_struct.Mem_spec_limit = uint64(c_struct.mem_spec_limit)
+	go_struct.Name = C.GoString(c_struct.name)
+	go_struct.Next_state = uint32(c_struct.next_state)
+	go_struct.Node_addr = C.GoString(c_struct.node_addr)
+	go_struct.Node_hostname = C.GoString(c_struct.node_hostname)
+	go_struct.Node_state = uint32(c_struct.node_state)
+	go_struct.Os = C.GoString(c_struct.os)
+	go_struct.Owner = uint32(c_struct.owner)
+	go_struct.Partitions = C.GoString(c_struct.partitions)
+	go_struct.Port = uint16(c_struct.port)
+	go_struct.Real_memory = uint64(c_struct.real_memory)
+	go_struct.Reason = C.GoString(c_struct.reason)
+	go_struct.Reason_time = int64(c_struct.reason_time)
+	go_struct.Reason_uid = uint32(c_struct.reason_uid)
+	go_struct.Slurmd_start_time = int64(c_struct.slurmd_start_time)
+	go_struct.Sockets = uint16(c_struct.sockets)
+	go_struct.Threads = uint16(c_struct.threads)
+	go_struct.Tmp_disk = uint32(c_struct.tmp_disk)
+	go_struct.Weight = uint32(c_struct.weight)
+	go_struct.Tres_fmt_str = C.GoString(c_struct.tres_fmt_str)
+	go_struct.Version = C.GoString(c_struct.version)
+	return go_struct
+}
+
+func State_to_string(state uint32) string {
+	switch s := C.uint32_t(state); s {
+	case C.NODE_STATE_UNKNOWN:
+		return "node state unknown"
+	case C.NODE_STATE_DOWN:
+		return "node state down"
+	case C.NODE_STATE_IDLE:
+		return "node state idle"
+	case C.NODE_STATE_ALLOCATED:
+		return "node state allocated"
+	case C.NODE_STATE_ERROR:
+		return "node state error"
+	case C.NODE_STATE_MIXED:
+		return "node state mixed"
+	case C.NODE_STATE_FUTURE:
+		return "node state future"
+	case C.NODE_STATE_END:
+		return "node state end"
+	}
+	return "Unknown state"
+}
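+
+// Usage sketch (hypothetical caller code): list all nodes with their state.
+//
+//	nodes := Get_all_nodes()
+//	for _, n := range nodes.Node_list {
+//		fmt.Printf("%s: %s\n", n.Name, State_to_string(n.Node_state))
+//	}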
fmt.Printf("%s:\t %s\n","gres drain", go_struct.Gres_drain) + fmt.Printf("%s:\t %s\n","gres used", go_struct.Gres_used) + fmt.Printf("%s:\t %s\n","mcs label", go_struct.Mcs_label) + fmt.Printf("%s:\t %d\n","mem spec limit", go_struct.Mem_spec_limit) + fmt.Printf("%s:\t %s\n","name", go_struct.Name) + fmt.Printf("%s:\t %d\n","next state", go_struct.Next_state) + fmt.Printf("%s:\t %s\n","node addr", go_struct.Node_addr) + fmt.Printf("%s:\t %s\n","node hostname", go_struct.Node_hostname) + fmt.Printf("%s:\t %d\n","node state", go_struct.Node_state) + fmt.Printf("%s:\t %s\n","os", go_struct.Os) + fmt.Printf("%s:\t %d\n","owner", go_struct.Owner) + fmt.Printf("%s:\t %s\n","partitions", go_struct.Partitions) + fmt.Printf("%s:\t %d\n","port", go_struct.Port) + fmt.Printf("%s:\t %d\n","real memory", go_struct.Real_memory) + fmt.Printf("%s:\t %s\n","reason", go_struct.Reason) + fmt.Printf("%s:\t %d\n","reason time", go_struct.Reason_time) + fmt.Printf("%s:\t %d\n","reason uid", go_struct.Reason_uid) + fmt.Printf("%s:\t %d\n","slurmd start time", go_struct.Slurmd_start_time) + fmt.Printf("%s:\t %d\n","sockets", go_struct.Sockets) + fmt.Printf("%s:\t %d\n","threads", go_struct.Threads) + fmt.Printf("%s:\t %d\n","tmp disk", go_struct.Tmp_disk) + fmt.Printf("%s:\t %d\n","weight", go_struct.Weight) + fmt.Printf("%s:\t %s\n","tres fmt str", go_struct.Tres_fmt_str) + fmt.Printf("%s:\t %s\n","version", go_struct.Version) +} +type Node_info_msg struct { + Last_update int64; + Record_count uint32; + Error_code uint32; + Node_list []Node_info; +} + +func Get_all_nodes() Node_info_msg { + var go_node_buffer Node_info_msg + c_node_buffer := C.get_node_info() + if c_node_buffer == nil { + go_node_buffer.Last_update = int64(0) + go_node_buffer.Record_count = uint32(0) + go_node_buffer.Error_code = uint32(C.slurm_get_errno()) + return go_node_buffer + } + go_node_buffer.Last_update = int64(c_node_buffer.last_update) + go_node_buffer.Record_count = uint32(c_node_buffer.record_count) + go_node_buffer.Node_list =make([]Node_info,c_node_buffer.record_count, c_node_buffer.record_count) + for i:=uint32(0); i +#include + + +uint8_t uint8_ptr(uint8_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +int8_t int8_ptr(int8_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +uint16_t uint16_ptr(uint16_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +int16_t int16_ptr(int16_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +uint32_t uint32_ptr(uint32_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +int32_t int32_ptr(int32_t* pointer, int off) { + if (NULL == pointer) { + return -1;} + pointer+=off; + return *pointer; +} +uint64_t uint64_ptr(uint64_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} +int64_t int64_ptr(int16_t* pointer) { + if (NULL == pointer) { + return -1;} + return *pointer; +} + + struct partition_info_msg *get_partition_info(){ + struct partition_info_msg* partition_buffer; + if( slurm_load_partitions ((time_t) NULL, + &partition_buffer, SHOW_ALL)) + return NULL; + return partition_buffer; + } + struct partition_info* partition_from_list(struct partition_info_msg *list, int i){ + return &list->partition_array[i]; +} + void free_partition_buffer(void* buffer){ + + slurm_free_partition_info_msg ((struct partition_info_msg*)buffer); + } + int find_node_inx(int32_t* node){ + int ret = 0; + while(*node != -1) { node++; ret++;}; + return ret; +} + + +*/ +import "C" 
+ +import "fmt" + +type Partition_info struct { + Allow_alloc_nodes string; + Allow_accounts string; + Allow_groups string; + Allow_qos string; + Alternate string; + Billing_weights_str string; + Cluster_name string; + Cr_type uint16; + Cpu_bind uint32; + Def_mem_per_cpu uint64; + Default_time uint32; + Deny_accounts string; + Deny_qos string; + Flags uint16; + Grace_time uint32; + Job_defaults_str string; + Max_cpus_per_node uint32; + Max_mem_per_cpu uint64; + Max_nodes uint32; + Max_share uint16; + Max_time uint32; + Min_nodes uint32; + Name string; + Node_inx[] int32; + Nodes string; + Over_time_limit uint16; + Preempt_mode uint16; + Priority_job_factor uint16; + Priority_tier uint16; + Qos_char string; + State_up uint16; + Total_cpus uint32; + Total_nodes uint32; + Tres_fmt_str string; +} +func Partition_info_convert_c_to_go(c_struct *C.struct_partition_info) Partition_info{ + var go_struct Partition_info + + go_struct.Allow_alloc_nodes = C.GoString(c_struct.allow_alloc_nodes) + go_struct.Allow_accounts = C.GoString(c_struct.allow_accounts) + go_struct.Allow_groups = C.GoString(c_struct.allow_groups) + go_struct.Allow_qos = C.GoString(c_struct.allow_qos) + go_struct.Alternate = C.GoString(c_struct.alternate) + go_struct.Billing_weights_str = C.GoString(c_struct.billing_weights_str) + go_struct.Cluster_name = C.GoString(c_struct.cluster_name) + go_struct.Cr_type = uint16(c_struct.cr_type) + go_struct.Cpu_bind = uint32(c_struct.cpu_bind) + go_struct.Def_mem_per_cpu = uint64(c_struct.def_mem_per_cpu) + go_struct.Default_time = uint32(c_struct.default_time) + go_struct.Deny_accounts = C.GoString(c_struct.deny_accounts) + go_struct.Deny_qos = C.GoString(c_struct.deny_qos) + go_struct.Flags = uint16(c_struct.flags) + go_struct.Grace_time = uint32(c_struct.grace_time) + go_struct.Job_defaults_str = C.GoString(c_struct.job_defaults_str) + go_struct.Max_cpus_per_node = uint32(c_struct.max_cpus_per_node) + go_struct.Max_mem_per_cpu = uint64(c_struct.max_mem_per_cpu) + go_struct.Max_nodes = uint32(c_struct.max_nodes) + go_struct.Max_share = uint16(c_struct.max_share) + go_struct.Max_time = uint32(c_struct.max_time) + go_struct.Min_nodes = uint32(c_struct.min_nodes) + go_struct.Name = C.GoString(c_struct.name) + t := C.find_node_inx(c_struct.node_inx) + + fmt.Printf("%d", t) + go_struct.Node_inx = make([]int32, t,t) + for i:=int32(0); i +#include +#include +#include +#ifndef ptr_convert +#define ptr_convert +inline uint8_t uint8_ptr(uint8_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline int8_t int8_ptr(int8_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline uint16_t uint16_ptr(uint16_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline int16_t int16_ptr(int16_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline uint32_t uint32_ptr(uint32_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline int32_t int32_ptr(int32_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline uint64_t uint64_ptr(uint64_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} +inline int64_t int64_ptr(int16_t* pointer) { + if (NULL == pointer) { + return 0;} + return *pointer; +} + +#endif +struct submit_response_msg *submit_job(struct job_descriptor *desc) +{ + + struct submit_response_msg *resp_msg; + if (slurm_submit_batch_job(desc, + &resp_msg)) { + return NULL; + } + return resp_msg; + +} +int update_job (struct job_descriptor 
*msg) { + + return slurm_update_job (msg); +} + +void free_submit_response_msg(struct submit_response_msg *msg) +{ + slurm_free_submit_response_response_msg(msg); +} +*/ +import "C" + +import "fmt" +import "unsafe" +import "slurm/jobinfo" + +type Job_descriptor struct { + Account string; + Acctg_freq string; + Admin_comment string; + Alloc_node string; + Alloc_resp_port uint16; + Alloc_sid uint32; + Argc uint32; + Argv []string; + Array_inx string; + //array_bitmap void; + Batch_features string; + Begin_time int64; + Bitflags uint32; + Burst_buffer string; + Ckpt_interval uint16; + Ckpt_dir string; + Clusters string; + Cluster_features string; + Comment string; + Contiguous uint16; + Core_spec uint16; + Cpu_bind string; + Cpu_bind_type uint16; + Cpu_freq_min uint32; + Cpu_freq_max uint32 ; + Cpu_freq_gov uint32; + Cpus_per_tres string; + Deadline int64; + Delay_boot uint32; + Dependency string; + End_time int64; + Environment []string; + Env_size uint32; + Extra string; + Exc_nodes string; + Features string; + Fed_siblings_active uint64; + Fed_siblings_viable uint64; + Group_id uint32; + Immediate uint16; + Job_id uint32; + Job_id_str string; + Kill_on_node_fail uint16; + Licenses string; + Mail_type uint16; + Mail_user string; + Mcs_label string; + Mem_bind string; + Mem_bind_type uint16; + Mem_per_tres string; + Name string; + Network string; + Nice uint32; + Num_tasks uint32; + Open_mode uint8; + Origin_cluster string; + Other_port uint16; + Overcommit uint8; + Pack_job_offset uint32; + Partition string; + Plane_size uint16; + Power_flags uint8; + Priority uint32; + Profile uint32; + Qos string; + Reboot uint16; + Resp_host string; + Restart_cnt uint16; + Req_nodes string; + Requeue uint16; + Reservation string; + Script string; + //script_buf void; + Shared uint16; + Site_factor uint32; + //spank_job_env char**; + Spank_job_env_size uint32; + Task_dist uint32; + Time_limit uint32; + Time_min uint32; + Tres_bind string; + Tres_freq string; + Tres_per_job string; + Tres_per_node string; + Tres_per_socket string; + Tres_per_task string; + User_id uint32; + Wait_all_nodes uint16; + Warn_flags uint16; + Warn_signal uint16; + Warn_time uint16; + Work_dir string; + Cpus_per_task uint16; + Min_cpus uint32; + Max_cpus uint32; + Min_nodes uint32; + Max_nodes uint32; + Boards_per_node uint16; + Sockets_per_board uint16; + Sockets_per_node uint16; + Cores_per_socket uint16; + Threads_per_core uint16; + Ntasks_per_node uint16; + Ntasks_per_socket uint16; + Ntasks_per_core uint16; + Ntasks_per_board uint16; + Pn_min_cpus uint16; + Pn_min_memory uint64; + Pn_min_tmp_disk uint32; + Req_switch uint32; + Std_err string; + Std_in string; + Std_out string; + Tres_req_cnt uint64; + Wait4switch uint32; + Wckey string; + X11 uint16; + X11_magic_cookie string; + X11_target string; + X11_target_port uint16; +} +func Job_descriptor_convert_c_to_go(c_struct *C.struct_job_descriptor) Job_descriptor{ + var go_struct Job_descriptor + + go_struct.Account = C.GoString(c_struct.account) + go_struct.Acctg_freq = C.GoString(c_struct.acctg_freq) + go_struct.Admin_comment = C.GoString(c_struct.admin_comment) + go_struct.Alloc_node = C.GoString(c_struct.alloc_node) + go_struct.Alloc_resp_port = uint16(c_struct.alloc_resp_port) + go_struct.Alloc_sid = uint32(c_struct.alloc_sid) + go_struct.Argc = uint32(c_struct.argc) + go_struct.Array_inx = C.GoString(c_struct.array_inx) + go_struct.Batch_features = C.GoString(c_struct.batch_features) + go_struct.Begin_time = int64(c_struct.begin_time) + go_struct.Bitflags = 
uint32(c_struct.bitflags) + go_struct.Burst_buffer = C.GoString(c_struct.burst_buffer) + go_struct.Ckpt_interval = uint16(c_struct.ckpt_interval) + go_struct.Ckpt_dir = C.GoString(c_struct.ckpt_dir) + go_struct.Clusters = C.GoString(c_struct.clusters) + go_struct.Cluster_features = C.GoString(c_struct.cluster_features) + go_struct.Comment = C.GoString(c_struct.comment) + go_struct.Contiguous = uint16(c_struct.contiguous) + go_struct.Core_spec = uint16(c_struct.core_spec) + go_struct.Cpu_bind = C.GoString(c_struct.cpu_bind) + go_struct.Cpu_bind_type = uint16(c_struct.cpu_bind_type) + go_struct.Cpu_freq_min = uint32(c_struct.cpu_freq_min) + go_struct.Cpu_freq_max = uint32(c_struct.cpu_freq_max) + go_struct.Cpu_freq_gov = uint32(c_struct.cpu_freq_gov) + go_struct.Cpus_per_tres = C.GoString(c_struct.cpus_per_tres) + go_struct.Deadline = int64(c_struct.deadline) + go_struct.Delay_boot = uint32(c_struct.delay_boot) + go_struct.Dependency = C.GoString(c_struct.dependency) + go_struct.End_time = int64(c_struct.end_time) + go_struct.Env_size = uint32(c_struct.env_size) + go_struct.Extra = C.GoString(c_struct.extra) + go_struct.Exc_nodes = C.GoString(c_struct.exc_nodes) + go_struct.Features = C.GoString(c_struct.features) + go_struct.Fed_siblings_active = uint64(c_struct.fed_siblings_active) + go_struct.Fed_siblings_viable = uint64(c_struct.fed_siblings_viable) + go_struct.Group_id = uint32(c_struct.group_id) + go_struct.Immediate = uint16(c_struct.immediate) + go_struct.Job_id = uint32(c_struct.job_id) + go_struct.Job_id_str = C.GoString(c_struct.job_id_str) + go_struct.Kill_on_node_fail = uint16(c_struct.kill_on_node_fail) + go_struct.Licenses = C.GoString(c_struct.licenses) + go_struct.Mail_type = uint16(c_struct.mail_type) + go_struct.Mail_user = C.GoString(c_struct.mail_user) + go_struct.Mcs_label = C.GoString(c_struct.mcs_label) + go_struct.Mem_bind = C.GoString(c_struct.mem_bind) + go_struct.Mem_bind_type = uint16(c_struct.mem_bind_type) + go_struct.Mem_per_tres = C.GoString(c_struct.mem_per_tres) + go_struct.Name = C.GoString(c_struct.name) + go_struct.Network = C.GoString(c_struct.network) + go_struct.Nice = uint32(c_struct.nice) + go_struct.Num_tasks = uint32(c_struct.num_tasks) + go_struct.Open_mode = uint8(c_struct.open_mode) + go_struct.Origin_cluster = C.GoString(c_struct.origin_cluster) + go_struct.Other_port = uint16(c_struct.other_port) + go_struct.Overcommit = uint8(c_struct.overcommit) + go_struct.Pack_job_offset = uint32(c_struct.pack_job_offset) + go_struct.Partition = C.GoString(c_struct.partition) + go_struct.Plane_size = uint16(c_struct.plane_size) + go_struct.Power_flags = uint8(c_struct.power_flags) + go_struct.Priority = uint32(c_struct.priority) + go_struct.Profile = uint32(c_struct.profile) + go_struct.Qos = C.GoString(c_struct.qos) + go_struct.Reboot = uint16(c_struct.reboot) + go_struct.Resp_host = C.GoString(c_struct.resp_host) + go_struct.Restart_cnt = uint16(c_struct.restart_cnt) + go_struct.Req_nodes = C.GoString(c_struct.req_nodes) + go_struct.Requeue = uint16(c_struct.requeue) + go_struct.Reservation = C.GoString(c_struct.reservation) + go_struct.Script = C.GoString(c_struct.script) + go_struct.Shared = uint16(c_struct.shared) + go_struct.Site_factor = uint32(c_struct.site_factor) + go_struct.Spank_job_env_size = uint32(c_struct.spank_job_env_size) + go_struct.Task_dist = uint32(c_struct.task_dist) + go_struct.Time_limit = uint32(c_struct.time_limit) + go_struct.Time_min = uint32(c_struct.time_min) + go_struct.Tres_bind = C.GoString(c_struct.tres_bind) + 
go_struct.Tres_freq = C.GoString(c_struct.tres_freq) + go_struct.Tres_per_job = C.GoString(c_struct.tres_per_job) + go_struct.Tres_per_node = C.GoString(c_struct.tres_per_node) + go_struct.Tres_per_socket = C.GoString(c_struct.tres_per_socket) + go_struct.Tres_per_task = C.GoString(c_struct.tres_per_task) + go_struct.User_id = uint32(c_struct.user_id) + go_struct.Wait_all_nodes = uint16(c_struct.wait_all_nodes) + go_struct.Warn_flags = uint16(c_struct.warn_flags) + go_struct.Warn_signal = uint16(c_struct.warn_signal) + go_struct.Warn_time = uint16(c_struct.warn_time) + go_struct.Work_dir = C.GoString(c_struct.work_dir) + go_struct.Cpus_per_task = uint16(c_struct.cpus_per_task) + go_struct.Min_cpus = uint32(c_struct.min_cpus) + go_struct.Max_cpus = uint32(c_struct.max_cpus) + go_struct.Min_nodes = uint32(c_struct.min_nodes) + go_struct.Max_nodes = uint32(c_struct.max_nodes) + go_struct.Boards_per_node = uint16(c_struct.boards_per_node) + go_struct.Sockets_per_board = uint16(c_struct.sockets_per_board) + go_struct.Sockets_per_node = uint16(c_struct.sockets_per_node) + go_struct.Cores_per_socket = uint16(c_struct.cores_per_socket) + go_struct.Threads_per_core = uint16(c_struct.threads_per_core) + go_struct.Ntasks_per_node = uint16(c_struct.ntasks_per_node) + go_struct.Ntasks_per_socket = uint16(c_struct.ntasks_per_socket) + go_struct.Ntasks_per_core = uint16(c_struct.ntasks_per_core) + go_struct.Ntasks_per_board = uint16(c_struct.ntasks_per_board) + go_struct.Pn_min_cpus = uint16(c_struct.pn_min_cpus) + go_struct.Pn_min_memory = uint64(c_struct.pn_min_memory) + go_struct.Pn_min_tmp_disk = uint32(c_struct.pn_min_tmp_disk) + go_struct.Req_switch = uint32(c_struct.req_switch) + go_struct.Std_err = C.GoString(c_struct.std_err) + go_struct.Std_in = C.GoString(c_struct.std_in) + go_struct.Std_out = C.GoString(c_struct.std_out) + go_struct.Wait4switch = uint32(c_struct.wait4switch) + go_struct.Wckey = C.GoString(c_struct.wckey) + go_struct.X11 = uint16(c_struct.x11) + go_struct.X11_magic_cookie = C.GoString(c_struct.x11_magic_cookie) + go_struct.X11_target = C.GoString(c_struct.x11_target) + go_struct.X11_target_port = uint16(c_struct.x11_target_port) + return go_struct + } + func Print_Job_descriptor(go_struct Job_descriptor){ + fmt.Printf("%s:\t %s\n","account", go_struct.Account) + fmt.Printf("%s:\t %s\n","acctg freq", go_struct.Acctg_freq) + fmt.Printf("%s:\t %s\n","admin comment", go_struct.Admin_comment) + fmt.Printf("%s:\t %s\n","alloc node", go_struct.Alloc_node) + fmt.Printf("%s:\t %d\n","alloc resp port", go_struct.Alloc_resp_port) + fmt.Printf("%s:\t %d\n","alloc sid", go_struct.Alloc_sid) + fmt.Printf("%s:\t %d\n","argc", go_struct.Argc) + fmt.Printf("%s:\t %s\n","array inx", go_struct.Array_inx) + fmt.Printf("%s:\t %s\n","batch features", go_struct.Batch_features) + fmt.Printf("%s:\t %d\n","begin time", go_struct.Begin_time) + fmt.Printf("%s:\t %d\n","bitflags", go_struct.Bitflags) + fmt.Printf("%s:\t %s\n","burst buffer", go_struct.Burst_buffer) + fmt.Printf("%s:\t %d\n","ckpt interval", go_struct.Ckpt_interval) + fmt.Printf("%s:\t %s\n","ckpt dir", go_struct.Ckpt_dir) + fmt.Printf("%s:\t %s\n","clusters", go_struct.Clusters) + fmt.Printf("%s:\t %s\n","cluster features", go_struct.Cluster_features) + fmt.Printf("%s:\t %s\n","comment", go_struct.Comment) + fmt.Printf("%s:\t %d\n","contiguous", go_struct.Contiguous) + fmt.Printf("%s:\t %d\n","core spec", go_struct.Core_spec) + fmt.Printf("%s:\t %s\n","cpu bind", go_struct.Cpu_bind) + fmt.Printf("%s:\t %d\n","cpu bind type", 
go_struct.Cpu_bind_type) + fmt.Printf("%s:\t %d\n","cpu freq min", go_struct.Cpu_freq_min) + fmt.Printf("%s:\t %d\n","cpu freq max", go_struct.Cpu_freq_max) + fmt.Printf("%s:\t %d\n","cpu freq gov", go_struct.Cpu_freq_gov) + fmt.Printf("%s:\t %s\n","cpus per tres", go_struct.Cpus_per_tres) + fmt.Printf("%s:\t %d\n","deadline", go_struct.Deadline) + fmt.Printf("%s:\t %d\n","delay boot", go_struct.Delay_boot) + fmt.Printf("%s:\t %s\n","dependency", go_struct.Dependency) + fmt.Printf("%s:\t %d\n","end time", go_struct.End_time) + fmt.Printf("%s:\t %d\n","env size", go_struct.Env_size) + fmt.Printf("%s:\t %s\n","extra", go_struct.Extra) + fmt.Printf("%s:\t %s\n","exc nodes", go_struct.Exc_nodes) + fmt.Printf("%s:\t %s\n","features", go_struct.Features) + fmt.Printf("%s:\t %d\n","fed siblings active", go_struct.Fed_siblings_active) + fmt.Printf("%s:\t %d\n","fed siblings viable", go_struct.Fed_siblings_viable) + fmt.Printf("%s:\t %d\n","group id", go_struct.Group_id) + fmt.Printf("%s:\t %d\n","immediate", go_struct.Immediate) + fmt.Printf("%s:\t %d\n","job id", go_struct.Job_id) + fmt.Printf("%s:\t %s\n","job id str", go_struct.Job_id_str) + fmt.Printf("%s:\t %d\n","kill on node fail", go_struct.Kill_on_node_fail) + fmt.Printf("%s:\t %s\n","licenses", go_struct.Licenses) + fmt.Printf("%s:\t %d\n","mail type", go_struct.Mail_type) + fmt.Printf("%s:\t %s\n","mail user", go_struct.Mail_user) + fmt.Printf("%s:\t %s\n","mcs label", go_struct.Mcs_label) + fmt.Printf("%s:\t %s\n","mem bind", go_struct.Mem_bind) + fmt.Printf("%s:\t %d\n","mem bind type", go_struct.Mem_bind_type) + fmt.Printf("%s:\t %s\n","mem per tres", go_struct.Mem_per_tres) + fmt.Printf("%s:\t %s\n","name", go_struct.Name) + fmt.Printf("%s:\t %s\n","network", go_struct.Network) + fmt.Printf("%s:\t %d\n","nice", go_struct.Nice) + fmt.Printf("%s:\t %d\n","num tasks", go_struct.Num_tasks) + fmt.Printf("%s:\t %d\n","open mode", go_struct.Open_mode) + fmt.Printf("%s:\t %s\n","origin cluster", go_struct.Origin_cluster) + fmt.Printf("%s:\t %d\n","other port", go_struct.Other_port) + fmt.Printf("%s:\t %d\n","overcommit", go_struct.Overcommit) + fmt.Printf("%s:\t %d\n","pack job offset", go_struct.Pack_job_offset) + fmt.Printf("%s:\t %s\n","partition", go_struct.Partition) + fmt.Printf("%s:\t %d\n","plane size", go_struct.Plane_size) + fmt.Printf("%s:\t %d\n","power flags", go_struct.Power_flags) + fmt.Printf("%s:\t %d\n","priority", go_struct.Priority) + fmt.Printf("%s:\t %d\n","profile", go_struct.Profile) + fmt.Printf("%s:\t %s\n","qos", go_struct.Qos) + fmt.Printf("%s:\t %d\n","reboot", go_struct.Reboot) + fmt.Printf("%s:\t %s\n","resp host", go_struct.Resp_host) + fmt.Printf("%s:\t %d\n","restart cnt", go_struct.Restart_cnt) + fmt.Printf("%s:\t %s\n","req nodes", go_struct.Req_nodes) + fmt.Printf("%s:\t %d\n","requeue", go_struct.Requeue) + fmt.Printf("%s:\t %s\n","reservation", go_struct.Reservation) + fmt.Printf("%s:\t %s\n","script", go_struct.Script) + fmt.Printf("%s:\t %d\n","shared", go_struct.Shared) + fmt.Printf("%s:\t %d\n","site factor", go_struct.Site_factor) + fmt.Printf("%s:\t %d\n","spank job env size", go_struct.Spank_job_env_size) + fmt.Printf("%s:\t %d\n","task dist", go_struct.Task_dist) + fmt.Printf("%s:\t %d\n","time limit", go_struct.Time_limit) + fmt.Printf("%s:\t %d\n","time min", go_struct.Time_min) + fmt.Printf("%s:\t %s\n","tres bind", go_struct.Tres_bind) + fmt.Printf("%s:\t %s\n","tres freq", go_struct.Tres_freq) + fmt.Printf("%s:\t %s\n","tres per job", go_struct.Tres_per_job) + fmt.Printf("%s:\t %s\n","tres 
per node", go_struct.Tres_per_node) + fmt.Printf("%s:\t %s\n","tres per socket", go_struct.Tres_per_socket) + fmt.Printf("%s:\t %s\n","tres per task", go_struct.Tres_per_task) + fmt.Printf("%s:\t %d\n","user id", go_struct.User_id) + fmt.Printf("%s:\t %d\n","wait all nodes", go_struct.Wait_all_nodes) + fmt.Printf("%s:\t %d\n","warn flags", go_struct.Warn_flags) + fmt.Printf("%s:\t %d\n","warn signal", go_struct.Warn_signal) + fmt.Printf("%s:\t %d\n","warn time", go_struct.Warn_time) + fmt.Printf("%s:\t %s\n","work dir", go_struct.Work_dir) + fmt.Printf("%s:\t %d\n","cpus per task", go_struct.Cpus_per_task) + fmt.Printf("%s:\t %d\n","min cpus", go_struct.Min_cpus) + fmt.Printf("%s:\t %d\n","max cpus", go_struct.Max_cpus) + fmt.Printf("%s:\t %d\n","min nodes", go_struct.Min_nodes) + fmt.Printf("%s:\t %d\n","max nodes", go_struct.Max_nodes) + fmt.Printf("%s:\t %d\n","boards per node", go_struct.Boards_per_node) + fmt.Printf("%s:\t %d\n","sockets per board", go_struct.Sockets_per_board) + fmt.Printf("%s:\t %d\n","sockets per node", go_struct.Sockets_per_node) + fmt.Printf("%s:\t %d\n","cores per socket", go_struct.Cores_per_socket) + fmt.Printf("%s:\t %d\n","threads per core", go_struct.Threads_per_core) + fmt.Printf("%s:\t %d\n","ntasks per node", go_struct.Ntasks_per_node) + fmt.Printf("%s:\t %d\n","ntasks per socket", go_struct.Ntasks_per_socket) + fmt.Printf("%s:\t %d\n","ntasks per core", go_struct.Ntasks_per_core) + fmt.Printf("%s:\t %d\n","ntasks per board", go_struct.Ntasks_per_board) + fmt.Printf("%s:\t %d\n","pn min cpus", go_struct.Pn_min_cpus) + fmt.Printf("%s:\t %d\n","pn min memory", go_struct.Pn_min_memory) + fmt.Printf("%s:\t %d\n","pn min tmp disk", go_struct.Pn_min_tmp_disk) + fmt.Printf("%s:\t %d\n","req switch", go_struct.Req_switch) + fmt.Printf("%s:\t %s\n","std err", go_struct.Std_err) + fmt.Printf("%s:\t %s\n","std in", go_struct.Std_in) + fmt.Printf("%s:\t %s\n","std out", go_struct.Std_out) + fmt.Printf("%s:\t %d\n","tres req cnt", go_struct.Tres_req_cnt) + fmt.Printf("%s:\t %d\n","wait4switch", go_struct.Wait4switch) + fmt.Printf("%s:\t %s\n","wckey", go_struct.Wckey) + fmt.Printf("%s:\t %d\n","x11", go_struct.X11) + fmt.Printf("%s:\t %s\n","x11 magic cookie", go_struct.X11_magic_cookie) + fmt.Printf("%s:\t %s\n","x11 target", go_struct.X11_target) + fmt.Printf("%s:\t %d\n","x11 target port", go_struct.X11_target_port) +} + +type Update_job_options struct { + Partition string; + Qos string; + Num_tasks uint32; + Ntasks_per_node uint16; + Ntasks_per_socket uint16; + Ntasks_per_core uint16; + Min_nodes uint32; + Max_nodes uint32; +} + +type Submit_response_msg struct { + Job_id uint32; + Step_id uint32; + Error_code uint32; + Job_submit_user_msg string; +} +func submit_response_msg_convert_c_to_go(c_struct *C.struct_submit_response_msg) Submit_response_msg{ + var go_struct Submit_response_msg + + go_struct.Job_id = uint32(c_struct.job_id) + go_struct.Step_id = uint32(c_struct.step_id) + go_struct.Error_code = uint32(c_struct.error_code) + go_struct.Job_submit_user_msg = C.GoString(c_struct.job_submit_user_msg) + return go_struct + } + func Print_submit_response_msg(go_struct Submit_response_msg){ + fmt.Printf("%s:\t %d\n","job id", go_struct.Job_id) + fmt.Printf("%s:\t %d\n","step id", go_struct.Step_id) + fmt.Printf("%s:\t %d\n","error code", go_struct.Error_code) + fmt.Printf("%s:\t %s\n","job submit user msg", go_struct.Job_submit_user_msg) +} +/*This is an ugly function, since we start to convert everyting back*/ + +func Submit_job (go_struct *Job_descriptor) 
Submit_response_msg { + + var c_struct C.struct_job_descriptor + + C.slurm_init_job_desc_msg(&c_struct) + if go_struct.Account!= "" { + account_s :=C.CString(go_struct.Account) + defer C.free(unsafe.Pointer(account_s)) + c_struct.account=account_s + } + if go_struct.Acctg_freq!= "" { + acctg_freq_s :=C.CString(go_struct.Acctg_freq) + defer C.free(unsafe.Pointer(acctg_freq_s)) + c_struct.acctg_freq=acctg_freq_s + } + if go_struct.Admin_comment!= "" { + admin_comment_s :=C.CString(go_struct.Admin_comment) + defer C.free(unsafe.Pointer(admin_comment_s)) + c_struct.admin_comment=admin_comment_s + } + if go_struct.Alloc_node!= "" { + alloc_node_s :=C.CString(go_struct.Alloc_node) + defer C.free(unsafe.Pointer(alloc_node_s)) + c_struct.alloc_node=alloc_node_s + } + if go_struct.Alloc_resp_port!= 0 { + c_struct.alloc_resp_port = C.uint16_t(go_struct.Alloc_resp_port) + } + if go_struct.Alloc_sid!= 0 { + c_struct.alloc_sid = C.uint32_t(go_struct.Alloc_sid) + } + if len(go_struct.Argv) > 0 { + c_struct.argc = C.uint32_t(len(go_struct.Argv)) + cArray := C.malloc(C.size_t(C.size_t(len(go_struct.Argv))*C.size_t(unsafe.Sizeof(uintptr(0))))) + a := (*[1<<30 - 1]*C.char)(cArray ) + for i := 0; i < len(go_struct.Argv); i++ { + a[i]= C.CString(go_struct.Argv[i]) + } + c_struct.argv=(**C.char)(cArray) + fmt.Printf("test\n") + } + + if go_struct.Array_inx!= "" { + array_inx_s :=C.CString(go_struct.Array_inx) + defer C.free(unsafe.Pointer(array_inx_s)) + c_struct.array_inx=array_inx_s + } + if go_struct.Batch_features!= "" { + batch_features_s :=C.CString(go_struct.Batch_features) + defer C.free(unsafe.Pointer(batch_features_s)) + c_struct.batch_features=batch_features_s + } + if go_struct.Begin_time!= 0 { + c_struct.begin_time = C.int64_t(go_struct.Begin_time) + } + if go_struct.Bitflags!= 0 { + c_struct.bitflags = C.uint32_t(go_struct.Bitflags) + } + if go_struct.Burst_buffer!= "" { + burst_buffer_s :=C.CString(go_struct.Burst_buffer) + defer C.free(unsafe.Pointer(burst_buffer_s)) + c_struct.burst_buffer=burst_buffer_s + } + if go_struct.Ckpt_interval!= 0 { + c_struct.ckpt_interval = C.uint16_t(go_struct.Ckpt_interval) + } + if go_struct.Ckpt_dir!= "" { + ckpt_dir_s :=C.CString(go_struct.Ckpt_dir) + defer C.free(unsafe.Pointer(ckpt_dir_s)) + c_struct.ckpt_dir=ckpt_dir_s + } + if go_struct.Clusters!= "" { + clusters_s :=C.CString(go_struct.Clusters) + defer C.free(unsafe.Pointer(clusters_s)) + c_struct.clusters=clusters_s + } + if go_struct.Cluster_features!= "" { + cluster_features_s := C.CString(go_struct.Cluster_features) + defer C.free(unsafe.Pointer(cluster_features_s)) + c_struct.cluster_features=cluster_features_s + } + if go_struct.Comment!= "" { + comment_s :=C.CString(go_struct.Comment) + defer C.free(unsafe.Pointer(comment_s)) + c_struct.comment=comment_s + } + if go_struct.Contiguous!= 0 { + c_struct.contiguous = C.uint16_t(go_struct.Contiguous) + } + if go_struct.Core_spec!= 0 { + c_struct.core_spec = C.uint16_t(go_struct.Core_spec) + } + if go_struct.Cpu_bind!= "" { + cpu_bind_s :=C.CString(go_struct.Cpu_bind) + defer C.free(unsafe.Pointer(cpu_bind_s)) + c_struct.cpu_bind=cpu_bind_s + } + if go_struct.Cpu_bind_type!= 0 { + c_struct.cpu_bind_type = C.uint16_t(go_struct.Cpu_bind_type) + } + if go_struct.Cpu_freq_min!= 0 { + c_struct.cpu_freq_min = C.uint32_t(go_struct.Cpu_freq_min) + } + if go_struct.Cpu_freq_max!= 0 { + c_struct.cpu_freq_max = C.uint32_t(go_struct.Cpu_freq_max) + } + if go_struct.Cpu_freq_gov!= 0 { + c_struct.cpu_freq_gov = C.uint32_t(go_struct.Cpu_freq_gov) + } + if 
go_struct.Cpus_per_tres!= "" { + cpus_per_tres_s :=C.CString(go_struct.Cpus_per_tres) + defer C.free(unsafe.Pointer(cpus_per_tres_s)) + c_struct.cpus_per_tres=cpus_per_tres_s + } + if go_struct.Deadline!= 0 { + c_struct.deadline = C.int64_t(go_struct.Deadline) + } + if go_struct.Delay_boot!= 0 { + c_struct.delay_boot = C.uint32_t(go_struct.Delay_boot) + } + if go_struct.Dependency!= "" { + dependency_s :=C.CString(go_struct.Dependency) + defer C.free(unsafe.Pointer(dependency_s)) + c_struct.dependency=dependency_s + } + if go_struct.End_time!= 0 { + c_struct.end_time = C.int64_t(go_struct.End_time) + } + if len(go_struct.Environment) > 0 { + c_struct.env_size = C.uint32_t(len(go_struct.Environment)) + cArray := C.malloc(C.size_t(C.size_t(len(go_struct.Environment))*C.size_t(unsafe.Sizeof(uintptr(0))))) + a := (*[1<<30 - 1]*C.char)(cArray ) + for i := 0; i < len(go_struct.Environment); i++ { + a[i]= C.CString(go_struct.Environment[i]) + defer C.free(unsafe.Pointer(a[i])) + } + c_struct.environment=(**C.char)(cArray) + } else { + c_struct.env_size = 1 + cArray := C.malloc(C.size_t(C.size_t(1)*C.size_t(unsafe.Sizeof(uintptr(0))))) + a := (*[1<<30 - 1]*C.char)(cArray ) + a[0]= C.CString("SLURM_GO_JOB=TRUE") + defer C.free(unsafe.Pointer(a[0])) + c_struct.environment=(**C.char)(cArray) + + } + if go_struct.Extra!= "" { + extra_s :=C.CString(go_struct.Extra) + defer C.free(unsafe.Pointer(extra_s)) + c_struct.extra=extra_s + } + if go_struct.Exc_nodes!= "" { + exc_nodes_s :=C.CString(go_struct.Exc_nodes) + defer C.free(unsafe.Pointer(exc_nodes_s)) + c_struct.exc_nodes=exc_nodes_s + } + if go_struct.Features!= "" { + features_s :=C.CString(go_struct.Features) + defer C.free(unsafe.Pointer(features_s)) + c_struct.features=features_s + } + if go_struct.Fed_siblings_active!= 0 { + c_struct.fed_siblings_active = C.uint64_t(go_struct.Fed_siblings_active) + } + if go_struct.Fed_siblings_viable!= 0 { + c_struct.fed_siblings_viable = C.uint64_t(go_struct.Fed_siblings_viable) + } + if go_struct.Group_id!= 0 { + c_struct.group_id = C.uint32_t(go_struct.Group_id) + } + if go_struct.Immediate!= 0 { + c_struct.immediate = C.uint16_t(go_struct.Immediate) + } + if go_struct.Job_id!= 0 { + c_struct.job_id = C.uint32_t(go_struct.Job_id) + } + if go_struct.Job_id_str!= "" { + job_id_str_s :=C.CString(go_struct.Job_id_str) + defer C.free(unsafe.Pointer(job_id_str_s)) + c_struct.job_id_str=job_id_str_s + } + if go_struct.Kill_on_node_fail!= 0 { + c_struct.kill_on_node_fail = C.uint16_t(go_struct.Kill_on_node_fail) + } + if go_struct.Licenses!= "" { + licenses_s :=C.CString(go_struct.Licenses) + defer C.free(unsafe.Pointer(licenses_s)) + c_struct.licenses=licenses_s + } + if go_struct.Mail_type!= 0 { + c_struct.mail_type = C.uint16_t(go_struct.Mail_type) + } + if go_struct.Mail_user!= "" { + mail_user_s :=C.CString(go_struct.Mail_user) + defer C.free(unsafe.Pointer(mail_user_s)) + c_struct.mail_user=mail_user_s + } + if go_struct.Mcs_label!= "" { + mcs_label_s :=C.CString(go_struct.Mcs_label) + defer C.free(unsafe.Pointer(mcs_label_s)) + c_struct.mcs_label=mcs_label_s + } + if go_struct.Mem_bind!= "" { + mem_bind_s :=C.CString(go_struct.Mem_bind) + defer C.free(unsafe.Pointer(mem_bind_s)) + c_struct.mem_bind=mem_bind_s + } + if go_struct.Mem_bind_type!= 0 { + c_struct.mem_bind_type = C.uint16_t(go_struct.Mem_bind_type) + } + if go_struct.Mem_per_tres!= "" { + mem_per_tres_s :=C.CString(go_struct.Mem_per_tres) + defer C.free(unsafe.Pointer(mem_per_tres_s)) + c_struct.mem_per_tres=mem_per_tres_s + } + if 
go_struct.Name!= "" { + name_s :=C.CString(go_struct.Name) + defer C.free(unsafe.Pointer(name_s)) + c_struct.name=name_s + } + if go_struct.Network!= "" { + network_s :=C.CString(go_struct.Network) + defer C.free(unsafe.Pointer(network_s)) + c_struct.network=network_s + } + if go_struct.Nice!= 0 { + c_struct.nice = C.uint32_t(go_struct.Nice) + } + if go_struct.Num_tasks!= 0 { + c_struct.num_tasks = C.uint32_t(go_struct.Num_tasks) + } + if go_struct.Open_mode!= 0 { + c_struct.open_mode = C.uint8_t(go_struct.Open_mode) + } + if go_struct.Origin_cluster!= "" { + origin_cluster_s :=C.CString(go_struct.Origin_cluster) + defer C.free(unsafe.Pointer(origin_cluster_s)) + c_struct.origin_cluster=origin_cluster_s + } + if go_struct.Other_port!= 0 { + c_struct.other_port = C.uint16_t(go_struct.Other_port) + } + if go_struct.Overcommit!= 0 { + c_struct.overcommit = C.uint8_t(go_struct.Overcommit) + } + if go_struct.Pack_job_offset!= 0 { + c_struct.pack_job_offset = C.uint32_t(go_struct.Pack_job_offset) + } + if go_struct.Partition!= "" { + partition_s :=C.CString(go_struct.Partition) + defer C.free(unsafe.Pointer(partition_s)) + c_struct.partition=partition_s + } + if go_struct.Plane_size!= 0 { + c_struct.plane_size = C.uint16_t(go_struct.Plane_size) + } + if go_struct.Power_flags!= 0 { + c_struct.power_flags = C.uint8_t(go_struct.Power_flags) + } + if go_struct.Priority!= 0 { + c_struct.priority = C.uint32_t(go_struct.Priority) + } + if go_struct.Profile!= 0 { + c_struct.profile = C.uint32_t(go_struct.Profile) + } + if go_struct.Qos!= "" { + qos_s :=C.CString(go_struct.Qos) + defer C.free(unsafe.Pointer(qos_s)) + c_struct.qos=qos_s + } + if go_struct.Reboot!= 0 { + c_struct.reboot = C.uint16_t(go_struct.Reboot) + } + if go_struct.Resp_host!= "" { + resp_host_s :=C.CString(go_struct.Resp_host) + defer C.free(unsafe.Pointer(resp_host_s)) + c_struct.resp_host=resp_host_s + } + if go_struct.Restart_cnt!= 0 { + c_struct.restart_cnt = C.uint16_t(go_struct.Restart_cnt) + } + if go_struct.Req_nodes!= "" { + req_nodes_s :=C.CString(go_struct.Req_nodes) + defer C.free(unsafe.Pointer(req_nodes_s)) + c_struct.req_nodes=req_nodes_s + } + if go_struct.Requeue!= 0 { + c_struct.requeue = C.uint16_t(go_struct.Requeue) + } + if go_struct.Reservation!= "" { + reservation_s :=C.CString(go_struct.Reservation) + defer C.free(unsafe.Pointer(reservation_s)) + c_struct.reservation=reservation_s + } + if go_struct.Script!= "" { + script_s :=C.CString(go_struct.Script) + defer C.free(unsafe.Pointer(script_s)) + c_struct.script=script_s + } + if go_struct.Shared!= 0 { + c_struct.shared = C.uint16_t(go_struct.Shared) + } + if go_struct.Site_factor!= 0 { + c_struct.site_factor = C.uint32_t(go_struct.Site_factor) + } + if go_struct.Spank_job_env_size!= 0 { + c_struct.spank_job_env_size = C.uint32_t(go_struct.Spank_job_env_size) + } + if go_struct.Task_dist!= 0 { + c_struct.task_dist = C.uint32_t(go_struct.Task_dist) + } + if go_struct.Time_limit!= 0 { + c_struct.time_limit = C.uint32_t(go_struct.Time_limit) + } + if go_struct.Time_min!= 0 { + c_struct.time_min = C.uint32_t(go_struct.Time_min) + } + if go_struct.Tres_bind!= "" { + tres_bind_s :=C.CString(go_struct.Tres_bind) + defer C.free(unsafe.Pointer(tres_bind_s)) + c_struct.tres_bind=tres_bind_s + } + if go_struct.Tres_freq!= "" { + tres_freq_s :=C.CString(go_struct.Tres_freq) + defer C.free(unsafe.Pointer(tres_freq_s)) + c_struct.tres_freq=tres_freq_s + } + if go_struct.Tres_per_job!= "" { + tres_per_job_s :=C.CString(go_struct.Tres_per_job) + defer 
C.free(unsafe.Pointer(tres_per_job_s)) + c_struct.tres_per_job=tres_per_job_s + } + if go_struct.Tres_per_node!= "" { + tres_per_node_s :=C.CString(go_struct.Tres_per_node) + defer C.free(unsafe.Pointer(tres_per_node_s)) + c_struct.tres_per_node=tres_per_node_s + } + if go_struct.Tres_per_socket!= "" { + tres_per_socket_s :=C.CString(go_struct.Tres_per_socket) + defer C.free(unsafe.Pointer(tres_per_socket_s)) + c_struct.tres_per_socket=tres_per_socket_s + } + if go_struct.Tres_per_task!= "" { + tres_per_task_s :=C.CString(go_struct.Tres_per_task) + defer C.free(unsafe.Pointer(tres_per_task_s)) + c_struct.tres_per_task=tres_per_task_s + } + if go_struct.User_id!= 0 { + c_struct.user_id = C.uint32_t(go_struct.User_id) + } + if go_struct.Wait_all_nodes!= 0 { + c_struct.wait_all_nodes = C.uint16_t(go_struct.Wait_all_nodes) + } + if go_struct.Warn_flags!= 0 { + c_struct.warn_flags = C.uint16_t(go_struct.Warn_flags) + } + if go_struct.Warn_signal!= 0 { + c_struct.warn_signal = C.uint16_t(go_struct.Warn_signal) + } + if go_struct.Warn_time!= 0 { + c_struct.warn_time = C.uint16_t(go_struct.Warn_time) + } + if go_struct.Work_dir!= "" { + work_dir_s :=C.CString(go_struct.Work_dir) + defer C.free(unsafe.Pointer(work_dir_s)) + c_struct.work_dir=work_dir_s + } + if go_struct.Cpus_per_task!= 0 { + c_struct.cpus_per_task = C.uint16_t(go_struct.Cpus_per_task) + } + if go_struct.Min_cpus!= 0 { + c_struct.min_cpus = C.uint32_t(go_struct.Min_cpus) + } + if go_struct.Max_cpus!= 0 { + c_struct.max_cpus = C.uint32_t(go_struct.Max_cpus) + } + if go_struct.Min_nodes!= 0 { + c_struct.min_nodes = C.uint32_t(go_struct.Min_nodes) + } + if go_struct.Max_nodes!= 0 { + c_struct.max_nodes = C.uint32_t(go_struct.Max_nodes) + } + if go_struct.Boards_per_node!= 0 { + c_struct.boards_per_node = C.uint16_t(go_struct.Boards_per_node) + } + if go_struct.Sockets_per_board!= 0 { + c_struct.sockets_per_board = C.uint16_t(go_struct.Sockets_per_board) + } + if go_struct.Sockets_per_node!= 0 { + c_struct.sockets_per_node = C.uint16_t(go_struct.Sockets_per_node) + } + if go_struct.Cores_per_socket!= 0 { + c_struct.cores_per_socket = C.uint16_t(go_struct.Cores_per_socket) + } + if go_struct.Threads_per_core!= 0 { + c_struct.threads_per_core = C.uint16_t(go_struct.Threads_per_core) + } + if go_struct.Ntasks_per_node!= 0 { + c_struct.ntasks_per_node = C.uint16_t(go_struct.Ntasks_per_node) + } + if go_struct.Ntasks_per_socket!= 0 { + c_struct.ntasks_per_socket = C.uint16_t(go_struct.Ntasks_per_socket) + } + if go_struct.Ntasks_per_core!= 0 { + c_struct.ntasks_per_core = C.uint16_t(go_struct.Ntasks_per_core) + } + if go_struct.Ntasks_per_board!= 0 { + c_struct.ntasks_per_board = C.uint16_t(go_struct.Ntasks_per_board) + } + if go_struct.Pn_min_cpus!= 0 { + c_struct.pn_min_cpus = C.uint16_t(go_struct.Pn_min_cpus) + } + if go_struct.Pn_min_memory!= 0 { + c_struct.pn_min_memory = C.uint64_t(go_struct.Pn_min_memory) + } + if go_struct.Pn_min_tmp_disk!= 0 { + c_struct.pn_min_tmp_disk = C.uint32_t(go_struct.Pn_min_tmp_disk) + } + if go_struct.Req_switch!= 0 { + c_struct.req_switch = C.uint32_t(go_struct.Req_switch) + } + if go_struct.Std_err!= "" { + std_err_s :=C.CString(go_struct.Std_err) + defer C.free(unsafe.Pointer(std_err_s)) + c_struct.std_err=std_err_s + } + if go_struct.Std_in!= "" { + std_in_s :=C.CString(go_struct.Std_in) + defer C.free(unsafe.Pointer(std_in_s)) + c_struct.std_in=std_in_s + } + if go_struct.Std_out!= "" { + std_out_s :=C.CString(go_struct.Std_out) + defer C.free(unsafe.Pointer(std_out_s)) + c_struct.std_out=std_out_s + 
}
+
+ if go_struct.Wait4switch != 0 {
+  c_struct.wait4switch = C.uint32_t(go_struct.Wait4switch)
+ }
+ if go_struct.Wckey != "" {
+  wckey_s := C.CString(go_struct.Wckey)
+  defer C.free(unsafe.Pointer(wckey_s))
+  c_struct.wckey = wckey_s
+ }
+ if go_struct.X11 != 0 {
+  c_struct.x11 = C.uint16_t(go_struct.X11)
+ }
+ if go_struct.X11_magic_cookie != "" {
+  x11_magic_cookie_s := C.CString(go_struct.X11_magic_cookie)
+  defer C.free(unsafe.Pointer(x11_magic_cookie_s))
+  c_struct.x11_magic_cookie = x11_magic_cookie_s
+ }
+ if go_struct.X11_target != "" {
+  x11_target_s := C.CString(go_struct.X11_target)
+  defer C.free(unsafe.Pointer(x11_target_s))
+  c_struct.x11_target = x11_target_s
+ }
+ if go_struct.X11_target_port != 0 {
+  c_struct.x11_target_port = C.uint16_t(go_struct.X11_target_port)
+ }
+
+ c_msg := C.submit_job(&c_struct)
+ if c_msg == nil {
+  /* do not defer the free before this check: freeing a NULL
+     response message would crash inside libslurm */
+  go_msg := Submit_response_msg{}
+  go_msg.Job_id = 1<<31 - 1
+  go_msg.Error_code = uint32(C.slurm_get_errno())
+  return go_msg
+ }
+ defer C.free_submit_response_msg(c_msg)
+ go_msg := submit_response_msg_convert_c_to_go(c_msg)
+
+ return go_msg
+}
+
+func Update_job(update_info Update_job_options, JobId uint32) uint32 {
+
+ var c_struct C.struct_job_descriptor
+ C.slurm_init_job_desc_msg(&c_struct)
+ if update_info.Partition != "" {
+  partition_s := C.CString(update_info.Partition)
+  defer C.free(unsafe.Pointer(partition_s))
+  c_struct.partition = partition_s
+ }
+ if update_info.Qos != "" {
+  qos_s := C.CString(update_info.Qos)
+  defer C.free(unsafe.Pointer(qos_s))
+  c_struct.qos = qos_s
+ }
+ if update_info.Num_tasks != 0 {
+  c_struct.num_tasks = C.uint32_t(update_info.Num_tasks)
+ }
+ if update_info.Ntasks_per_core != 0 {
+  c_struct.ntasks_per_core = C.uint16_t(update_info.Ntasks_per_core)
+ }
+ if update_info.Ntasks_per_node != 0 {
+  c_struct.ntasks_per_node = C.uint16_t(update_info.Ntasks_per_node)
+ }
+ if update_info.Ntasks_per_socket != 0 {
+  c_struct.ntasks_per_socket = C.uint16_t(update_info.Ntasks_per_socket)
+ }
+ if update_info.Max_nodes != 0 {
+  c_struct.max_nodes = C.uint32_t(update_info.Max_nodes)
+ }
+ if update_info.Min_nodes != 0 {
+  c_struct.min_nodes = C.uint32_t(update_info.Min_nodes)
+ }
+
+ /* only pending jobs may be updated this way */
+ job_list := job_info.Get_job(JobId)
+ if job_list.Error_code != 0 {
+  return uint32(job_list.Error_code)
+ }
+ job := job_list.Job_list[0]
+ if job.Job_state != C.JOB_PENDING {
+  return uint32(C.ESLURM_JOB_NOT_PENDING)
+ }
+ c_struct.job_id = C.uint32_t(JobId)
+
+ err := C.update_job(&c_struct)
+ return uint32(err)
+}
diff --git a/examples/ali.go b/examples/pod/ali.go
similarity index 100%
rename from examples/ali.go
rename to examples/pod/ali.go
diff --git a/examples/huawei.go b/examples/pod/huawei.go
similarity index 100%
rename from examples/huawei.go
rename to examples/pod/huawei.go
diff --git a/examples/pcm_ali.go b/examples/pod/pcm_ali.go
similarity index 100%
rename from examples/pcm_ali.go
rename to examples/pod/pcm_ali.go
diff --git a/examples/pcm_huawei.go b/examples/pod/pcm_huawei.go
similarity index 100%
rename from examples/pcm_huawei.go
rename to examples/pod/pcm_huawei.go
diff --git a/examples/pcm_tencent.go b/examples/pod/pcm_tencent.go
similarity index 100%
rename from examples/pcm_tencent.go
rename to examples/pod/pcm_tencent.go
diff --git a/examples/tencent.go b/examples/pod/tencent.go
similarity index 100%
rename from examples/tencent.go
rename to examples/pod/tencent.go
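Before the example programs, a compact sketch of driving the `Submit_job` entry point defined above. This is a sketch only: the *long* partition is a placeholder, and, as in the examples that follow, user and group id are taken from the calling user.

```go
package main

import "slurm"
import "slurm/submitjob"
import "fmt"
import "os/user"
import "strconv"

func main() {
	job_desc := submit_job.Job_descriptor{}
	job_desc.Name = "sketch_job"
	job_desc.Partition = "long" // placeholder partition name
	job_desc.Script = "#!/bin/bash\nhostname\n"
	job_desc.Min_nodes = uint32(1)
	job_desc.Time_limit = uint32(2)

	// Submit as the calling user, like the examples below do.
	u, _ := user.Current()
	uid, _ := strconv.Atoi(u.Uid)
	gid, _ := strconv.Atoi(u.Gid)
	job_desc.User_id = uint32(uid)
	job_desc.Group_id = uint32(gid)

	answer := submit_job.Submit_job(&job_desc)
	if answer.Error_code != 0 {
		fmt.Printf("Error: %s\n", slurm.GetErrorString(answer.Error_code))
		return
	}
	fmt.Printf("Submitted Job %d\n", answer.Job_id)
}
```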
diff --git a/examples/slurm/extra/cancel_job.go b/examples/slurm/extra/cancel_job.go
new file mode 100644
index 00000000..69abd335
--- /dev/null
+++ b/examples/slurm/extra/cancel_job.go
@@ -0,0 +1,20 @@
+package main
+
+import "slurm/extra"
+import "fmt"
+import "os"
+import "strconv"
+
+func main() {
+ if len(os.Args) < 2 {
+  fmt.Printf("Please specify Job ID\n")
+  return
+ }
+ id, err := strconv.Atoi(os.Args[1])
+ if err != nil {
+  fmt.Printf("Invalid job id (not an int): %s\n", os.Args[1])
+  return
+ }
+ fmt.Printf("trying to cancel job %d\n", id)
+ if err := extra.Cancel_job(uint32(id)); err != nil {
+  fmt.Println(err.Error())
+ }
+}
diff --git a/examples/slurm/extra/get_job_account_info.go b/examples/slurm/extra/get_job_account_info.go
new file mode 100644
index 00000000..fc70ddf4
--- /dev/null
+++ b/examples/slurm/extra/get_job_account_info.go
@@ -0,0 +1,25 @@
+package main
+
+import "slurm/extra"
+import "fmt"
+import "os"
+import "strconv"
+
+func main() {
+ if len(os.Args) < 2 {
+  fmt.Printf("Please specify Job ID\n")
+  return
+ }
+ id, err := strconv.Atoi(os.Args[1])
+ if err != nil {
+  fmt.Printf("Invalid job id (not an int): %s\n", os.Args[1])
+  return
+ }
+ jobs, err := extra.Get_job_info_accounting(uint32(id))
+ if err != nil {
+  fmt.Println(err.Error())
+  return
+ }
+ fmt.Printf("JobId\tuser\taccount\tstate\t\tJobName\n")
+ for i := range jobs {
+  fmt.Printf("%d\t%s\t%s\t%s\t%s\n", jobs[i].JobId, jobs[i].User, jobs[i].Account, jobs[i].State, jobs[i].JobName)
+ }
+}
diff --git a/examples/slurm/get_all_jobs.go b/examples/slurm/get_all_jobs.go
new file mode 100644
index 00000000..37c901a7
--- /dev/null
+++ b/examples/slurm/get_all_jobs.go
@@ -0,0 +1,19 @@
+package main
+
+import "slurm/jobinfo"
+import "fmt"
+
+func main() {
+ job_list := job_info.Get_all_jobs()
+ fmt.Printf("Found %d jobs\n", job_list.Record_count)
+ /* a little bit nicer */
+ fmt.Printf("Id\tName\t\tPartition\tUser\tRuntime\tStatus\t\t(Reason)\tNodes\tPriority\n")
+ fmt.Printf("________________________________________________________________________________________________\n")
+ for i := range job_list.Job_list {
+  job := job_list.Job_list[i]
+  fmt.Printf("%d\t%s\t%s\t%s %s\t%s\t%s\t%s\t%d\n",
+   job.Job_id, job.Name, job.Partition, job.User_name, job_info.Get_job_runtime(job).String(), job.Job_stateS,
+   job_info.Reason_to_string(job.State_reason), job.Nodes, job.Priority)
+ }
+}
diff --git a/examples/slurm/get_all_nodes.go b/examples/slurm/get_all_nodes.go
new file mode 100644
index 00000000..19b21c57
--- /dev/null
+++ b/examples/slurm/get_all_nodes.go
@@ -0,0 +1,20 @@
+package main
+
+import "slurm/nodeinfo"
+import "fmt"
+
+func main() {
+ node_list := node_info.Get_all_nodes()
+ fmt.Printf("Found %d nodes\n", node_list.Record_count)
+
+ /* a little bit nicer */
+ fmt.Printf("name\t State\t\t\t Reason\t\t Tres\n")
+ fmt.Printf("________________________________________\n")
+ for i := range node_list.Node_list {
+  node := node_list.Node_list[i]
+  fmt.Printf("%s\t %s\t %s\t %s\n", node.Node_hostname, node_info.State_to_string(node.Node_state), node.Reason, node.Tres_fmt_str)
+ }
+}
diff --git a/examples/slurm/get_info.go b/examples/slurm/get_info.go
new file mode 100644
index 00000000..580cb58a
--- /dev/null
+++ b/examples/slurm/get_info.go
@@ -0,0 +1,15 @@
+package main
+
+import "slurm"
+import "fmt"
+
+func main() {
+ version := slurm.Version()
+ fmt.Printf("Version is %s\n", slurm.VersionString(version))
+ config := slurm.GetConfig()
+ slurm.Print_Ctl_conf(config)
+}
diff --git a/examples/slurm/get_job_by_id.go b/examples/slurm/get_job_by_id.go
new file mode 100644
index 00000000..3ae4fa2e
--- /dev/null
+++ b/examples/slurm/get_job_by_id.go
@@ -0,0 +1,41 @@
+package main
+
+import "slurm/jobinfo"
+import "slurm"
+import "fmt"
+import "os"
+import "strconv"
+
+func main() {
+ if len(os.Args) < 2 {
+  fmt.Printf("Please specify Job ID\n")
+  return
+ }
+ id, _ := strconv.Atoi(os.Args[1])
+ job_list := job_info.Get_job(uint32(id))
+ if job_list.Error_code != 0 {
+  msg := slurm.GetErrorString(job_list.Error_code)
+  fmt.Printf("Error: %s\n", msg)
+  return
+ }
+ for i := range job_list.Job_list {
+  job_info.Print_Job_info(job_list.Job_list[i])
+ }
+ fmt.Printf("Id\tName\t\tPartition\tUser\tRuntime\tStatus\t\t(Reason)\tNodes\tPriority\n")
+ fmt.Printf("________________________________________________________________________________________________\n")
+ for i := range job_list.Job_list {
+  job := job_list.Job_list[i]
+  fmt.Printf("%d\t%s\t%s\t%s %s\t%s\t%s\t%s\t%d\n",
+   job.Job_id, job.Name, job.Partition, job.User_name, job_info.Get_job_runtime(job).String(), job.Job_stateS,
+   job_info.Reason_to_string(job.State_reason), job.Nodes, job.Priority)
+ }
+
+ end_time := job_info.Get_job_endtime(uint32(id))
+ fmt.Printf("End-Time: %v\n", end_time)
+}
diff --git a/examples/slurm/get_node_info.go b/examples/slurm/get_node_info.go
new file mode 100644
index 00000000..a7d2201e
--- /dev/null
+++ b/examples/slurm/get_node_info.go
@@ -0,0 +1,32 @@
+package main
+
+import "slurm/nodeinfo"
+import "slurm"
+import "fmt"
+import "os"
+
+func main() {
+ if len(os.Args) < 2 {
+  fmt.Printf("Please specify node name\n")
+  return
+ }
+ name := os.Args[1]
+
+ node_list := node_info.Get_node_info(name)
+ if node_list.Error_code != 0 {
+  msg := slurm.GetErrorString(node_list.Error_code)
+  fmt.Printf("Error: %s\n", msg)
+  return
+ }
+ fmt.Printf("Found %d nodes\n", node_list.Record_count)
+
+ /* a little bit nicer */
+ fmt.Printf("name\t State\t\t\t Reason\t\t Tres\n")
+ fmt.Printf("________________________________________\n")
+ for i := range node_list.Node_list {
+  node := node_list.Node_list[i]
+  fmt.Printf("%s\t %s\t %s\t %s\n", node.Node_hostname, node_info.State_to_string(node.Node_state), node.Reason, node.Tres_fmt_str)
+ }
+}
diff --git a/examples/slurm/get_partitions.go b/examples/slurm/get_partitions.go
new file mode 100644
index 00000000..e5699ff8
--- /dev/null
+++ b/examples/slurm/get_partitions.go
@@ -0,0 +1,20 @@
+package main
+
+import "slurm/partitioninfo"
+import "fmt"
+
+func main() {
+ partition_list := partition_info.Get_partitions()
+ fmt.Printf("Found %d partitions\n", partition_list.Record_count)
+
+ /* a little bit nicer */
+ fmt.Printf("Name\t Nodes\t\t\t Max_time(min)\t\t Node_inx\n")
+ fmt.Printf("________________________________________\n")
+ for i := range partition_list.Partition_list {
+  partition := partition_list.Partition_list[i]
+  fmt.Printf("%s\t %s\t %d\t %d\n", partition.Name, partition.Nodes, partition.Max_time, partition.Node_inx)
+ }
+}
diff --git a/examples/slurm/get_user_jobs.go b/examples/slurm/get_user_jobs.go
new file mode 100644
index 00000000..3417b101
--- /dev/null
+++ b/examples/slurm/get_user_jobs.go
@@ -0,0 +1,35 @@
+package main
+
+import "slurm/jobinfo"
+import "slurm"
+import "fmt"
+import "os"
+
+func main() {
+ if len(os.Args) < 2 {
+  fmt.Printf("Please specify username\n")
+  return
+ }
+ name := os.Args[1]
+ job_list := job_info.Get_user_jobs(name)
+ if job_list.Error_code != 0 {
+  msg := slurm.GetErrorString(job_list.Error_code)
+  fmt.Printf("Error: %s\n", msg)
+  return
+ }
+ fmt.Printf("Id\tName\t\tPartition\tUser\tRuntime\tStatus\t\t(Reason)\tNodes\tPriority\n")
+ fmt.Printf("________________________________________________________________________________________________\n")
+ for i := range job_list.Job_list {
+  job := job_list.Job_list[i]
+  fmt.Printf("%d\t%s\t%s\t%s %s\t%s\t%s\t%s\t%d\n",
+   job.Job_id, job.Name, job.Partition, job.User_name, job_info.Get_job_runtime(job).String(), job.Job_stateS,
+   job_info.Reason_to_string(job.State_reason), job.Nodes, job.Priority)
+ }
+}
diff --git a/examples/slurm/submit_example/Readme.md b/examples/slurm/submit_example/Readme.md
new file mode 100644
index 00000000..571a4675
--- /dev/null
+++ b/examples/slurm/submit_example/Readme.md
@@ -0,0 +1,109 @@
+# Submission of jobs
+This folder shows, in a few more examples, how jobs can be submitted in Slurm. Some examples use containers.
+Attention: The parameters for job names and partitions probably have to be adjusted!
+
+# Simple Jobs
+
+## submit_job.go
+In this example, a simple bash job is submitted. The partition used is *long* (adapt if necessary).
+```
+job_desc.Partition="long"
+```
+
+The job sets two environment variables and executes
+```
+hostname
+env | grep SLURM
+```
+on a single node of the cluster (single-task job).
+The application does not wait until the job is completed, but returns directly.
+The standard output is written to out-jobid.txt, the standard error to err-jobid.txt:
+
+```
+job_desc.Std_out = ("./out-%j.txt")
+job_desc.Std_err = ("./err-%j.txt")
+```
+
+## update_job.go
+This example allows updating the QOS and the partition of a job. This can help to move the job to another queue with another partition.
+Note to users: In theory, the API also allows updating the number of nodes and the tasks per node. However, since this is only allowed for root or a Slurm admin, we do not include an example here.
+Syntax:
+```
+./update_job JobId qos partition
+```
+(Note: This requires that the job with the id JobId is already submitted and in a pending state.)
+
+# Container jobs
+
+The following examples all submit a job that starts singularity containers.
+These containers, if they do not exist, are created. However, problems can arise if the user does not have sudo permissions.
+
+## The containers
+
+The first container is an MPI container. It is used by `submit_mpi_containier.go` and `submit_mpi_and_update.go`. The definition is stored in `mpi_container.def`.
+It can also be created with the command
+```
+sudo singularity build mpi_container.img mpi_container.def
+```
+
+The program mpi_pingpong (source code enclosed: `mpi_pingpong.c`) is built into the container. It performs a ping-pong test between two processes.
+
+This container uses the hybrid model, which assumes that MPI is installed on the cluster (to start the job) and installs it in the container itself. It works with OpenMPI.
+
+The second container is an OpenMP container, including a sample OpenMP program openmp_example (source code: `openmp_example.c`).
+It can also be created with the command:
+```
+sudo singularity build openmp_container.img openmp_container.def
+```
+
+This container is used by `submit_openmp_container.go`.
+
+## submit_mpi_containier.go
+Submits an MPI container job to the cluster. It runs two processes on two nodes:
+```
+job_desc.Min_nodes =uint32(2)
+job_desc.Num_tasks = uint32(2)
+```
+The application blocks until the job is completed. The standard output is written to jobid-out.txt, the standard error to jobid-err.txt:
+```
+job_desc.Std_out = ("./%j-out.txt")
+job_desc.Std_err = ("./%j-err.txt")
+```
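+
+The blocking wait used by the container examples is a plain polling loop over
+the jobinfo bindings. A minimal, self-contained sketch (the job id `42` is
+hypothetical; the state strings match the wrappers in this repository):
+```go
+package main
+
+import "slurm"
+import "slurm/jobinfo"
+import "fmt"
+import "time"
+
+func main() {
+	id := uint32(42) // hypothetical job id
+	job_list := job_info.Get_job(id)
+	if job_list.Error_code != 0 {
+		fmt.Printf("Error: %s\n", slurm.GetErrorString(job_list.Error_code))
+		return
+	}
+	state := job_list.Job_list[0].Job_stateS
+	// Re-query every two seconds until the job leaves Pending/Running.
+	for state == "Pending" || state == "Running" {
+		time.Sleep(2 * time.Second)
+		job_list = job_info.Get_job(id)
+		if job_list.Error_code != 0 {
+			return
+		}
+		state = job_list.Job_list[0].Job_stateS
+	}
+}
+```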
+## submit_openmp_container.go
+Submits two OpenMP jobs to the cluster and waits until they are completed.
+Both jobs allocate *one process* for the job, but *two CPUs per task/process* (for multi-threading).
+```
+job_desc.Num_tasks = uint32(1)
+job_desc.Cpus_per_task = uint16(2)
+```
+The first job reads the environment variable `SLURM_JOB_CPUS_PER_NODE` and sets the number of OpenMP threads to exactly the number of CPUs that are available per task/process.
+```
+job_desc.Script+= "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
+```
+The second job sets the number of threads to 4 (which is oversubscribing, because more threads are started than CPUs were allocated) and executes the same job.
+```
+job_desc.Script+= "export OMP_NUM_THREADS=4\n"
+```
+
+The program waits until both jobs are completed. The results are written to the two output files, similar to `submit_mpi_containier.go`.
+
+## submit_mpi_and_update.go
+This application does the same as `submit_mpi_containier.go`, but if the job is still pending after submission, it moves the job to the *shortjobs* QOS on the *short* partition:
+```
+ops.Qos = "shortjobs"
+ops.Partition = "short"
+```
+
+This situation can, for example, be created by first submitting other, longer jobs in the background (depending on the partition size) and then starting this application:
+
+```
+./submit_mpi_containier & ./submit_mpi_containier & ./submit_mpi_and_update
+```
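For reference, the update described in the last Readme section boils down to a few lines against the submitjob package. A sketch with placeholder names (the QOS *shortjobs* and the partition *short* must exist on the target cluster, and the job id is hypothetical):

```go
package main

import "slurm"
import "slurm/jobinfo"
import "slurm/submitjob"
import "fmt"

// move_if_pending re-routes a job that is still pending; for jobs that
// have already started, Update_job returns ESLURM_JOB_NOT_PENDING.
func move_if_pending(id uint32) {
	job_list := job_info.Get_job(id)
	if job_list.Error_code != 0 || job_list.Job_list[0].Job_stateS != "Pending" {
		return
	}
	var ops submit_job.Update_job_options
	ops.Qos = "shortjobs"   // placeholder QOS
	ops.Partition = "short" // placeholder partition
	if rc := submit_job.Update_job(ops, id); rc != 0 {
		fmt.Printf("error %s\n", slurm.GetErrorString(rc))
	}
}

func main() { move_if_pending(42) } // hypothetical job id
```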
diff --git a/examples/slurm/submit_example/mpi_container.def b/examples/slurm/submit_example/mpi_container.def
new file mode 100644
index 00000000..0453082e
--- /dev/null
+++ b/examples/slurm/submit_example/mpi_container.def
@@ -0,0 +1,37 @@
+Bootstrap: docker
+From: ubuntu:latest
+
+%files
+ mpi_pingpong.c /opt
+
+%environment
+ export OMPI_DIR=/home0/opt/openmpi
+ export SINGULARITY_OMPI_DIR=$OMPI_DIR
+ export SINGULARITYENV_APPEND_PATH=$OMPI_DIR/bin
+ export SINGULARITYENV_APPEND_LD_LIBRARY_PATH=$OMPI_DIR/lib
+
+%post
+ echo "Installing required packages..."
+ apt-get update && apt-get install -y wget git bash gcc gfortran g++ make file
+
+ echo "Installing Open MPI"
+ export OMPI_DIR=/home0/opt/openmpi
+ export OMPI_VERSION=4.0.3
+ export OMPI_URL="https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-$OMPI_VERSION.tar.bz2"
+ mkdir -p /tmp/ompi
+ mkdir -p /opt
+ chmod a+w /opt/
+ chmod a+r /opt/
+ ls -la /tmp/ompi
+ # Download
+ cd /tmp/ompi && wget -O openmpi-$OMPI_VERSION.tar.bz2 $OMPI_URL && tar -xjf openmpi-$OMPI_VERSION.tar.bz2
+ ls -la
+ # Compile and install
+ cd /tmp/ompi/openmpi-$OMPI_VERSION && ./configure --prefix=$OMPI_DIR && make install
+ # Set env variables so we can compile our application
+ export PATH=$OMPI_DIR/bin:$PATH
+ export LD_LIBRARY_PATH=$OMPI_DIR/lib:$LD_LIBRARY_PATH
+ export MANPATH=$OMPI_DIR/share/man:$MANPATH
+ # rm -r /tmp/ompi
+ echo "Compiling the MPI application..."
+ cd /opt && mpicc -o mpi_pingpong mpi_pingpong.c
diff --git a/examples/slurm/submit_example/mpi_pingpong.c b/examples/slurm/submit_example/mpi_pingpong.c
new file mode 100644
index 00000000..bdaa3f1a
--- /dev/null
+++ b/examples/slurm/submit_example/mpi_pingpong.c
@@ -0,0 +1,65 @@
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#define MAX_ITER 1000
+int main (int argc, char **argv) {
+ int rc;
+ int size;
+ int myrank;
+ size_t max_send = 1<<22;
+ char *send_buf = (char*)malloc(sizeof(char)*max_send);
+ char *recv_buf = (char*)malloc(sizeof(char)*max_send);
+ size_t send_size;
+ clock_t start, end;
+ rc = MPI_Init (&argc, &argv);
+ if (rc != MPI_SUCCESS) {
+  fprintf (stderr, "MPI_Init() failed");
+  return EXIT_FAILURE;
+ }
+
+ rc = MPI_Comm_size (MPI_COMM_WORLD, &size);
+ if (rc != MPI_SUCCESS) {
+  fprintf (stderr, "MPI_Comm_size() failed");
+  goto exit_with_error;
+ }
+
+ if (size != 2) {
+  fprintf (stderr, "This program requires exactly two processes\n");
+  goto exit_with_error;
+ }
+ rc = MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
+ if (rc != MPI_SUCCESS) {
+  fprintf (stderr, "MPI_Comm_rank() failed");
+  goto exit_with_error;
+ }
+ if (myrank == 0)
+  fprintf (stdout, "Size\t Time(ms)\n");
+
+ for (send_size = 1; send_size <= max_send; send_size *= 2) {
+  start = clock();
+  for (int i = 0; i < MAX_ITER; i++) {
+   /* ping-pong kernel: rank 0 sends and waits for the echo,
+      rank 1 echoes the message back */
+   if (myrank == 0) {
+    MPI_Send(send_buf, (int)send_size, MPI_CHAR, 1, 0, MPI_COMM_WORLD);
+    MPI_Recv(recv_buf, (int)send_size, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+   } else {
+    MPI_Recv(recv_buf, (int)send_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+    MPI_Send(send_buf, (int)send_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
+   }
+  }
+  end = clock();
+  if (myrank == 0)
+   fprintf(stdout, "%zu\t %f\n", send_size, (double)(end - start) * 1000.0 / CLOCKS_PER_SEC / MAX_ITER);
+ }
+
+ free(send_buf);
+ free(recv_buf);
+ MPI_Finalize();
+ return EXIT_SUCCESS;
+
+exit_with_error:
+ MPI_Finalize();
+ return EXIT_FAILURE;
+}
diff --git a/examples/slurm/submit_example/openmp_example.c b/examples/slurm/submit_example/openmp_example.c
new file mode 100644
--- /dev/null
+++ b/examples/slurm/submit_example/openmp_example.c
@@ -0,0 +1,14 @@
+#include <omp.h>
+#include <stdio.h>
+
+int main() {
+ #pragma omp parallel
+ {
+  int id = omp_get_thread_num();
+  int data = id;
+  int total = omp_get_num_threads();
+  printf("Greetings from thread %d out of %d with Data %d\n", id, total, data);
+ }
+ printf("parallel region ends.\n");
+ return 0;
+}
diff --git a/examples/slurm/submit_example/submit_job.go b/examples/slurm/submit_example/submit_job.go
new file mode 100644
index 00000000..22bc88c9
--- /dev/null
+++ b/examples/slurm/submit_example/submit_job.go
@@ -0,0 +1,36 @@
+package main
+
+import "slurm/submitjob"
+import "slurm"
+import "os/user"
+import "os"
+import "strconv"
+import "fmt"
+
+func main() {
+ job_desc := submit_job.Job_descriptor{}
+ job_desc.Script = "#!
/bin/bash\n hostname \n env | grep SLURM " + dir, _ := os.Getwd() + + user, _:= user.Current() + userid , _ := strconv.Atoi(user.Uid) + job_desc.User_id= uint32(userid) + groupid , _ := strconv.Atoi(user.Gid) + + job_desc.Group_id= uint32(groupid) + job_desc.Name = "test_job" + job_desc.Partition="long" + job_desc.Time_limit = uint32(2) + job_desc.Min_nodes =uint32(1) + job_desc.Std_out = ("./out-%j.txt") + job_desc.Std_err = ("./err-%j.txt") + job_desc.Work_dir = dir + job_desc.Environment = []string{"SLURM_GO_JOB=TRUE", "SLURM_CONTAINER_JOB=FALSE"} + answer := submit_job.Submit_job(&job_desc) + if(answer.Error_code != 0) { + msg := slurm.GetErrorString(answer.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + } + fmt.Printf("Submitted Job %d\n", answer.Job_id) +} diff --git a/examples/slurm/submit_example/submit_mpi_and_update.go b/examples/slurm/submit_example/submit_mpi_and_update.go new file mode 100644 index 00000000..c9549066 --- /dev/null +++ b/examples/slurm/submit_example/submit_mpi_and_update.go @@ -0,0 +1,127 @@ +package main + + +import "slurm/submitjob" +import "slurm" +import "os" +import "strconv" +import "fmt" +import "os/exec" +import "path/filepath" +import "slurm/jobinfo" +import "time" +import "os/user" + + + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} +func build_container(file_name,container_name string){ + + cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build",container_name, file_name) + fmt.Print("Now build new container") + fmt.Printf("%s\n", cmd.String()) + stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + fmt.Printf("error in creating container %s \n", err); +// return + } + fmt.Printf("%s\n", stdoutStderr) +} + +func main(){ + job_desc := submit_job.Job_descriptor{} + + dir, _ := os.Getwd() + container := filepath.Join(dir, "mpi_container.img") + definition := filepath.Join(dir, "mpi_container.def") + if !fileExists(container){ + build_container(definition,container) + } + + if !fileExists(container){ + return + } + /* use Cmd to create our script */ + + job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n srun hostname \n" + cmd := exec.Command( "/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec",container, "/opt/mpi_pingpong" ) + job_desc.Script+= cmd.String() + fmt.Printf("cmd %s\n", job_desc.Script) + user, _:= user.Current() + userid , _ := strconv.Atoi(user.Uid) + job_desc.User_id= uint32(userid) + groupid , _ := strconv.Atoi(user.Gid) + + job_desc.Group_id= uint32(groupid) + job_desc.Name = "flex_mpi_job" + job_desc.Partition="long" + job_desc.Time_limit = uint32(60) + job_desc.Ntasks_per_node = uint16(1) + job_desc.Num_tasks = uint32(2) + job_desc.Std_out = ("./%j-out.txt") + job_desc.Std_err = ("./%j-err.txt") + job_desc.Work_dir = dir + + time.Sleep(3 * time.Second) + answer := submit_job.Submit_job(&job_desc) + if(answer.Error_code != 0) { + msg := slurm.GetErrorString(answer.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + } + fmt.Printf("Submitted Job %d\n", answer.Job_id) + + time.Sleep(5 * time.Second) + + job_list := job_info.Get_job(answer.Job_id) + if job_list.Error_code != 0 { + msg := slurm.GetErrorString(job_list.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + + } + job := job_list.Job_list[0] + + fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS) + state := job.Job_stateS + if state == "Pending" { + fmt.Printf("Move job %d 
to another partition \n", answer.Job_id) + var ops submit_job.Update_job_options + + ops.Qos = "shortjobs" + ops.Partition = "short" + err2 := submit_job.Update_job(ops, uint32(answer.Job_id)) + if err2!= uint32(0) { + fmt.Printf("error %s \n", slurm.GetErrorString(err2)) + } + } + + for state == "Pending" || state == "Running" { + time.Sleep(2 * time.Second) + job_list = job_info.Get_job(answer.Job_id) + if job_list.Error_code != 0 { + msg := slurm.GetErrorString(job_list.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + + } + job = job_list.Job_list[0] + + state = job.Job_stateS + + fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS) + + + } + + fmt.Printf("Total runtime Job %d %s\n",job.Job_id, job_info.Get_job_runtime(job).String() ) +} + + + diff --git a/examples/slurm/submit_example/submit_mpi_containier.go b/examples/slurm/submit_example/submit_mpi_containier.go new file mode 100644 index 00000000..64b85e7a --- /dev/null +++ b/examples/slurm/submit_example/submit_mpi_containier.go @@ -0,0 +1,111 @@ +package main + +import "slurm/submitjob" +import "slurm" +import "os/user" +import "os" +import "strconv" +import "fmt" +import "os/exec" +import "path/filepath" +import "slurm/jobinfo" +import "time" + + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} +func build_container(file_name,container_name string){ + + cmd := exec.Command("sudo","/usr/local/bin/singularity", "build",container_name, file_name) + fmt.Print("Now build new container") + fmt.Printf("%s\n", cmd.String()) + stdoutStderr, err := cmd.CombinedOutput() + if err != nil { + fmt.Printf("error in creating container %s \n", err) + + fmt.Printf("%s\n", stdoutStderr) +// return + } + fmt.Printf("%s\n", stdoutStderr) +} + +func main(){ + job_desc := submit_job.Job_descriptor{} + + dir, _ := os.Getwd() + container := filepath.Join(dir, "mpi_container.img") + definition := filepath.Join(dir, "mpi_container.def") + if !fileExists(container){ + build_container(definition,container) + } + + if !fileExists(container){ + return + } + /* use Cmd to create our script */ + + job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n" + cmd := exec.Command( "/home0/opt/openmpi/bin/mpirun", "-mca btl_tcp_if_include eth1", "/usr/local/bin/singularity", "exec",container, "/opt/mpi_pingpong" ) + job_desc.Script+= cmd.String() + fmt.Printf("cmd %s\n", job_desc.Script) + user, _:= user.Current() + userid , _ := strconv.Atoi(user.Uid) + job_desc.User_id= uint32(userid) + groupid , _ := strconv.Atoi(user.Gid) + + job_desc.Group_id= uint32(groupid) + job_desc.Name = "mpi_job" + job_desc.Partition="long" + job_desc.Time_limit = uint32(60) + job_desc.Min_nodes =uint32(2) + job_desc.Num_tasks = uint32(2) + job_desc.Std_out = ("./%j-out.txt") + job_desc.Std_err = ("./%j-err.txt") + job_desc.Work_dir = dir + + answer := submit_job.Submit_job(&job_desc) + if(answer.Error_code != 0) { + msg := slurm.GetErrorString(answer.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + } + fmt.Printf("Submitted Job %d\n", answer.Job_id) + + + job_list := job_info.Get_job(answer.Job_id) + if job_list.Error_code != 0 { + msg := slurm.GetErrorString(job_list.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + + } + job := job_list.Job_list[0] + + fmt.Printf("job %d is %s\n",answer.Job_id, job.Job_stateS) + state := job.Job_stateS + for state == "Pending" || state == "Running" { + time.Sleep(2 * time.Second) + job_list = 
job_info.Get_job(answer.Job_id)
+  if job_list.Error_code != 0 {
+   msg := slurm.GetErrorString(job_list.Error_code)
+   fmt.Printf("Error: %s\n", msg)
+   return
+  }
+  job = job_list.Job_list[0]
+  state = job.Job_stateS
+  fmt.Printf("job %d is %s\n", answer.Job_id, job.Job_stateS)
+ }
+
+ fmt.Printf("Total runtime Job %d: %s\n", job.Job_id, job_info.Get_job_runtime(job).String())
+}
diff --git a/examples/slurm/submit_example/submit_openmp_container.go b/examples/slurm/submit_example/submit_openmp_container.go
new file mode 100644
index 00000000..80afa886
--- /dev/null
+++ b/examples/slurm/submit_example/submit_openmp_container.go
@@ -0,0 +1,162 @@
+package main
+
+import "slurm/submitjob"
+import "slurm"
+import "os/user"
+import "os"
+import "strconv"
+import "fmt"
+import "os/exec"
+import "path/filepath"
+import "slurm/jobinfo"
+import "time"
+
+func fileExists(filename string) bool {
+ info, err := os.Stat(filename)
+ if os.IsNotExist(err) {
+  return false
+ }
+ return !info.IsDir()
+}
+
+func build_container(file_name, container_name string) {
+ cmd := exec.Command("sudo", "/usr/local/bin/singularity", "build", container_name, file_name)
+ fmt.Print("Now building new container\n")
+ fmt.Printf("%s\n", cmd.String())
+ stdoutStderr, err := cmd.CombinedOutput()
+ if err != nil {
+  fmt.Printf("error in creating container %s \n", err)
+ }
+ fmt.Printf("%s\n", stdoutStderr)
+}
+
+func main() {
+ job_desc := submit_job.Job_descriptor{}
+
+ dir, _ := os.Getwd()
+ container := filepath.Join(dir, "openmp_container.img")
+ definition := filepath.Join(dir, "openmp_container.def")
+ if !fileExists(container) {
+  build_container(definition, container)
+ }
+ if !fileExists(container) {
+  return
+ }
+ /* use Cmd to create our script */
+ job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
+ job_desc.Script += "export OMP_NUM_THREADS=$SLURM_JOB_CPUS_PER_NODE\n"
+ cmd := exec.Command("/usr/local/bin/singularity", "exec", container, "/opt/openmp_example")
+ job_desc.Script += cmd.String()
+ fmt.Printf("cmd %s\n", job_desc.Script)
+ user, _ := user.Current()
+ userid, _ := strconv.Atoi(user.Uid)
+ job_desc.User_id = uint32(userid)
+ groupid, _ := strconv.Atoi(user.Gid)
+ job_desc.Group_id = uint32(groupid)
+ job_desc.Name = "test_job"
+ job_desc.Partition = "long"
+ job_desc.Time_limit = uint32(60)
+ job_desc.Min_nodes = uint32(1)
+ job_desc.Num_tasks = uint32(1)
+ job_desc.Cpus_per_task = uint16(2)
+ job_desc.Std_out = ("./%j-out.txt")
+ job_desc.Std_err = ("./%j-err.txt")
+ job_desc.Work_dir = dir
+
+ answer := submit_job.Submit_job(&job_desc)
+ if answer.Error_code != 0 {
+  msg := slurm.GetErrorString(answer.Error_code)
+  fmt.Printf("Error: %s\n", msg)
+  return
+ }
+ fmt.Printf("Submitted Job %d\n", answer.Job_id)
+
+ /* Now we submit the same job again, but with some oversubscription */
+ job_desc.Script = "#!/bin/bash\n export PATH=$PATH:/usr/local/bin\n hostname \n"
+ job_desc.Script += "export OMP_NUM_THREADS=4\n"
+ job_desc.Script += cmd.String()
+ fmt.Printf("cmd %s\n", job_desc.Script)
+ answer2 := submit_job.Submit_job(&job_desc)
+ if answer2.Error_code != 0 {
+  msg := slurm.GetErrorString(answer2.Error_code)
+  fmt.Printf("Error: %s\n", msg)
+  return
+ }
+ fmt.Printf("Submitted Job %d\n", answer2.Job_id)
+
+ job_list := job_info.Get_job(answer.Job_id)
+ if job_list.Error_code != 0 {
+  msg := slurm.GetErrorString(job_list.Error_code)
+  fmt.Printf("Error: %s\n", msg)
+  return
+ }
+ job := job_list.Job_list[0]
+
fmt.Printf("job is %s\n",job.Job_stateS) + state := job.Job_stateS + for state == "Pending" || state == "Running" { + time.Sleep(2 * time.Second) + job_list = job_info.Get_job(answer.Job_id) + if job_list.Error_code != 0 { + msg := slurm.GetErrorString(job_list.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + + } + job = job_list.Job_list[0] + + state = job.Job_stateS + + fmt.Printf("job is %s\n",job.Job_stateS) + + + } + + fmt.Printf("Total runtime first job %s\n",job_info.Get_job_runtime(job).String() ) + /*wait for second job */ + job_list = job_info.Get_job(answer2.Job_id) + if job_list.Error_code != 0 { + msg := slurm.GetErrorString(job_list.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + + } + job = job_list.Job_list[0] + + fmt.Printf("job is %s\n",job.Job_stateS) + state = job.Job_stateS + for state == "Pending" || state == "Running" { + time.Sleep(2 * time.Second) + job_list = job_info.Get_job(answer2.Job_id) + if job_list.Error_code != 0 { + msg := slurm.GetErrorString(job_list.Error_code) + fmt.Printf("Error: %s\n" ,msg) + return + + } + job = job_list.Job_list[0] + + state = job.Job_stateS + + fmt.Printf("job is %s\n",job.Job_stateS) + + + } + + + fmt.Printf("Total runtime second job %s\n",job_info.Get_job_runtime(job).String() ) + +} diff --git a/examples/slurm/submit_example/update_job.go b/examples/slurm/submit_example/update_job.go new file mode 100644 index 00000000..70fe3788 --- /dev/null +++ b/examples/slurm/submit_example/update_job.go @@ -0,0 +1,28 @@ +package main + + +import "slurm/submitjob" +import "slurm" +import "os" +import "strconv" +import "fmt" +func main(){ + if len(os.Args)<4 { + fmt.Printf("Synthax specify JobID, qos and partition \n") + return + } + var ops submit_job.Update_job_options + id,err := strconv.Atoi(os.Args[1]) + if err != nil { + fmt.Printf("Invalid job id (no int) %s\n", os.Args[1] ) + return + } + + ops.Qos = os.Args[2] + ops.Partition = os.Args[3] + + err2 := submit_job.Update_job(ops, uint32(id)) + if err2!= uint32(0) { + fmt.Printf("error %s \n", slurm.GetErrorString(err2)) + } +}