fix: update HpcInfo

qiwang 2024-10-31 18:10:19 +08:00
parent ed5da106d4
commit fbf24089fd
6 changed files with 52 additions and 83 deletions

View File

@@ -76,48 +76,48 @@ type PushNoticeResp struct {
type HpcInfo struct {
Id int64 `json:"id"` // id
-TaskId int64 `json:"task_id"` // task id
-JobId string `json:"job_id"` // job id (the job id in the third-party system)
-AdapterId int64 `json:"adapter_id"` // id of the adapter that runs the task
+TaskId int64 `json:"taskId"` // task id
+JobId string `json:"jobId"` // job id (the job id in the third-party system)
+AdapterId int64 `json:"adapterId"` // id of the adapter that runs the task
AdapterName string `json:"adapterName,omitempty,optional"`
-ClusterId int64 `json:"cluster_id"` // id of the cluster that runs the task
+ClusterId int64 `json:"clusterId"` // id of the cluster that runs the task
ClusterName string `json:"clusterName,omitempty,optional"`
-ClusterType string `json:"cluster_type"` // type of the cluster that runs the task
+ClusterType string `json:"clusterType"` // type of the cluster that runs the task
Name string `json:"name"` // name
Status string `json:"status"` // status
-CmdScript string `json:"cmd_script"`
-StartTime string `json:"start_time"` // start time
-RunningTime int64 `json:"running_time"` // running time
-DerivedEs string `json:"derived_es"`
+CmdScript string `json:"cmdScript"`
+StartTime string `json:"startTime"` // start time
+RunningTime int64 `json:"runningTime"` // running time
+DerivedEs string `json:"derivedEs"`
Cluster string `json:"cluster"`
-BlockId int64 `json:"block_id"`
-AllocNodes int64 `json:"alloc_nodes"`
-AllocCpu int64 `json:"alloc_cpu"`
-CardCount int64 `json:"card_count"` // card count
+BlockId int64 `json:"blockId"`
+AllocNodes int64 `json:"allocNodes"`
+AllocCpu int64 `json:"allocCpu"`
+CardCount int64 `json:"cardCount"` // card count
Version string `json:"version"`
Account string `json:"account"`
-WorkDir string `json:"work_dir"` // working directory
-AssocId int64 `json:"assoc_id"`
+WorkDir string `json:"workDir"` // working directory
+AssocId int64 `json:"assocId"`
Partition string `json:"partition,omitempty,optional"`
-ExitCode int64 `json:"exit_code"`
-WallTime string `json:"wall_time"` // maximum running time
+ExitCode int64 `json:"exitCode"`
+WallTime string `json:"wallTime"` // maximum running time
Result string `json:"result"` // run result
-DeletedAt sql.NullTime `json:"deleted_at"` // deletion time
-YamlString string `json:"yaml_string"`
-AppType string `json:"app_type"` // application type
-AppName string `json:"app_name"` // application name
+DeletedAt sql.NullTime `json:"deletedAt"` // deletion time
+YamlString string `json:"yamlString"`
+AppType string `json:"appType"` // application type
+AppName string `json:"appName"` // application name
Queue string `json:"queue"` // queue name
-SubmitType string `json:"submit_type"` // cmd (command-line) submission mode
-NNode string `json:"n_node"` // number of nodes; when this parameter is set, GAP_NODE_STRING must be ""
-StdOutFile string `json:"std_out_file"` // working directory/std.err.%j
-StdErrFile string `json:"std_err_file"` // working directory/std.err.%j
-StdInput string `json:"std_input"`
+SubmitType string `json:"submitType"` // cmd (command-line) submission mode
+NNode string `json:"nNode"` // number of nodes; when this parameter is set, GAP_NODE_STRING must be ""
+StdOutFile string `json:"stdOutFile"` // working directory/std.err.%j
+StdErrFile string `json:"stdErrFile"` // working directory/std.err.%j
+StdInput string `json:"stdInput"`
Environment string `json:"environment"`
-DeletedFlag int64 `json:"deleted_flag"` // whether deleted (0 = no, 1 = yes)
-CreatedBy int64 `json:"created_by"` // created by
-CreatedTime time.Time `json:"created_time"` // creation time
-UpdatedBy int64 `json:"updated_by"` // updated by
-UpdatedTime time.Time `json:"updated_time"` // update time
+DeletedFlag int64 `json:"deletedFlag"` // whether deleted (0 = no, 1 = yes)
+CreatedBy int64 `json:"createdBy"` // created by
+CreatedTime time.Time `json:"createdTime"` // creation time
+UpdatedBy int64 `json:"updatedBy"` // updated by
+UpdatedTime time.Time `json:"updatedTime"` // update time
}
type CloudInfo struct {
@@ -140,57 +140,17 @@ type AiInfo struct {
AdapterId int64 `json:"adapterId,omitempty,optional"`
AdapterName string `json:"adapterName,omitempty,optional"`
ClusterId int64 `json:"clusterId,omitempty,optional"`
-//ClusterIds []int64 `json:"clusterIds,omitempty,optional"`
TaskId int64 `json:"taskId,omitempty"`
ClusterName string `json:"clusterName,omitempty,optional"`
ImageId string `json:"imageId,omitempty"`
-//ResourceId string `json:"resourceId,omitempty"`
-//AlgorithmId string `json:"algorithmId,omitempty"`
-//MetadataName string `json:"metadataName,omitempty"`
Command string `json:"command,omitempty"`
-//Environments string `json:"environments,omitempty"`
-//Parameters string `json:"parameters,omitempty"`
Name string `json:"name,omitempty"`
Status string `json:"status,omitempty"`
StartTime string `json:"startTime,omitempty"`
-//RunningTime int64 `json:"runningTime,omitempty"`
JobId string `json:"jobId,omitempty"`
FlavorId string `json:"flavorId,omitempty"`
-//TaskName string `json:"taskName,omitempty"`
-//Replica int32 `json:"replica,omitempty"`
-//ResourceType string `json:"resourceType,omitempty"`
-//CpuCoreNum int32 `json:"cpuCoreNum,omitempty"`
-//TaskType string `json:"taskType,omitempty"`
-//DatasetsName string `json:"datasetsName,omitempty"`
-//ProjectId string `json:"project_id,omitempty"`
-//StrategyName string `json:"strategyName,omitempty"`
-//ClusterToStaticWeight map[string]int32 `json:"clusterToStaticWeight,omitempty"`
-//Tops float64 `json:"tops,omitempty"`
-//ComputeCard string `json:"computeCard,omitempty,optional"`
-//CodeType string `json:"codeType,omitempty,optional"`
-//ModelName string `json:"ModelName,omitempty,optional"`
-//AlgorithmName string `json:"algorithmName,omitempty,optional"`
-//Strategy string `json:"strategy,omitempty"`
-//Envs []string `json:"envs,omitempty"`
-//Params []string `json:"params,omitempty"`
-//SpecId string `json:"specId,omitempty"`
-//DatasetsId string `json:"datasetsId,omitempty"`
-//CodeId string `json:"codeId,omitempty"`
-//Result string `json:"result,omitempty"`
-//Datasets string `json:"datasets,omitempty"`
-//AlgorithmCode string `json:"algorithmCode,omitempty"`
-//Image string `json:"image,omitempty"`
-//CreateTime string `json:"createTime,omitempty"`
-//ImageUrl string `json:"imageUrl,omitempty"`
-//SubscriptionId string `json:"subscriptionId,omitempty"`
-//ItemVersionId string `json:"itemVersionId,omitempty"`
-//ObsUrl string `json:"obsUrl,omitempty"`
}
type VmInfo struct {

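The functional effect of the HpcInfo change above is the rename of every JSON tag from snake_case to camelCase. A minimal, self-contained sketch of how fields serialize under the new tags; the trimmed-down struct, field subset, and sample values below are illustrative only, not code from the repo:

package main

import (
	"encoding/json"
	"fmt"
)

// hpcInfoSample is a cut-down, hypothetical stand-in for HpcInfo
// that carries the new camelCase tags.
type hpcInfoSample struct {
	Id        int64  `json:"id"`
	TaskId    int64  `json:"taskId"`    // was "task_id"
	ClusterId int64  `json:"clusterId"` // was "cluster_id"
	StartTime string `json:"startTime"` // was "start_time"
}

func main() {
	b, _ := json.Marshal(hpcInfoSample{Id: 1, TaskId: 2, ClusterId: 3, StartTime: "2024-10-31 18:10:19"})
	// Prints: {"id":1,"taskId":2,"clusterId":3,"startTime":"2024-10-31 18:10:19"}
	fmt.Println(string(b))
}

One consequence worth noting: callers that still send or expect the old snake_case keys will most likely no longer match these fields after the rename.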
View File

@@ -54,7 +54,7 @@ func (l *AsynCommitAiTaskLogic) AsynCommitAiTask(req *types.AsynCommitAiTaskReq)
return nil, tx.Error
}
-l.svcCtx.DbEngin.Raw("SELECT nickname FROM `t_cluster` where id = ?", req.ClusterId).Scan(&clusterName)
+l.svcCtx.DbEngin.Raw("SELECT name FROM `t_cluster` where id = ?", req.ClusterId).Scan(&clusterName)
l.svcCtx.DbEngin.Raw("SELECT adapter_id FROM `t_cluster` where id = ?", req.ClusterId).Scan(&adapterId)
l.svcCtx.DbEngin.Raw("SELECT name FROM `t_adapter` where id = ?", adapterId).Scan(&adapterName)
if len(adapterName) == 0 || adapterName == "" {

View File

@@ -108,7 +108,7 @@ func (l *PushTaskInfoLogic) PushTaskInfo(req *clientCore.PushTaskInfoReq) (*clie
func syncTask(gorm *gorm.DB, noticeInfo clientCore.NoticeInfo) {
var allStatus string
-tx := gorm.Raw("SELECT CONCAT_WS(',',GROUP_CONCAT(DISTINCT h.status) ,GROUP_CONCAT(DISTINCT a.status) ,GROUP_CONCAT(DISTINCT c.status),GROUP_CONCAT(DISTINCT v.status))as status from task t left join task_hpc h on t.id = h.task_id left join task_cloud c on t.id = c.task_id left join task_vm v on t.id = v.task_id left join task_ai a on t.id = a.task_id where t.id = ?", noticeInfo.TaskId).Scan(&allStatus)
+tx := gorm.Raw("SELECT CONCAT_WS(',',GROUP_CONCAT(DISTINCT h.status),GROUP_CONCAT(DISTINCT a.status),GROUP_CONCAT(DISTINCT c.status),GROUP_CONCAT(DISTINCT v.status),GROUP_CONCAT(DISTINCT aa.status)) as status from task t left join task_hpc h on t.id = h.task_id left join task_cloud c on t.id = c.task_id left join task_vm v on t.id = v.task_id left join task_ai a on t.id = a.task_id left join task_ai_asynchronous aa on t.id = aa.task_id where t.id = ?", noticeInfo.TaskId).Scan(&allStatus)
if tx.Error != nil {
logx.Error(tx.Error)
}

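For reference, the CONCAT_WS/GROUP_CONCAT query in syncTask above collapses the statuses of every sub-task (now including the asynchronous AI sub-tasks) into a single comma-separated string such as "Succeeded,Running". A minimal sketch of how such a string could be reduced to one overall task status; the status names and the precedence rules below are assumptions for illustration, not the project's actual rollup logic:

package main

import (
	"fmt"
	"strings"
)

// rollupStatus picks an overall status from the comma-separated list
// returned by the aggregate query; the precedence order here is assumed.
func rollupStatus(allStatus string) string {
	statuses := strings.Split(allStatus, ",")
	for _, want := range []string{"Failed", "Running", "Pending"} {
		for _, s := range statuses {
			if s == want {
				return want
			}
		}
	}
	return "Succeeded"
}

func main() {
	fmt.Println(rollupStatus("Succeeded,Running")) // Running
}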
View File

@@ -43,6 +43,9 @@ func (l *TaskDetailsLogic) TaskDetails(req *types.FId) (resp *types.TaskDetailsR
}
case "1":
l.svcCtx.DbEngin.Table("task_ai").Where("task_id", task.Id).Scan(&subList)
+if len(subList) <= 0 {
+l.svcCtx.DbEngin.Table("task_ai_asynchronous").Where("task_id", task.Id).Scan(&subList)
+}
case "2":
l.svcCtx.DbEngin.Table("task_hpc").Where("task_id", task.Id).Scan(&subList)
}

View File

@@ -76,7 +76,7 @@ func (l *TaskListLogic) TaskList(req *types.TaskListReq) (resp *types.TaskListRe
Address string
}
pInfo := PInfo{}
-tx := l.svcCtx.DbEngin.Raw("SELECT id,name,address from sc_participant_phy_info where id in (SELECT CONCAT_WS(',',GROUP_CONCAT(DISTINCT h.participant_id) ,GROUP_CONCAT(DISTINCT a.participant_id) ,GROUP_CONCAT(DISTINCT c.participant_id))as service_name from task t left join hpc h on t.id = h.task_id left join cloud c on t.id = c.task_id left join ai a on t.id = a.task_id where t.id = ?)", task.Id).Scan(&pInfo)
+tx := l.svcCtx.DbEngin.Raw("SELECT id,name,address from sc_participant_phy_info where id in (SELECT CONCAT_WS(',',GROUP_CONCAT(DISTINCT h.participant_id) ,GROUP_CONCAT(DISTINCT a.participant_id) ,GROUP_CONCAT(DISTINCT c.participant_id),GROUP_CONCAT(DISTINCT v.participant_id))as service_name from task t left join hpc h on t.id = h.task_id left join cloud c on t.id = c.task_id left join ai a on t.id = a.task_id left join vm v on t.id = v.task_id where t.id = ?)", task.Id).Scan(&pInfo)
if tx.Error != nil {
logx.Error(err)
return nil, tx.Error

View File

@@ -26,7 +26,7 @@ func NewScheduleSituationLogic(ctx context.Context, svcCtx *svc.ServiceContext)
func (l *ScheduleSituationLogic) ScheduleSituation() (resp *types.ScheduleSituationResp, err error) {
resp = &types.ScheduleSituationResp{}
// node region
-tx := l.svcCtx.DbEngin.Raw("SELECT c.id, c.name, tdi.id AS category, count(DISTINCT ta.id)+count(DISTINCT tc.id)+COUNT(DISTINCT th.id)+COUNT(tv.id) as value FROM t_cluster c LEFT JOIN t_dict_item tdi ON c.region_dict = tdi.id left JOIN task_ai ta ON ta.cluster_id = c.id left JOIN task_cloud tc ON tc.cluster_id = c.id left JOIN task_hpc th ON th.cluster_id = c.id left JOIN task_vm tv ON tv.cluster_id = c.id WHERE tc.deleted_at IS NULL GROUP BY c.id").Scan(&resp.Nodes)
+tx := l.svcCtx.DbEngin.Raw("SELECT c.id, c.name, tdi.id AS category, count(DISTINCT ta.id)+count(DISTINCT tas.id)+count(DISTINCT tc.id)+COUNT(DISTINCT th.id)+COUNT(DISTINCT tv.id) as value FROM t_cluster c LEFT JOIN t_dict_item tdi ON c.region_dict = tdi.id left JOIN task_ai ta ON ta.cluster_id = c.id left JOIN task_ai_asynchronous tas ON tas.cluster_id = c.id left JOIN task_cloud tc ON tc.cluster_id = c.id left JOIN task_hpc th ON th.cluster_id = c.id left JOIN task_vm tv ON tv.cluster_id = c.id WHERE tc.deleted_at IS NULL GROUP BY c.id").Scan(&resp.Nodes)
if tx.Error != nil {
return nil, tx.Error
}
@@ -52,6 +52,12 @@ func (l *ScheduleSituationLogic) ScheduleSituation() (resp *types.ScheduleSituat
return nil, tx.Error
}
LinksHandler(aiLinks, resp)
+var aiAsynchronousLinks []string
+tx = l.svcCtx.DbEngin.Raw("SELECT distinct GROUP_CONCAT(distinct cluster_id SEPARATOR ',') as cluster_ids FROM task_ai_asynchronous WHERE deleted_at IS NULL GROUP BY task_id HAVING COUNT(*) > 1;").Scan(&aiAsynchronousLinks)
+if tx.Error != nil {
+return nil, tx.Error
+}
+LinksHandler(aiAsynchronousLinks, resp)
// vm
var vmLinks []string
tx = l.svcCtx.DbEngin.Raw("SELECT distinct GROUP_CONCAT(distinct cluster_id SEPARATOR ',') as cluster_ids FROM task_vm WHERE deleted_at IS NULL GROUP BY task_id HAVING COUNT(*) > 1;").Scan(&vmLinks)