Merge pull request 'updated ai overview functions' (#132) from tzwang/pcm-coordinator:master into master

Former-commit-id: 1a78382c11268f122ae0b3b2d2f7ca819eaed303
tzwang 2024-04-30 16:33:00 +08:00
commit caa8ad19a1
10 changed files with 337 additions and 29 deletions

View File

@@ -2,7 +2,6 @@ package ai

 import (
 	"context"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
@@ -24,7 +23,21 @@ func NewGetCenterListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetCenterListLogic {
 }

 func (l *GetCenterListLogic) GetCenterList() (resp *types.CenterListResp, err error) {
-	// todo: add your logic here and delete this line
-	return
+	resp = &types.CenterListResp{}
+	adapterList, err := l.svcCtx.Scheduler.AiStorages.GetAdaptersByType("1")
+	if err != nil {
+		return nil, err
+	}
+	for _, adapter := range adapterList {
+		a := &types.AiCenter{
+			Name:      adapter.Name,
+			StackName: adapter.Nickname,
+			Version:   adapter.Version,
+		}
+		resp.List = append(resp.List, a)
+	}
+	return resp, nil
 }

View File

@@ -2,7 +2,6 @@ package ai

 import (
 	"context"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
@@ -24,7 +23,46 @@ func NewGetCenterOverviewLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetCenterOverviewLogic {
 }

 func (l *GetCenterOverviewLogic) GetCenterOverview() (resp *types.CenterOverviewResp, err error) {
-	// todo: add your logic here and delete this line
-	return
+	resp = &types.CenterOverviewResp{}
+	var centerNum int32
+	var taskNum int32
+	var cardNum int32
+	var totalTops float64
+	adapterList, err := l.svcCtx.Scheduler.AiStorages.GetAdaptersByType("1")
+	if err != nil {
+		return nil, err
+	}
+	centerNum = int32(len(adapterList))
+	resp.CenterNum = centerNum
+	for _, adapter := range adapterList {
+		taskList, err := l.svcCtx.Scheduler.AiStorages.GetAiTasksByAdapterId(adapter.Id)
+		if err != nil {
+			continue
+		}
+		taskNum += int32(len(taskList))
+	}
+	resp.TaskNum = taskNum
+	for _, adapter := range adapterList {
+		clusters, err := l.svcCtx.Scheduler.AiStorages.GetClustersByAdapterId(adapter.Id)
+		if err != nil {
+			continue
+		}
+		for _, cluster := range clusters.List {
+			clusterResource, err := l.svcCtx.Scheduler.AiStorages.GetClusterResourcesById(cluster.Id)
+			if err != nil {
+				continue
+			}
+			cardNum += int32(clusterResource.CardTotal)
+			totalTops += clusterResource.CardTopsTotal
+		}
+	}
+	resp.CardNum = cardNum
+	resp.PowerInTops = totalTops
+	return resp, nil
 }

View File

@@ -2,6 +2,7 @@ package ai

 import (
 	"context"
+	"sort"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
@@ -24,7 +25,46 @@ func NewGetCenterQueueingLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetCenterQueueingLogic {
 }

 func (l *GetCenterQueueingLogic) GetCenterQueueing() (resp *types.CenterQueueingResp, err error) {
-	// todo: add your logic here and delete this line
-	return
+	resp = &types.CenterQueueingResp{}
+	adapters, err := l.svcCtx.Scheduler.AiStorages.GetAdaptersByType("1")
+	if err != nil {
+		return nil, err
+	}
+	for _, adapter := range adapters {
+		clusters, err := l.svcCtx.Scheduler.AiStorages.GetClustersByAdapterId(adapter.Id)
+		if err != nil {
+			continue
+		}
+		for _, cluster := range clusters.List {
+			queues, err := l.svcCtx.Scheduler.AiStorages.GetClusterTaskQueues(adapter.Id, cluster.Id)
+			if err != nil {
+				continue
+			}
+			//todo sync current task queues
+			current := &types.CenterQueue{
+				Name:        cluster.Name,
+				QueueingNum: int32(queues[0].QueueNum),
+			}
+			history := &types.CenterQueue{
+				Name:        cluster.Name,
+				QueueingNum: int32(queues[0].QueueNum),
+			}
+			resp.Current = append(resp.Current, current)
+			resp.History = append(resp.History, history)
+		}
+	}
+	sortQueueingNum(resp.Current)
+	sortQueueingNum(resp.History)
+	return resp, nil
+}
+
+func sortQueueingNum(q []*types.CenterQueue) {
+	sort.Slice(q, func(i, j int) bool {
+		return q[i].QueueingNum > q[j].QueueingNum
+	})
 }

View File

@@ -2,6 +2,8 @@ package ai

 import (
 	"context"
+	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/constants"
+	"time"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/svc"
 	"gitlink.org.cn/JointCloud/pcm-coordinator/api/internal/types"
@@ -15,6 +17,8 @@ type GetCenterTaskListLogic struct {
 	svcCtx *svc.ServiceContext
 }

+const layout = "2006-01-02 15:04:05"
+
 func NewGetCenterTaskListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetCenterTaskListLogic {
 	return &GetCenterTaskListLogic{
 		Logger: logx.WithContext(ctx),
@@ -24,7 +28,36 @@ func NewGetCenterTaskListLogic(ctx context.Context, svcCtx *svc.ServiceContext) *GetCenterTaskListLogic {
 }

 func (l *GetCenterTaskListLogic) GetCenterTaskList() (resp *types.CenterTaskListResp, err error) {
-	// todo: add your logic here and delete this line
-	return
+	resp = &types.CenterTaskListResp{}
+	adapterList, err := l.svcCtx.Scheduler.AiStorages.GetAdaptersByType("1")
+	if err != nil {
+		return nil, err
+	}
+	for _, adapter := range adapterList {
+		taskList, err := l.svcCtx.Scheduler.AiStorages.GetAiTasksByAdapterId(adapter.Id)
+		if err != nil {
+			continue
+		}
+		for _, task := range taskList {
+			var elapsed time.Duration
+			start, _ := time.Parse(layout, task.CommitTime)
+			if task.Status != constants.Completed {
+				elapsed = time.Since(start)
+			} else {
+				end, _ := time.Parse(layout, task.EndTime)
+				elapsed = end.Sub(start)
+			}
+			t := &types.AiTask{
+				Name:        task.Name,
+				Status:      task.Status,
+				TimeElapsed: int32(elapsed.Seconds()),
+			}
+			resp.List = append(resp.List, t)
+		}
+	}
+	return resp, nil
 }
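For reference, here is a small self-contained example of the elapsed-time calculation used above, assuming task timestamps stored in the "2006-01-02 15:04:05" layout declared in this file; the sample values are illustrative only.

package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02 15:04:05"

func main() {
	// Parse a commit time and an end time in the same layout the task list uses,
	// then report the elapsed seconds the way GetCenterTaskList does.
	start, _ := time.Parse(layout, "2024-04-30 16:00:00")
	end, _ := time.Parse(layout, "2024-04-30 16:33:00")
	elapsed := end.Sub(start)
	fmt.Println(int32(elapsed.Seconds())) // prints 1980 (33 minutes)
}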

View File

@@ -29,7 +29,9 @@ func (l *ScheduleSubmitLogic) ScheduleSubmit(req *types.ScheduleReq) (resp *types.ScheduleResp, err error) {
 	resp = &types.ScheduleResp{}
 	opt := &option.AiOption{
 		AdapterId:    req.AiOption.AdapterId,
+		TaskName:     req.AiOption.TaskName,
 		ResourceType: req.AiOption.ResourceType,
+		Replica:      1,
 		Tops:         req.AiOption.Tops,
 		TaskType:     req.AiOption.TaskType,
 		DatasetsName: req.AiOption.Datasets,

View File

@@ -50,9 +50,20 @@ func (s *AiStorage) GetAdapterIdsByType(adapterType string) ([]string, error) {
 	return ids, nil
 }

-func (s *AiStorage) GetAiTasks() ([]*types.AiTaskDb, error) {
+func (s *AiStorage) GetAdaptersByType(adapterType string) ([]*types.AdapterInfo, error) {
+	var list []*types.AdapterInfo
+	db := s.DbEngin.Model(&types.AdapterInfo{}).Table("t_adapter")
+	db = db.Where("type = ?", adapterType)
+	err := db.Order("create_time desc").Find(&list).Error
+	if err != nil {
+		return nil, err
+	}
+	return list, nil
+}
+
+func (s *AiStorage) GetAiTasksByAdapterId(adapterId string) ([]*types.AiTaskDb, error) {
 	var resp []*types.AiTaskDb
-	tx := s.DbEngin.Raw("select * from task_ai").Scan(&resp)
+	tx := s.DbEngin.Raw("select * from task_ai where `adapter_id` = ? ", adapterId).Scan(&resp)
 	if tx.Error != nil {
 		logx.Errorf(tx.Error.Error())
 		return nil, tx.Error
@@ -93,6 +104,7 @@ func (s *AiStorage) SaveAiTask(taskId int64, option *option.AiOption, clusterId
 		Name:     option.TaskName,
 		Replica:  option.Replica,
 		JobId:    jobId,
+		TaskType: option.TaskType,
 		Strategy: option.StrategyName,
 		Status:   status,
 		Msg:      msg,
@@ -106,6 +118,37 @@ func (s *AiStorage) SaveAiTask(taskId int64, option *option.AiOption, clusterId
 	return nil
 }

+func (s *AiStorage) SaveClusterTaskQueue(adapterId string, clusterId string, queueNum int64) error {
+	aId, err := strconv.ParseInt(adapterId, 10, 64)
+	if err != nil {
+		return err
+	}
+	cId, err := strconv.ParseInt(clusterId, 10, 64)
+	if err != nil {
+		return err
+	}
+	taskQueue := models.TClusterTaskQueue{
+		AdapterId: aId,
+		ClusterId: cId,
+		QueueNum:  queueNum,
+	}
+	tx := s.DbEngin.Create(&taskQueue)
+	if tx.Error != nil {
+		return tx.Error
+	}
+	return nil
+}
+
+func (s *AiStorage) GetClusterTaskQueues(adapterId string, clusterId string) ([]*models.TClusterTaskQueue, error) {
+	var taskQueues []*models.TClusterTaskQueue
+	tx := s.DbEngin.Raw("select * from t_cluster_task_queue where `adapter_id` = ? and `cluster_id` = ?", adapterId, clusterId).Scan(&taskQueues)
+	if tx.Error != nil {
+		logx.Errorf(tx.Error.Error())
+		return nil, tx.Error
+	}
+	return taskQueues, nil
+}
+
 func (s *AiStorage) GetAiTaskIdByClusterIdAndTaskId(clusterId string, taskId string) (string, error) {
 	var aiTask models.TaskAi
 	tx := s.DbEngin.Raw("select * from task_ai where `cluster_id` = ? and `task_id` = ?", clusterId, taskId).Scan(&aiTask)
@@ -116,6 +159,16 @@ func (s *AiStorage) GetAiTaskIdByClusterIdAndTaskId(clusterId string, taskId string) (string, error) {
 	return aiTask.JobId, nil
 }

+func (s *AiStorage) GetClusterResourcesById(clusterId string) (*models.TClusterResource, error) {
+	var clusterResource models.TClusterResource
+	tx := s.DbEngin.Raw("select * from t_cluster_resource where `cluster_id` = ?", clusterId).Scan(&clusterResource)
+	if tx.Error != nil {
+		logx.Errorf(tx.Error.Error())
+		return nil, tx.Error
+	}
+	return &clusterResource, nil
+}
+
 func (s *AiStorage) UpdateTask() error {
 	return nil
 }
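The "//todo sync current task queues" note in GetCenterQueueing points at a background job that would keep t_cluster_task_queue fresh. No such job is part of this commit; the sketch below only illustrates how SaveClusterTaskQueue might be driven by one. The queueStore interface, the fetch callback, the pair list, and the interval are assumptions made for the example.

package queuesync

import (
	"context"
	"time"
)

// queueStore captures only the storage method this sketch needs; in the
// coordinator it is satisfied by AiStorage.SaveClusterTaskQueue above.
type queueStore interface {
	SaveClusterTaskQueue(adapterId string, clusterId string, queueNum int64) error
}

// SyncClusterTaskQueues is a hypothetical background job, not part of this
// commit. On every tick it asks each (adapterId, clusterId) pair for its live
// queue depth via the supplied fetch callback and persists a snapshot row,
// which GetClusterTaskQueues can later read back for the overview endpoints.
func SyncClusterTaskQueues(
	ctx context.Context,
	store queueStore,
	pairs [][2]string, // (adapterId, clusterId) pairs to poll
	fetch func(adapterId, clusterId string) (int64, error), // assumed helper
	interval time.Duration,
) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			for _, p := range pairs {
				queueNum, err := fetch(p[0], p[1])
				if err != nil {
					continue // skip clusters that are unreachable this round
				}
				_ = store.SaveClusterTaskQueue(p[0], p[1], queueNum)
			}
		}
	}
}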

View File

@@ -49,6 +49,7 @@ type (
 		CommitTime time.Time `db:"commit_time"` // commit time
 		StartTime  string    `db:"start_time"`  // start time
 		EndTime    string    `db:"end_time"`    // end time
+		TaskType   string    `db:"task_type"`
 	}
 )
@@ -87,14 +88,14 @@ func (m *defaultTaskAiModel) FindOne(ctx context.Context, id int64) (*TaskAi, error) {
 }

 func (m *defaultTaskAiModel) Insert(ctx context.Context, data *TaskAi) (sql.Result, error) {
-	query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, taskAiRowsExpectAutoSet)
-	ret, err := m.conn.ExecCtx(ctx, query, data.TaskId, data.AdapterId, data.ClusterId, data.Name, data.Replica, data.JobId, data.Strategy, data.Status, data.Msg, data.CommitTime, data.StartTime, data.EndTime)
+	query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, taskAiRowsExpectAutoSet)
+	ret, err := m.conn.ExecCtx(ctx, query, data.TaskId, data.AdapterId, data.ClusterId, data.Name, data.Replica, data.JobId, data.Strategy, data.Status, data.Msg, data.CommitTime, data.StartTime, data.EndTime, data.TaskType)
 	return ret, err
 }

 func (m *defaultTaskAiModel) Update(ctx context.Context, data *TaskAi) error {
 	query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, taskAiRowsWithPlaceHolder)
-	_, err := m.conn.ExecCtx(ctx, query, data.TaskId, data.AdapterId, data.ClusterId, data.Name, data.Replica, data.JobId, data.Strategy, data.Status, data.Msg, data.CommitTime, data.StartTime, data.EndTime, data.Id)
+	_, err := m.conn.ExecCtx(ctx, query, data.TaskId, data.AdapterId, data.ClusterId, data.Name, data.Replica, data.JobId, data.Strategy, data.Status, data.Msg, data.CommitTime, data.StartTime, data.EndTime, data.TaskType, data.Id)
 	return err
 }

View File

@@ -35,17 +35,19 @@ type (
 	}

 	TClusterResource struct {
 		ClusterId     int64   `db:"cluster_id"`
 		ClusterName   string  `db:"cluster_name"`
 		ClusterType   int64   `db:"cluster_type"` // type: 0->container, 1->AI compute, 2->HPC, 3->VM
 		CpuAvail      float64 `db:"cpu_avail"`
 		CpuTotal      float64 `db:"cpu_total"`
 		MemAvail      float64 `db:"mem_avail"`
 		MemTotal      float64 `db:"mem_total"`
 		DiskAvail     float64 `db:"disk_avail"`
 		DiskTotal     float64 `db:"disk_total"`
 		GpuAvail      float64 `db:"gpu_avail"`
 		GpuTotal      float64 `db:"gpu_total"`
+		CardTotal     int64   `db:"card_total"`      // number of accelerator cards
+		CardTopsTotal float64 `db:"card_tops_total"` // total compute power in TOPS
 	}
 )
@@ -56,6 +58,13 @@ func newTClusterResourceModel(conn sqlx.SqlConn) *defaultTClusterResourceModel {
 	}
 }

+func (m *defaultTClusterResourceModel) withSession(session sqlx.Session) *defaultTClusterResourceModel {
+	return &defaultTClusterResourceModel{
+		conn:  sqlx.NewSqlConnFromSession(session),
+		table: "`t_cluster_resource`",
+	}
+}
+
 func (m *defaultTClusterResourceModel) Delete(ctx context.Context, clusterId int64) error {
 	query := fmt.Sprintf("delete from %s where `cluster_id` = ?", m.table)
 	_, err := m.conn.ExecCtx(ctx, query, clusterId)
@@ -77,14 +86,14 @@ func (m *defaultTClusterResourceModel) FindOne(ctx context.Context, clusterId int64) (*TClusterResource, error) {
 }

 func (m *defaultTClusterResourceModel) Insert(ctx context.Context, data *TClusterResource) (sql.Result, error) {
-	query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, tClusterResourceRowsExpectAutoSet)
-	ret, err := m.conn.ExecCtx(ctx, query, data.ClusterId, data.ClusterName, data.ClusterType, data.CpuAvail, data.CpuTotal, data.MemAvail, data.MemTotal, data.DiskAvail, data.DiskTotal, data.GpuAvail, data.GpuTotal)
+	query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", m.table, tClusterResourceRowsExpectAutoSet)
+	ret, err := m.conn.ExecCtx(ctx, query, data.ClusterId, data.ClusterName, data.ClusterType, data.CpuAvail, data.CpuTotal, data.MemAvail, data.MemTotal, data.DiskAvail, data.DiskTotal, data.GpuAvail, data.GpuTotal, data.CardTotal, data.CardTopsTotal)
 	return ret, err
 }

 func (m *defaultTClusterResourceModel) Update(ctx context.Context, data *TClusterResource) error {
 	query := fmt.Sprintf("update %s set %s where `cluster_id` = ?", m.table, tClusterResourceRowsWithPlaceHolder)
-	_, err := m.conn.ExecCtx(ctx, query, data.ClusterName, data.ClusterType, data.CpuAvail, data.CpuTotal, data.MemAvail, data.MemTotal, data.DiskAvail, data.DiskTotal, data.GpuAvail, data.GpuTotal, data.ClusterId)
+	_, err := m.conn.ExecCtx(ctx, query, data.ClusterName, data.ClusterType, data.CpuAvail, data.CpuTotal, data.MemAvail, data.MemTotal, data.DiskAvail, data.DiskTotal, data.GpuAvail, data.GpuTotal, data.CardTotal, data.CardTopsTotal, data.ClusterId)
 	return err
 }
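Note that this commit extends the Go models (task_type on task_ai; card_total and card_tops_total on t_cluster_resource) without including a schema change. A minimal sketch of the kind of one-off migration a deployment might need follows; the column types, defaults, and DSN are assumptions, not taken from the repository.

package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN; point it at the database that holds task_ai and t_cluster_resource.
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/pcm")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Column types and defaults are assumptions; adjust them to the project's real schema.
	stmts := []string{
		"ALTER TABLE task_ai ADD COLUMN task_type VARCHAR(255) NOT NULL DEFAULT ''",
		"ALTER TABLE t_cluster_resource ADD COLUMN card_total BIGINT NOT NULL DEFAULT 0",
		"ALTER TABLE t_cluster_resource ADD COLUMN card_tops_total DOUBLE NOT NULL DEFAULT 0",
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			log.Fatal(err)
		}
	}
}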

View File

@@ -0,0 +1,24 @@
+package models
+
+import "github.com/zeromicro/go-zero/core/stores/sqlx"
+
+var _ TClusterTaskQueueModel = (*customTClusterTaskQueueModel)(nil)
+
+type (
+	// TClusterTaskQueueModel is an interface to be customized, add more methods here,
+	// and implement the added methods in customTClusterTaskQueueModel.
+	TClusterTaskQueueModel interface {
+		tClusterTaskQueueModel
+	}
+
+	customTClusterTaskQueueModel struct {
+		*defaultTClusterTaskQueueModel
+	}
+)
+
+// NewTClusterTaskQueueModel returns a model for the database table.
+func NewTClusterTaskQueueModel(conn sqlx.SqlConn) TClusterTaskQueueModel {
+	return &customTClusterTaskQueueModel{
+		defaultTClusterTaskQueueModel: newTClusterTaskQueueModel(conn),
+	}
+}

View File

@@ -0,0 +1,95 @@
+// Code generated by goctl. DO NOT EDIT.
+
+package models
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/zeromicro/go-zero/core/stores/builder"
+	"github.com/zeromicro/go-zero/core/stores/sqlc"
+	"github.com/zeromicro/go-zero/core/stores/sqlx"
+	"github.com/zeromicro/go-zero/core/stringx"
+)
+
+var (
+	tClusterTaskQueueFieldNames          = builder.RawFieldNames(&TClusterTaskQueue{})
+	tClusterTaskQueueRows                = strings.Join(tClusterTaskQueueFieldNames, ",")
+	tClusterTaskQueueRowsExpectAutoSet   = strings.Join(stringx.Remove(tClusterTaskQueueFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), ",")
+	tClusterTaskQueueRowsWithPlaceHolder = strings.Join(stringx.Remove(tClusterTaskQueueFieldNames, "`id`", "`create_at`", "`create_time`", "`created_at`", "`update_at`", "`update_time`", "`updated_at`"), "=?,") + "=?"
+)
+
+type (
+	tClusterTaskQueueModel interface {
+		Insert(ctx context.Context, data *TClusterTaskQueue) (sql.Result, error)
+		FindOne(ctx context.Context, id int64) (*TClusterTaskQueue, error)
+		Update(ctx context.Context, data *TClusterTaskQueue) error
+		Delete(ctx context.Context, id int64) error
+	}
+
+	defaultTClusterTaskQueueModel struct {
+		conn  sqlx.SqlConn
+		table string
+	}
+
+	TClusterTaskQueue struct {
+		Id        int64     `db:"id"`         // id
+		AdapterId int64     `db:"adapter_id"` // adapter id
+		ClusterId int64     `db:"cluster_id"` // cluster id
+		QueueNum  int64     `db:"queue_num"`  // number of queued tasks
+		Date      time.Time `db:"date"`
+	}
+)
+
+func newTClusterTaskQueueModel(conn sqlx.SqlConn) *defaultTClusterTaskQueueModel {
+	return &defaultTClusterTaskQueueModel{
+		conn:  conn,
+		table: "`t_cluster_task_queue`",
+	}
+}
+
+func (m *defaultTClusterTaskQueueModel) withSession(session sqlx.Session) *defaultTClusterTaskQueueModel {
+	return &defaultTClusterTaskQueueModel{
+		conn:  sqlx.NewSqlConnFromSession(session),
+		table: "`t_cluster_task_queue`",
+	}
+}
+
+func (m *defaultTClusterTaskQueueModel) Delete(ctx context.Context, id int64) error {
+	query := fmt.Sprintf("delete from %s where `id` = ?", m.table)
+	_, err := m.conn.ExecCtx(ctx, query, id)
+	return err
+}
+
+func (m *defaultTClusterTaskQueueModel) FindOne(ctx context.Context, id int64) (*TClusterTaskQueue, error) {
+	query := fmt.Sprintf("select %s from %s where `id` = ? limit 1", tClusterTaskQueueRows, m.table)
+	var resp TClusterTaskQueue
+	err := m.conn.QueryRowCtx(ctx, &resp, query, id)
+	switch err {
+	case nil:
+		return &resp, nil
+	case sqlc.ErrNotFound:
+		return nil, ErrNotFound
+	default:
+		return nil, err
+	}
+}
+
+func (m *defaultTClusterTaskQueueModel) Insert(ctx context.Context, data *TClusterTaskQueue) (sql.Result, error) {
+	query := fmt.Sprintf("insert into %s (%s) values (?, ?, ?, ?)", m.table, tClusterTaskQueueRowsExpectAutoSet)
+	ret, err := m.conn.ExecCtx(ctx, query, data.AdapterId, data.ClusterId, data.QueueNum, data.Date)
+	return ret, err
+}
+
+func (m *defaultTClusterTaskQueueModel) Update(ctx context.Context, data *TClusterTaskQueue) error {
+	query := fmt.Sprintf("update %s set %s where `id` = ?", m.table, tClusterTaskQueueRowsWithPlaceHolder)
+	_, err := m.conn.ExecCtx(ctx, query, data.AdapterId, data.ClusterId, data.QueueNum, data.Date, data.Id)
+	return err
+}
+
+func (m *defaultTClusterTaskQueueModel) tableName() string {
+	return m.table
+}
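For completeness, here is a minimal usage sketch of the generated model. The import path, DSN, and IDs are placeholders chosen for the example; only NewTClusterTaskQueueModel, TClusterTaskQueue, and Insert come from the code above.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/zeromicro/go-zero/core/stores/sqlx"

	// Import path assumed for illustration; use the real location of the models package.
	"gitlink.org.cn/JointCloud/pcm-coordinator/pkg/models"
)

func main() {
	// Placeholder DSN; the t_cluster_task_queue table must already exist.
	conn := sqlx.NewMysql("user:pass@tcp(127.0.0.1:3306)/pcm?parseTime=true")
	model := models.NewTClusterTaskQueueModel(conn)

	// Record one queue-depth snapshot for adapter 1 / cluster 10 (illustrative IDs).
	_, err := model.Insert(context.Background(), &models.TClusterTaskQueue{
		AdapterId: 1,
		ClusterId: 10,
		QueueNum:  3,
		Date:      time.Now(),
	})
	if err != nil {
		fmt.Println("insert failed:", err)
	}
}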