Merge pull request 'Update function names' (#8) from feature_sjc into master

This commit is contained in: commit 51c59bd951
@@ -6,7 +6,7 @@ import (
 	advmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/advisor"
 )
 
-func (svc *Service) StartGetScheduleScheme(msg *advmq.StartGetScheduleScheme) (*advmq.StartGetScheduleSchemeResp, *mq.CodeMessage) {
-	tsk := svc.taskManager.StartNew(schtsk.NewGetScheduleScheme())
-	return mq.ReplyOK(advmq.NewStartGetScheduleSchemeResp(tsk.ID()))
+func (svc *Service) StartMakeScheduleScheme(msg *advmq.StartMakeScheduleScheme) (*advmq.StartMakeScheduleSchemeResp, *mq.CodeMessage) {
+	tsk := svc.taskManager.StartNew(schtsk.NewMakeScheduleScheme())
+	return mq.ReplyOK(advmq.NewStartMakeScheduleSchemeResp(tsk.ID()))
 }
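Both the old and new handlers follow the same fire-and-acknowledge contract: StartNew launches the work asynchronously and the reply carries only the new task's ID; results arrive later through the task's reporter. A caller-side sketch under that assumption (client construction omitted; submitScheme and advisorCli are illustrative names, not from this repository):

package caller

import (
	"fmt"

	"gitlink.org.cn/cloudream/scheduler/common/models/job"
	advmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/advisor"
)

// submitScheme is a hypothetical helper: it asks the advisor to start
// computing a scheme and returns the task ID used to correlate later reports.
func submitScheme(advisorCli *advmq.Client, j job.NormalJob) (string, error) {
	resp, err := advisorCli.StartMakeScheduleScheme(advmq.NewStartMakeScheduleScheme(j))
	if err != nil {
		return "", fmt.Errorf("start make schedule scheme: %w", err)
	}
	return resp.TaskID, nil
}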
@@ -1,29 +1,41 @@
 package task
 
 import (
 	"fmt"
 	"time"
 
+	"gitlink.org.cn/cloudream/common/models"
 	"gitlink.org.cn/cloudream/common/pkgs/logger"
 	"gitlink.org.cn/cloudream/common/pkgs/task"
-	exectsk "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/executor/task"
+
+	"gitlink.org.cn/cloudream/scheduler/common/globals"
+	"gitlink.org.cn/cloudream/scheduler/common/models/job"
+	advtsk "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/advisor/task"
+	"gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/collector"
+
+	"github.com/inhies/go-bytesize"
 )
 
-type GetScheduleScheme struct {
+type MakeScheduleScheme struct {
+	Job             job.NormalJob
+	preAdjustNodeID int64
 }
 
-func NewGetScheduleScheme() *GetScheduleScheme {
-	return &GetScheduleScheme{}
+func NewMakeScheduleScheme() *MakeScheduleScheme {
+	return &MakeScheduleScheme{}
 }
 
-func (t *GetScheduleScheme) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
-	log := logger.WithType[GetScheduleScheme]("Task")
+func (t *MakeScheduleScheme) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
+	log := logger.WithType[MakeScheduleScheme]("Task")
 	log.Debugf("begin")
 	defer log.Debugf("end")
 
 	err := t.do(task.ID(), ctx)
 	if err != nil {
 		// TODO: if the task fails, adjust the reported "failed" status field as appropriate
-		ctx.reporter.Report(task.ID(), exectsk.NewScheduleTaskStatus("failed", err.Error(), 0))
+		ctx.reporter.Report(task.ID(), advtsk.NewScheduleSchemeTaskStatus("failed", err.Error(), true, advtsk.AdjustedScheme{}))
 	} else {
+		///////// FIXME: this still reports "failed" on the success path
+		ctx.reporter.Report(task.ID(), advtsk.NewScheduleSchemeTaskStatus("failed", "", false, advtsk.AdjustedScheme{}))
 	}
 	ctx.reporter.ReportNow()
@@ -32,42 +44,151 @@ func (t *GetScheduleScheme) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
 	})
 }
 
-func (t *GetScheduleScheme) do(taskID string, ctx TaskContext) error {
-	// pcmCli, err := globals.PCMPool.Acquire()
-	// if err != nil {
-	// 	return fmt.Errorf("new pcm client: %w", err)
-	// }
-	// defer pcmCli.Close()
-
-	// resp, err := pcmCli.ScheduleTask(pcm.ScheduleTaskReq{
-
-	// })
-	// if err != nil {
-	// 	return err
-	// }
-
-	// var prevStatus string
-	// for {
-	// 	tsResp, err := pcmCli.GetTaskStatus(pcm.GetTaskStatusReq{
-	// 		NodeID:   t.nodeID,
-	// 		PCMJobID: resp.PCMJobID,
-	// 	})
-	// 	if err != nil {
-	// 		return err
-	// 	}
-
-	// 	if tsResp.Status != prevStatus {
-	// 		ctx.reporter.Report(taskID, exectsk.NewScheduleTaskStatus(tsResp.Status, "", resp.PCMJobID))
-	// 	}
-
-	// 	prevStatus = tsResp.Status
-
-	// 	// TODO: adjust according to the result the interface actually returns
-	// 	// use the returned result to decide whether the task is done; if so, break out of the loop and finish
-	// 	if tsResp.Status == "Completed" {
-	// 		return nil
-	// 	}
-	// }
+func (t *MakeScheduleScheme) do(taskID string, ctx TaskContext) error {
+	isAvailable, err := t.CheckResourceAvailability()
+	if err != nil {
+		return err
+	}
+
+	if isAvailable {
+		// confirm whether the code, dataset and image have already been scheduled to this center
+	} else {
+		// re-run the pre-scheduling scheme to find the optimal node
+	}
 	return nil
 }
+
+// CheckResourceAvailability checks whether the pre-scheduled node has sufficient resources.
+func (t *MakeScheduleScheme) CheckResourceAvailability() (bool, error) {
+	colCli, err := globals.CollectorMQPool.Acquire()
+	if err != nil {
+		return false, fmt.Errorf("new collector client: %w", err)
+	}
+	defer colCli.Close()
+
+	neededCPU := t.Job.Info.Resources.CPU
+	if neededCPU > 0 {
+		resp, err := colCli.GetOneResourceData(collector.NewGetOneResourceData(
+			t.preAdjustNodeID,
+			models.ResourceTypeCPU,
+		))
+		if err != nil {
+			return false, err
+		}
+
+		availCPU := resp.Data.(models.CPUResourceData).Available
+
+		if float64(availCPU.Value) < 1.5*neededCPU {
+			logger.WithField("JobID", t.Job.JobID).
+				Infof("insufficient CPU resources, want: %f, available: %d%s", 1.5*neededCPU, availCPU.Value, availCPU.Unit)
+			return false, nil
+		}
+	}
+
+	neededNPU := t.Job.Info.Resources.NPU
+	if neededNPU > 0 {
+		resp, err := colCli.GetOneResourceData(collector.NewGetOneResourceData(
+			t.preAdjustNodeID,
+			models.ResourceTypeNPU,
+		))
+		if err != nil {
+			return false, err
+		}
+
+		availNPU := resp.Data.(models.NPUResourceData).Available
+
+		if float64(availNPU.Value) < 1.5*neededNPU {
+			logger.WithField("JobID", t.Job.JobID).
+				Infof("insufficient NPU resources, want: %f, available: %d%s", 1.5*neededNPU, availNPU.Value, availNPU.Unit)
+			return false, nil
+		}
+	}
+
+	neededGPU := t.Job.Info.Resources.GPU
+	if neededGPU > 0 {
+		resp, err := colCli.GetOneResourceData(collector.NewGetOneResourceData(
+			t.preAdjustNodeID,
+			models.ResourceTypeGPU,
+		))
+		if err != nil {
+			return false, err
+		}
+
+		availGPU := resp.Data.(models.GPUResourceData).Available
+
+		if float64(availGPU.Value) < 1.5*neededGPU {
+			logger.WithField("JobID", t.Job.JobID).
+				Infof("insufficient GPU resources, want: %f, available: %d%s", 1.5*neededGPU, availGPU.Value, availGPU.Unit)
+			return false, nil
+		}
+	}
+
+	neededMLU := t.Job.Info.Resources.MLU
+	if neededMLU > 0 {
+		resp, err := colCli.GetOneResourceData(collector.NewGetOneResourceData(
+			t.preAdjustNodeID,
+			models.ResourceTypeMLU,
+		))
+		if err != nil {
+			return false, err
+		}
+
+		availMLU := resp.Data.(models.MLUResourceData).Available
+
+		if float64(availMLU.Value) < 1.5*neededMLU {
+			logger.WithField("JobID", t.Job.JobID).
+				Infof("insufficient MLU resources, want: %f, available: %d%s", 1.5*neededMLU, availMLU.Value, availMLU.Unit)
+			return false, nil
+		}
+	}
+
+	neededStorage := t.Job.Info.Resources.Storage
+	if neededStorage > 0 {
+		resp, err := colCli.GetOneResourceData(collector.NewGetOneResourceData(
+			t.preAdjustNodeID,
+			models.ResourceTypeStorage,
+		))
+		if err != nil {
+			return false, err
+		}
+
+		availStorage := resp.Data.(models.StorageResourceData).Available
+
+		bytesStorage, err := bytesize.Parse(fmt.Sprintf("%f%s", availStorage.Value, availStorage.Unit))
+		if err != nil {
+			return false, err
+		}
+
+		if int64(bytesStorage) < int64(1.5*float64(neededStorage)) {
+			logger.WithField("JobID", t.Job.JobID).
+				Infof("insufficient storage resources, want: %s, available: %f%s", bytesize.New(1.5*float64(neededStorage)), availStorage.Value, availStorage.Unit)
+			return false, nil
+		}
+	}
+
+	neededMemory := t.Job.Info.Resources.Memory
+	if neededMemory > 0 {
+		resp, err := colCli.GetOneResourceData(collector.NewGetOneResourceData(
+			t.preAdjustNodeID,
+			models.ResourceTypeMemory,
+		))
+		if err != nil {
+			return false, err
+		}
+
+		availMemory := resp.Data.(models.MemoryResourceData).Available
+
+		bytesMemory, err := bytesize.Parse(fmt.Sprintf("%f%s", availMemory.Value, availMemory.Unit))
+		if err != nil {
+			return false, err
+		}
+
+		if int64(bytesMemory) < int64(1.5*float64(neededMemory)) {
+			logger.WithField("JobID", t.Job.JobID).
+				Infof("insufficient memory resources, want: %s, available: %f%s", bytesize.New(1.5*float64(neededMemory)), availMemory.Value, availMemory.Unit)
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
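An observation on the new checker rather than part of this diff: the CPU, NPU, GPU and MLU branches differ only in the resource type and the concrete *ResourceData cast. A dependency-free sketch of a table-driven shape they could share; all names and values here are illustrative, not from the repository:

package main

import "fmt"

// resourceCheck pairs a requested amount with what the collector reported.
type resourceCheck struct {
	name      string
	needed    float64 // amount requested by the job
	available int64   // amount reported for the pre-scheduled node
	unit      string
}

// allAvailable applies the same 1.5x headroom rule as CheckResourceAvailability.
func allAvailable(checks []resourceCheck) bool {
	for _, c := range checks {
		if c.needed <= 0 {
			continue // this resource was not requested
		}
		if float64(c.available) < 1.5*c.needed {
			fmt.Printf("insufficient %s resources, want: %f, available: %d%s\n",
				c.name, 1.5*c.needed, c.available, c.unit)
			return false
		}
	}
	return true
}

func main() {
	ok := allAvailable([]resourceCheck{
		{"CPU", 8, 16, "cores"}, // 16 >= 1.5*8, passes
		{"GPU", 2, 2, ""},       // 2 < 1.5*2, fails
	})
	fmt.Println(ok) // false
}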
@@ -1,39 +1,39 @@
 package advisor
 
 import (
-	"gitlink.org.cn/cloudream/common/models"
 	"gitlink.org.cn/cloudream/common/pkgs/mq"
+	"gitlink.org.cn/cloudream/scheduler/common/models/job"
 )
 
-// Get a schedule scheme
-var _ = Register(Service.StartGetScheduleScheme)
+type ApiService interface {
+	StartMakeScheduleScheme(msg *StartMakeScheduleScheme) (*StartMakeScheduleSchemeResp, *mq.CodeMessage)
+}
 
-type StartGetScheduleScheme struct {
-	// UserID    int64 `json:"userID"`
-	// PackageID int64 `json:"packageID"`
-}
+// Make a schedule scheme
+var _ = Register(Service.StartMakeScheduleScheme)
 
-func NewStartGetScheduleScheme() StartGetScheduleScheme {
-	return StartGetScheduleScheme{
-		// UserID: userID,
-		// PackageID: packageID,
-	}
-}
+type StartMakeScheduleScheme struct {
+	mq.MessageBodyBase
+	Job job.NormalJob `json:"job"`
+}
 
-type StartGetScheduleSchemeResp struct {
-	TaskID string `json:"taskID"`
-}
+func NewStartMakeScheduleScheme(job job.NormalJob) *StartMakeScheduleScheme {
+	return &StartMakeScheduleScheme{
+		Job: job,
+	}
+}
 
-func NewStartGetScheduleSchemeResp(taskID string) StartGetScheduleSchemeResp {
-	return StartGetScheduleSchemeResp{
-		TaskID: taskID,
-	}
-}
+type StartMakeScheduleSchemeResp struct {
+	mq.MessageBodyBase
+	TaskID string `json:"taskID"`
+}
 
-func (c *Client) StartGetScheduleScheme(msg StartGetScheduleScheme, opts ...mq.RequestOption) (*StartGetScheduleSchemeResp, error) {
-	return mq.Request[StartGetScheduleSchemeResp](c.rabbitCli, msg, opts...)
-}
+func NewStartMakeScheduleSchemeResp(taskID string) *StartMakeScheduleSchemeResp {
+	return &StartMakeScheduleSchemeResp{
+		TaskID: taskID,
+	}
+}
 
-func init() {
-	mq.RegisterUnionType(models.ResourceDataTypeUnion)
-}
+func (c *Client) StartMakeScheduleScheme(msg *StartMakeScheduleScheme, opts ...mq.RequestOption) (*StartMakeScheduleSchemeResp, error) {
+	return mq.Request(Service.StartMakeScheduleScheme, c.rabbitCli, msg, opts...)
+}
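Because StartMakeScheduleScheme embeds mq.MessageBodyBase and carries the whole job, the request body is an ordinary JSON document. A quick look at the wire format, assuming job.NormalJob marshals via its own json tags (the empty job value is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"gitlink.org.cn/cloudream/scheduler/common/models/job"
	advmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/advisor"
)

func main() {
	msg := advmq.NewStartMakeScheduleScheme(job.NormalJob{})
	body, _ := json.Marshal(msg)
	// Prints {"job":{...}} plus whatever fields MessageBodyBase contributes.
	fmt.Println(string(body))
}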
@@ -6,11 +6,11 @@ import (
 )
 
 const (
-	ServerQueueName = "Scheduler-Collector"
+	ServerQueueName = "Scheduler-Advisor"
 )
 
 type Service interface {
-	StartGetScheduleScheme(msg *StartGetScheduleScheme) (*StartGetScheduleSchemeResp, *mq.CodeMessage)
+	ApiService
 }
 
 type Server struct {
@@ -1,73 +1,40 @@
 package task
 
-type TaskStatus interface{}
+import (
+	"gitlink.org.cn/cloudream/common/pkgs/types"
+	myreflect "gitlink.org.cn/cloudream/common/utils/reflect"
+)
 
-type TaskStatusConst interface {
-	TaskStatus | ScheduleTaskStatus | UploadImageTaskStatus
+type TaskStatus interface {
+	Noop()
 }
 
-type ScheduleTaskStatus struct {
-	Status   string `json:"status"`
-	Error    string `json:"error"`
-	PCMJobID int64  `json:"pcmJobID"`
-}
+type TaskStatusBase struct{}
 
-func NewScheduleTaskStatus(status string, err string, pcmJobID int64) ScheduleTaskStatus {
-	return ScheduleTaskStatus{
-		Status:   status,
-		Error:    err,
-		PCMJobID: pcmJobID,
-	}
-}
+func (b *TaskStatusBase) Noop() {}
 
-type UploadImageTaskStatus struct {
-	Status  string `json:"status"`
-	Error   string `json:"error"`
-	ImageID int64  `json:"imageID"`
-}
+// After adding a new status type, register it here as well.
+var TaskStatusTypeUnion = types.NewTypeUnion[TaskStatus](
+	myreflect.TypeOf[ScheduleSchemeTaskStatus](),
+)
 
-func NewUploadImageTaskStatus(status string, err string, imageID int64) UploadImageTaskStatus {
-	return UploadImageTaskStatus{
-		Status:  status,
-		Error:   err,
-		ImageID: imageID,
-	}
-}
+type ScheduleSchemeTaskStatus struct {
+	TaskStatusBase
+	Status         string         `json:"status"`
+	Error          string         `json:"error"`
+	IsAdjustment   bool           `json:"isAdjustment"`
+	AdjustedScheme AdjustedScheme `json:"adjustedScheme"`
+}
 
-type CacheMovePackageTaskStatus struct {
-	Status string `json:"status"`
-	Error  string `json:"error"`
-}
+type AdjustedScheme struct {
+	NodeID int64 `json:"nodeID"`
+}
 
-func NewCacheMovePackageTaskStatus(status string, err string) CacheMovePackageTaskStatus {
-	return CacheMovePackageTaskStatus{
-		Status: status,
-		Error:  err,
-	}
-}
-
-type CreatePackageTaskStatus struct {
-	Status    string `json:"status"`
-	Error     string `json:"error"`
-	PackageID int64  `json:"packageID"`
-}
-
-func NewCreatePackageTaskStatus(status string, err string, packageID int64) CreatePackageTaskStatus {
-	return CreatePackageTaskStatus{
-		Status:    status,
-		Error:     err,
-		PackageID: packageID,
-	}
-}
-
-type LoadPackageTaskStatus struct {
-	Status string `json:"status"`
-	Error  string `json:"error"`
-}
-
-func NewLoadPackageTaskStatus(status string, err string) LoadPackageTaskStatus {
-	return LoadPackageTaskStatus{
-		Status: status,
-		Error:  err,
-	}
-}
+func NewScheduleSchemeTaskStatus(status string, err string, isAdjustment bool, adjustedScheme AdjustedScheme) *ScheduleSchemeTaskStatus {
+	return &ScheduleSchemeTaskStatus{
+		Status:         status,
+		Error:          err,
+		IsAdjustment:   isAdjustment,
+		AdjustedScheme: adjustedScheme,
+	}
+}
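The Noop marker method plus TaskStatusTypeUnion is what lets a status round-trip through the MQ serializer and come back as its concrete type. A consumer-side sketch, assuming the union is registered with the serializer; handleStatus and the literal values are illustrative:

package main

import (
	"fmt"

	advtsk "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/advisor/task"
)

// handleStatus is a hypothetical consumer of reported statuses: because every
// status type embeds TaskStatusBase, a type switch recovers the concrete type.
func handleStatus(status advtsk.TaskStatus) {
	switch st := status.(type) {
	case *advtsk.ScheduleSchemeTaskStatus:
		if st.Error != "" {
			fmt.Println("task failed:", st.Error)
		} else if st.IsAdjustment {
			fmt.Println("scheme adjusted to node", st.AdjustedScheme.NodeID)
		}
	default:
		fmt.Printf("unhandled status type %T\n", st)
	}
}

func main() {
	handleStatus(advtsk.NewScheduleSchemeTaskStatus("failed", "no resources", true, advtsk.AdjustedScheme{NodeID: 3}))
}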
@@ -19,7 +19,7 @@ var _ = Register(Service.StartUploadImage)
 
 type StartUploadImage struct {
 	mq.MessageBodyBase
-	NodeID    int64  `json:"nodeID"`
+	SlwNodeID int64  `json:"slwNodeID"`
 	ImagePath string `json:"imagePath"`
 }
 type StartUploadImageResp struct {
@@ -27,9 +27,9 @@ type StartUploadImageResp struct {
 	TaskID string `json:"taskID"`
 }
 
-func NewStartUploadImage(nodeID int64, imagePath string) *StartUploadImage {
+func NewStartUploadImage(slwNodeID int64, imagePath string) *StartUploadImage {
 	return &StartUploadImage{
-		NodeID:    nodeID,
+		SlwNodeID: slwNodeID,
 		ImagePath: imagePath,
 	}
 }
@@ -47,16 +47,16 @@ var _ = Register(Service.GetImageList)
 
 type GetImageList struct {
 	mq.MessageBodyBase
-	NodeID    int64 `json:"nodeID"`
+	SlwNodeID int64 `json:"slwNodeID"`
 }
 type GetImageListResp struct {
 	mq.MessageBodyBase
 	ImageIDs []int64 `json:"imageIDs"`
 }
 
-func NewGetImageList(nodeID int64) *GetImageList {
+func NewGetImageList(slwNodeID int64) *GetImageList {
 	return &GetImageList{
-		NodeID:    nodeID,
+		SlwNodeID: slwNodeID,
 	}
 }
 func NewGetImageListResp(imageIDs []int64) *GetImageListResp {
@@ -73,7 +73,7 @@ var _ = Register(Service.DeleteImage)
 
 type DeleteImage struct {
 	mq.MessageBodyBase
-	NodeID    int64 `json:"nodeID"`
+	SlwNodeID int64 `json:"slwNodeID"`
 	PCMJobID  int64 `json:"pcmJobID"`
 }
 type DeleteImageResp struct {
@@ -81,9 +81,9 @@ type DeleteImageResp struct {
 	Result string `json:"result"`
 }
 
-func NewDeleteImage(nodeID int64, pcmJobID int64) *DeleteImage {
+func NewDeleteImage(slwNodeID int64, pcmJobID int64) *DeleteImage {
 	return &DeleteImage{
-		NodeID:    nodeID,
+		SlwNodeID: slwNodeID,
 		PCMJobID:  pcmJobID,
 	}
 }
@@ -101,7 +101,7 @@ var _ = Register(Service.StartScheduleTask)
 
 type StartScheduleTask struct {
 	mq.MessageBodyBase
-	NodeID    int64               `json:"nodeID"`
+	SlwNodeID int64               `json:"slwNodeID"`
 	Envs      []map[string]string `json:"envs"`
 	ImageID   int64               `json:"imageID"`
 	CMDLine   string              `json:"cmdLine"`
@@ -111,9 +111,9 @@ type StartScheduleTaskResp struct {
 	TaskID string `json:"taskID"`
 }
 
-func NewStartScheduleTask(nodeID int64, envs []map[string]string, imageID int64, cmdLine string) *StartScheduleTask {
+func NewStartScheduleTask(slwNodeID int64, envs []map[string]string, imageID int64, cmdLine string) *StartScheduleTask {
 	return &StartScheduleTask{
-		NodeID:    nodeID,
+		SlwNodeID: slwNodeID,
 		Envs:      envs,
 		ImageID:   imageID,
 		CMDLine:   cmdLine,
@@ -133,7 +133,7 @@ var _ = Register(Service.DeleteTask)
 
 type DeleteTask struct {
 	mq.MessageBodyBase
-	NodeID    int64 `json:"nodeID"`
+	SlwNodeID int64 `json:"slwNodeID"`
 	PCMJobID  int64 `json:"pcmJobID"`
 }
 type DeleteTaskResp struct {
@@ -141,9 +141,9 @@ type DeleteTaskResp struct {
 	Result string `json:"result"`
 }
 
-func NewDeleteTask(nodeID int64, pcmJobID int64) *DeleteTask {
+func NewDeleteTask(slwNodeID int64, pcmJobID int64) *DeleteTask {
 	return &DeleteTask{
-		NodeID:    nodeID,
+		SlwNodeID: slwNodeID,
 		PCMJobID:  pcmJobID,
 	}
 }
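All of the renamed request bodies are built the same way. A small construction example, assuming the package import path matches the execmq alias used elsewhere in this PR; the IDs and path are illustrative:

package main

import (
	"fmt"

	execmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/executor"
)

func main() {
	var slwNodeID int64 = 42 // illustrative slw node ID
	var pcmJobID int64 = 7   // illustrative PCM job ID

	upload := execmq.NewStartUploadImage(slwNodeID, "/images/base.tar")
	list := execmq.NewGetImageList(slwNodeID)
	del := execmq.NewDeleteImage(slwNodeID, pcmJobID)

	// Each body now serializes the field as "slwNodeID" instead of "nodeID".
	fmt.Println(upload.SlwNodeID, list.SlwNodeID, del.SlwNodeID)
}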
@@ -85,18 +85,18 @@ type StartCacheMovePackage struct {
 	mq.MessageBodyBase
 	UserID    int64 `json:"userID"`
 	PackageID int64 `json:"packageID"`
-	NodeID    int64 `json:"nodeID"`
+	StgNodeID int64 `json:"stgNodeID"`
 }
 type StartCacheMovePackageResp struct {
 	mq.MessageBodyBase
 	TaskID string `json:"taskID"`
 }
 
-func NewStartCacheMovePackage(userID int64, packageID int64, nodeID int64) *StartCacheMovePackage {
+func NewStartCacheMovePackage(userID int64, packageID int64, stgNodeID int64) *StartCacheMovePackage {
 	return &StartCacheMovePackage{
 		UserID:    userID,
 		PackageID: packageID,
-		NodeID:    nodeID,
+		StgNodeID: stgNodeID,
 	}
 }
 func NewStartCacheMovePackageResp(taskID string) *StartCacheMovePackageResp {
@@ -11,7 +11,7 @@ import (
 )
 
 func (svc *Service) StartUploadImage(msg *execmq.StartUploadImage) (*execmq.StartUploadImageResp, *mq.CodeMessage) {
-	tsk := svc.taskManager.StartNew(schtsk.NewPCMUploadImage(msg.NodeID, msg.ImagePath))
+	tsk := svc.taskManager.StartNew(schtsk.NewPCMUploadImage(msg.SlwNodeID, msg.ImagePath))
 	return mq.ReplyOK(execmq.NewStartUploadImageResp(tsk.ID()))
 }
 
@@ -24,7 +24,7 @@ func (svc *Service) GetImageList(msg *execmq.GetImageList) (*execmq.GetImageListResp, *mq.CodeMessage) {
 	defer pcmCli.Close()
 
 	resp, err := pcmCli.GetImageList(pcm.GetImageListReq{
-		NodeID: msg.NodeID,
+		SlwNodeID: msg.SlwNodeID,
 	})
 	if err != nil {
 		logger.Warnf("get image list failed, err: %s", err.Error())
@@ -43,7 +43,7 @@ func (svc *Service) DeleteImage(msg *execmq.DeleteImage) (*execmq.DeleteImageResp, *mq.CodeMessage) {
 	defer pcmCli.Close()
 
 	resp, err := pcmCli.DeleteImage(pcm.DeleteImageReq{
-		NodeID:   msg.NodeID,
+		SlwNodeID: msg.SlwNodeID,
 		PCMJobID: msg.PCMJobID,
 	})
 	if err != nil {
@@ -54,7 +54,7 @@ func (svc *Service) DeleteImage(msg *execmq.DeleteImage) (*execmq.DeleteImageResp, *mq.CodeMessage) {
 }
 
 func (svc *Service) StartScheduleTask(msg *execmq.StartScheduleTask) (*execmq.StartScheduleTaskResp, *mq.CodeMessage) {
-	tsk := svc.taskManager.StartNew(schtsk.NewPCMScheduleTask(msg.NodeID, msg.Envs, msg.ImageID, msg.CMDLine))
+	tsk := svc.taskManager.StartNew(schtsk.NewPCMScheduleTask(msg.SlwNodeID, msg.Envs, msg.ImageID, msg.CMDLine))
 	return mq.ReplyOK(execmq.NewStartScheduleTaskResp(tsk.ID()))
 }
 
@@ -67,7 +67,7 @@ func (svc *Service) DeleteTask(msg *execmq.DeleteTask) (*execmq.DeleteTaskResp, *mq.CodeMessage) {
 	defer pcmCli.Close()
 
 	resp, err := pcmCli.DeleteTask(pcm.DeleteTaskReq{
-		NodeID:   msg.NodeID,
+		SlwNodeID: msg.SlwNodeID,
 		PCMJobID: msg.PCMJobID,
 	})
 	if err != nil {
@@ -17,7 +17,7 @@ func (svc *Service) StartStorageCreatePackage(msg *execmq.StartStorageCreatePackage) (*execmq.StartStorageCreatePackageResp, *mq.CodeMessage) {
 }
 
 func (svc *Service) StartCacheMovePackage(msg *execmq.StartCacheMovePackage) (*execmq.StartCacheMovePackageResp, *mq.CodeMessage) {
-	tsk := svc.taskManager.StartNew(schtsk.NewCacheMovePackage(msg.UserID, msg.PackageID, msg.NodeID))
+	tsk := svc.taskManager.StartNew(schtsk.NewCacheMovePackage(msg.UserID, msg.PackageID, msg.StgNodeID))
 	// tsk := svc.taskManager.StartNew(task.TaskBody[schtsk.NewCacheMovePackage(msg.UserID, msg.PackageID, msg.NodeID)])
 	return mq.ReplyOK(execmq.NewStartCacheMovePackageResp(tsk.ID()))
 }
@@ -14,14 +14,14 @@ import (
 
 type CacheMovePackage struct {
 	userID    int64
 	packageID int64
-	nodeID    int64
+	stgNodeID int64
 }
 
-func NewCacheMovePackage(userID int64, packageID int64, nodeID int64) *CacheMovePackage {
+func NewCacheMovePackage(userID int64, packageID int64, stgNodeID int64) *CacheMovePackage {
 	return &CacheMovePackage{
 		userID:    userID,
 		packageID: packageID,
-		nodeID:    nodeID,
+		stgNodeID: stgNodeID,
 	}
 }
 
@@ -54,6 +54,6 @@ func (t *CacheMovePackage) do(ctx TaskContext) error {
 	return stgCli.CacheMovePackage(storage.CacheMovePackageReq{
 		UserID:    t.userID,
 		PackageID: t.packageID,
-		NodeID:    t.packageID,
+		NodeID:    t.stgNodeID,
 	})
 }
@@ -13,15 +13,15 @@ import (
 )
 
 type PCMScheduleTask struct {
-	nodeID    int64
+	slwNodeID int64
 	envs      []map[string]string
 	imageID   int64
 	cmdLine   string
 }
 
-func NewPCMScheduleTask(nodeID int64, envs []map[string]string, imageID int64, cmdLine string) *PCMScheduleTask {
+func NewPCMScheduleTask(slwNodeID int64, envs []map[string]string, imageID int64, cmdLine string) *PCMScheduleTask {
 	return &PCMScheduleTask{
-		nodeID:    nodeID,
+		slwNodeID: slwNodeID,
 		envs:      envs,
 		imageID:   imageID,
 		cmdLine:   cmdLine,
@@ -53,7 +53,7 @@ func (t *PCMScheduleTask) do(taskID string, ctx TaskContext) error {
 	defer pcmCli.Close()
 
 	resp, err := pcmCli.ScheduleTask(pcm.ScheduleTaskReq{
-		NodeID:    t.nodeID,
+		SlwNodeID: t.slwNodeID,
 		Envs:      t.envs,
 		ImageID:   t.imageID,
 		CMDLine:   t.cmdLine,
@@ -66,7 +66,7 @@ func (t *PCMScheduleTask) do(taskID string, ctx TaskContext) error {
 	var prevStatus string
 	for {
 		tsResp, err := pcmCli.GetTaskStatus(pcm.GetTaskStatusReq{
-			NodeID:   t.nodeID,
+			SlwNodeID: t.slwNodeID,
 			PCMJobID: resp.PCMJobID,
 		})
 		if err != nil {
@@ -12,13 +12,13 @@ import (
 )
 
 type PCMUploadImage struct {
-	nodeID    int64
+	slwNodeID int64
 	imagePath string
 }
 
-func NewPCMUploadImage(nodeID int64, imagePath string) *PCMUploadImage {
+func NewPCMUploadImage(slwNodeID int64, imagePath string) *PCMUploadImage {
 	return &PCMUploadImage{
-		nodeID:    nodeID,
+		slwNodeID: slwNodeID,
 		imagePath: imagePath,
 	}
 }
@@ -48,7 +48,7 @@ func (t *PCMUploadImage) do(taskID string, ctx TaskContext) error {
 	defer pcmCli.Close()
 
 	resp, err := pcmCli.UploadImage(pcm.UploadImageReq{
-		NodeID:    t.nodeID,
+		SlwNodeID: t.slwNodeID,
 		ImagePath: t.imagePath,
 	})
 	if err != nil {
go.mod
@@ -27,6 +27,7 @@ require (
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/imdario/mergo v0.3.15 // indirect
+	github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
 	github.com/leodido/go-urn v1.2.4 // indirect
go.sum
@@ -41,6 +41,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
 github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf h1:FtEj8sfIcaaBfAKrE1Cwb61YDtYq9JxChK1c7AKce7s=
+github.com/inhies/go-bytesize v0.0.0-20220417184213-4913239db9cf/go.mod h1:yrqSXGoD/4EKfF26AOGzscPOgTTJcyAwM2rpixWT+t4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
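go-bytesize is the one new dependency; CheckResourceAvailability uses it to turn the collector's value-plus-unit pairs into comparable byte counts. A standalone example of the two calls the diff relies on, with illustrative values:

package main

import (
	"fmt"

	"github.com/inhies/go-bytesize"
)

func main() {
	// Parse a collector-style "value+unit" string into bytes, as the storage/memory checks do.
	avail, err := bytesize.Parse(fmt.Sprintf("%f%s", 512.0, "GB"))
	if err != nil {
		panic(err)
	}

	// 1.5x headroom over a 200GB request, as in the availability rule.
	needed := bytesize.New(1.5 * 200 * 1e9)

	// true: 512GB comfortably exceeds the 1.5x requirement.
	fmt.Println(avail, needed, int64(avail) >= int64(needed))
}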