Optimize the ClientPool definitions to make testing easier
This commit is contained in:
parent 6f34785cd6
commit a8d61ae115
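Summary of the change: each mq client package now exposes a Pool interface (Acquire/Release) backed by an unexported pool struct, the schglb globals are typed as that interface instead of a concrete *Pool, and call sites return clients to the pool via Release instead of calling Close. The point is testability: a test can swap a schglb pool for a stub. A minimal sketch of what that enables (stubCollectorPool and the canned client are illustrative, not part of this commit):

    // stubCollectorPool satisfies the new collector.Pool interface without touching RabbitMQ.
    type stubCollectorPool struct {
        cli *collector.Client // whatever client double the test prepares
    }

    func (p *stubCollectorPool) Acquire() (*collector.Client, error) { return p.cli, nil }
    func (p *stubCollectorPool) Release(cli *collector.Client)       {}

    // In a test: schglb.CollectorMQPool = &stubCollectorPool{cli: testCli}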
@@ -48,7 +48,7 @@ func (r *Reporter) Serve() error {
     if err != nil {
         return fmt.Errorf("new manager client: %w", err)
     }
-    defer magCli.Close()
+    defer schglb.ManagerMQPool.Release(magCli)
 
     ticker := time.NewTicker(r.reportInterval)
     defer ticker.Stop()
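Every call site touched by this commit follows the same substitution: the client is still acquired from the pool, but it is handed back with Release rather than closed directly, so connection handling is decided by the Pool implementation. The resulting shape at a call site (enclosing function omitted; taken from the pattern shown in these hunks):

    magCli, err := schglb.ManagerMQPool.Acquire()
    if err != nil {
        return fmt.Errorf("new manager client: %w", err)
    }
    // Hand the client back to the pool instead of closing it here.
    defer schglb.ManagerMQPool.Release(magCli)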
@@ -132,7 +132,7 @@ func (s *DefaultScheduler) Schedule(job *jobmod.NormalJob) (*jobmod.JobScheduleS
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     allSlwNodes := make(map[uopsdk.SlwNodeID]*candidateSlwNode)
 

@@ -215,7 +215,7 @@ func (s *DefaultScheduler) calcOneResourceScore(requires schsdk.JobResourcesInfo
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     getResDataResp, err := colCli.GetAllResourceData(collector.NewGetAllResourceData(slwNodeID))
     if err != nil {

@@ -405,7 +405,7 @@ func (s *DefaultScheduler) calcPackageFileScore(packageID int64, stgNodeToSlwNod
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     slwNodeFileScores := make(map[uopsdk.SlwNodeID]*fileDetail)
 

@@ -456,13 +456,13 @@ func (s *DefaultScheduler) calcImageFileScore(imageID schsdk.ImageID, allSlwNode
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     magCli, err := schglb.ManagerMQPool.Acquire()
     if err != nil {
         return nil, fmt.Errorf("new manager client: %w", err)
     }
-    defer magCli.Close()
+    defer schglb.ManagerMQPool.Release(magCli)
 
     imageInfoResp, err := magCli.GetImageInfo(manager.NewGetImageInfo(imageID))
     if err != nil {

@@ -136,7 +136,7 @@ func (s *DefaultPreScheduler) Schedule(info *schsdk.JobSetInfo) (*jobmod.JobSetP
     if err != nil {
         return nil, nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     // 查询有哪些算力中心可用
     getNodesResp, err := colCli.GetAllSlwNodeInfo(collector.NewGetAllSlwNodeInfo())

@@ -378,7 +378,7 @@ func (s *DefaultPreScheduler) calcOneResourceScore(requires schsdk.JobResourcesI
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     getResDataResp, err := colCli.GetAllResourceData(collector.NewGetAllResourceData(slwNodeID))
     if err != nil {

@@ -575,7 +575,7 @@ func (s *DefaultPreScheduler) calcPackageFileScore(packageID int64, stgNodeToSlw
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     slwNodeFileScores := make(map[uopsdk.SlwNodeID]*fileDetail)
 

@@ -626,13 +626,13 @@ func (s *DefaultPreScheduler) calcImageFileScore(imageID schsdk.ImageID, allSlwN
     if err != nil {
         return nil, fmt.Errorf("new collector client: %w", err)
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     magCli, err := schglb.ManagerMQPool.Acquire()
     if err != nil {
         return nil, fmt.Errorf("new manager client: %w", err)
     }
-    defer magCli.Close()
+    defer schglb.ManagerMQPool.Release(magCli)
 
     imageInfoResp, err := magCli.GetImageInfo(manager.NewGetImageInfo(imageID))
     if err != nil {

@@ -22,7 +22,7 @@ func (svc *JobSetService) Submit(info schsdk.JobSetInfo) (schsdk.JobSetID, *schs
     if err != nil {
         return "", nil, fmt.Errorf("new manager client: %w", err)
     }
-    defer mgrCli.Close()
+    defer schglb.ManagerMQPool.Release(mgrCli)
 
     schScheme, uploadScheme, err := svc.preScheduler.Schedule(&info)
     if err != nil {

@@ -43,7 +43,7 @@ func (svc *JobSetService) LocalFileUploaded(jobSetID schsdk.JobSetID, localPath
     if err != nil {
         return fmt.Errorf("new manager client: %w", err)
     }
-    defer mgrCli.Close()
+    defer schglb.ManagerMQPool.Release(mgrCli)
 
     _, err = mgrCli.JobSetLocalFileUploaded(mgrmq.NewJobSetLocalFileUploaded(jobSetID, localPath, errMsg, packageID))
     if err != nil {

@@ -15,7 +15,7 @@ func (svc *Service) GetImageList(msg *colmq.GetImageList) (*colmq.GetImageListRe
         logger.Warnf("new pcm client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new pcm client failed")
     }
-    defer pcmCli.Close()
+    defer schglb.PCMPool.Release(pcmCli)
 
     resp, err := pcmCli.GetImageList(pcmsdk.GetImageListReq{
         SlwNodeID: msg.SlwNodeID,

@@ -15,7 +15,7 @@ func (svc *Service) GetOneResourceData(msg *colmq.GetOneResourceData) (*colmq.Ge
         logger.Warnf("new unifyOps client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new unifyOps client failed")
     }
-    defer uniOpsCli.Close()
+    defer schglb.UnifyOpsPool.Release(uniOpsCli)
 
     var resp uopsdk.ResourceData
     switch msg.Type {

@@ -61,7 +61,7 @@ func (svc *Service) GetAllResourceData(msg *colmq.GetAllResourceData) (*colmq.Ge
         logger.Warnf("new unifyOps client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new unifyOps client failed")
     }
-    defer uniOpsCli.Close()
+    defer schglb.UnifyOpsPool.Release(uniOpsCli)
 
     resps, err := uniOpsCli.GetIndicatorData(uopsdk.GetOneResourceDataReq{
         SlwNodeID: msg.SlwNodeID,

@@ -17,7 +17,7 @@ func (svc *Service) GetSlwNodeInfo(msg *colmq.GetSlwNodeInfo) (*colmq.GetSlwNode
         logger.Warnf("new unifyOps client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new unifyOps client failed")
     }
-    defer uniOpsCli.Close()
+    defer schglb.UnifyOpsPool.Release(uniOpsCli)
 
     resp, err := uniOpsCli.GetAllSlwNodeInfo()
     if err != nil {

@@ -52,7 +52,7 @@ func (svc *Service) GetAllSlwNodeInfo(msg *colmq.GetAllSlwNodeInfo) (*colmq.GetA
         logger.Warnf("new unifyOps client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new unifyOps client failed")
     }
-    defer uniOpsCli.Close()
+    defer schglb.UnifyOpsPool.Release(uniOpsCli)
 
     resp, err := uniOpsCli.GetAllSlwNodeInfo()
     if err != nil {

@@ -15,7 +15,7 @@ func (svc *Service) PackageGetCachedStgNodes(msg *colmq.PackageGetCachedStgNodes
         logger.Warnf("new storage client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new storage client failed")
     }
-    defer stgCli.Close()
+    defer schglb.CloudreamStoragePool.Release(stgCli)
 
     resp, err := stgCli.PackageGetCachedNodes(stgsdk.PackageGetCachedNodesReq{
         PackageID: msg.PackageID,

@@ -35,7 +35,7 @@ func (svc *Service) PackageGetLoadedStgNodes(msg *colmq.PackageGetLoadedStgNodes
         logger.Warnf("new storage client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new storage client failed")
     }
-    defer stgCli.Close()
+    defer schglb.CloudreamStoragePool.Release(stgCli)
 
     resp, err := stgCli.PackageGetLoadedNodes(stgsdk.PackageGetLoadedNodesReq{
         PackageID: msg.PackageID,

@@ -11,13 +11,13 @@ import (
     mgrmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq/manager"
 )
 
-var ExecutorMQPool *exemq.Pool
+var ExecutorMQPool exemq.Pool
 
-var AdvisorMQPool *advmq.Pool
+var AdvisorMQPool advmq.Pool
 
-var CollectorMQPool *cltmq.Pool
+var CollectorMQPool cltmq.Pool
 
-var ManagerMQPool *mgrmq.Pool
+var ManagerMQPool mgrmq.Pool
 
 func InitMQPool(cfg *scmq.Config) {
     ExecutorMQPool = exemq.NewPool(cfg)

@@ -26,19 +26,19 @@ func InitMQPool(cfg *scmq.Config) {
     ManagerMQPool = mgrmq.NewPool(cfg)
 }
 
-var CloudreamStoragePool *stgsdk.Pool
+var CloudreamStoragePool stgsdk.Pool
 
 func InitCloudreamStoragePool(cfg *stgsdk.Config) {
     CloudreamStoragePool = stgsdk.NewPool(cfg)
 }
 
-var UnifyOpsPool *uopsdk.Pool
+var UnifyOpsPool uopsdk.Pool
 
 func IniUnifyOpsPool(cfg *uopsdk.Config) {
     UnifyOpsPool = uopsdk.NewPool(cfg)
 }
 
-var PCMPool *pcmsdk.Pool
+var PCMPool pcmsdk.Pool
 
 func InitPCMPool(cfg *pcmsdk.Config) {
     PCMPool = pcmsdk.NewPool(cfg)
@@ -2,15 +2,15 @@ package advisor
 
 import (
     "gitlink.org.cn/cloudream/common/pkgs/mq"
-    mymq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
+    schmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
 )
 
 type Client struct {
-    rabbitCli *mq.RabbitMQClient
+    rabbitCli *mq.RabbitMQTransport
 }
 
-func NewClient(cfg *mymq.Config) (*Client, error) {
-    rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), ServerQueueName, "")
+func NewClient(cfg *schmq.Config) (*Client, error) {
+    rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), ServerQueueName, "")
     if err != nil {
         return nil, err
     }

@@ -24,36 +24,24 @@ func (c *Client) Close() {
     c.rabbitCli.Close()
 }
 
-type PoolClient struct {
-    *Client
-    owner *Pool
+type Pool interface {
+    Acquire() (*Client, error)
+    Release(cli *Client)
 }
 
-func (c *PoolClient) Close() {
-    c.owner.Release(c)
+type pool struct {
+    mqcfg *schmq.Config
 }
 
-type Pool struct {
-    mqcfg *mymq.Config
-}
-
-func NewPool(mqcfg *mymq.Config) *Pool {
-    return &Pool{
+func NewPool(mqcfg *schmq.Config) Pool {
+    return &pool{
         mqcfg: mqcfg,
     }
 }
-func (p *Pool) Acquire() (*PoolClient, error) {
-    cli, err := NewClient(p.mqcfg)
-    if err != nil {
-        return nil, err
-    }
-
-    return &PoolClient{
-        Client: cli,
-        owner: p,
-    }, nil
+func (p *pool) Acquire() (*Client, error) {
+    return NewClient(p.mqcfg)
 }
 
-func (p *Pool) Release(cli *PoolClient) {
-    cli.Client.Close()
+func (p *pool) Release(cli *Client) {
+    cli.Close()
 }

@@ -2,15 +2,15 @@ package collector
 
 import (
     "gitlink.org.cn/cloudream/common/pkgs/mq"
-    mymq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
+    schmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
 )
 
 type Client struct {
-    rabbitCli *mq.RabbitMQClient
+    rabbitCli *mq.RabbitMQTransport
 }
 
-func NewClient(cfg *mymq.Config) (*Client, error) {
-    rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), ServerQueueName, "")
+func NewClient(cfg *schmq.Config) (*Client, error) {
+    rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), ServerQueueName, "")
     if err != nil {
         return nil, err
     }

@@ -24,36 +24,24 @@ func (c *Client) Close() {
     c.rabbitCli.Close()
 }
 
-type PoolClient struct {
-    *Client
-    owner *Pool
+type Pool interface {
+    Acquire() (*Client, error)
+    Release(cli *Client)
 }
 
-func (c *PoolClient) Close() {
-    c.owner.Release(c)
+type pool struct {
+    mqcfg *schmq.Config
 }
 
-type Pool struct {
-    mqcfg *mymq.Config
-}
-
-func NewPool(mqcfg *mymq.Config) *Pool {
-    return &Pool{
+func NewPool(mqcfg *schmq.Config) Pool {
+    return &pool{
         mqcfg: mqcfg,
     }
 }
-func (p *Pool) Acquire() (*PoolClient, error) {
-    cli, err := NewClient(p.mqcfg)
-    if err != nil {
-        return nil, err
-    }
-
-    return &PoolClient{
-        Client: cli,
-        owner: p,
-    }, nil
+func (p *pool) Acquire() (*Client, error) {
+    return NewClient(p.mqcfg)
 }
 
-func (p *Pool) Release(cli *PoolClient) {
-    cli.Client.Close()
+func (p *pool) Release(cli *Client) {
+    cli.Close()
 }

@@ -2,15 +2,15 @@ package executor
 
 import (
     "gitlink.org.cn/cloudream/common/pkgs/mq"
-    mymq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
+    schmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
 )
 
 type Client struct {
-    rabbitCli *mq.RabbitMQClient
+    rabbitCli *mq.RabbitMQTransport
 }
 
-func NewClient(cfg *mymq.Config) (*Client, error) {
-    rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), ServerQueueName, "")
+func NewClient(cfg *schmq.Config) (*Client, error) {
+    rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), ServerQueueName, "")
     if err != nil {
         return nil, err
     }

@@ -24,36 +24,24 @@ func (c *Client) Close() {
     c.rabbitCli.Close()
 }
 
-type PoolClient struct {
-    *Client
-    owner *Pool
+type Pool interface {
+    Acquire() (*Client, error)
+    Release(cli *Client)
 }
 
-func (c *PoolClient) Close() {
-    c.owner.Release(c)
+type pool struct {
+    mqcfg *schmq.Config
 }
 
-type Pool struct {
-    mqcfg *mymq.Config
-}
-
-func NewPool(mqcfg *mymq.Config) *Pool {
-    return &Pool{
+func NewPool(mqcfg *schmq.Config) Pool {
+    return &pool{
         mqcfg: mqcfg,
     }
 }
-func (p *Pool) Acquire() (*PoolClient, error) {
-    cli, err := NewClient(p.mqcfg)
-    if err != nil {
-        return nil, err
-    }
-
-    return &PoolClient{
-        Client: cli,
-        owner: p,
-    }, nil
+func (p *pool) Acquire() (*Client, error) {
+    return NewClient(p.mqcfg)
 }
 
-func (p *Pool) Release(cli *PoolClient) {
-    cli.Client.Close()
+func (p *pool) Release(cli *Client) {
+    cli.Close()
 }

@@ -44,5 +44,5 @@ func NewAdvisorTaskStatus(taskID string, status exectsk.TaskStatus) AdvisorTaskS
     }
 }
 func (c *Client) ReportAdvisorTaskStatus(msg *ReportAdvisorTaskStatus, opts ...mq.RequestOption) (*ReportAdvisorTaskStatusResp, error) {
-    return mq.Request(Service.ReportAdvisorTaskStatus, c.rabbitCli, msg, opts...)
+    return mq.Request(Service.ReportAdvisorTaskStatus, c.roundTripper, msg, opts...)
 }

@@ -2,58 +2,52 @@ package manager
 
 import (
     "gitlink.org.cn/cloudream/common/pkgs/mq"
-    mymq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
+    schmq "gitlink.org.cn/cloudream/scheduler/common/pkgs/mq"
 )
 
 type Client struct {
-    rabbitCli *mq.RabbitMQClient
+    roundTripper mq.RoundTripper
 }
 
-func NewClient(cfg *mymq.Config) (*Client, error) {
-    rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), ServerQueueName, "")
+func NewClient(cfg *schmq.Config) (*Client, error) {
+    rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), ServerQueueName, "")
     if err != nil {
         return nil, err
     }
 
     return &Client{
-        rabbitCli: rabbitCli,
+        roundTripper: rabbitCli,
+    }, nil
+}
+
+func NewClientWithRoundTripper(rd mq.RoundTripper) (*Client, error) {
+    return &Client{
+        roundTripper: rd,
     }, nil
 }
 
 func (c *Client) Close() {
-    c.rabbitCli.Close()
+    c.roundTripper.Close()
 }
 
-type PoolClient struct {
-    *Client
-    owner *Pool
+type Pool interface {
+    Acquire() (*Client, error)
+    Release(cli *Client)
 }
 
-func (c *PoolClient) Close() {
-    c.owner.Release(c)
+type pool struct {
+    mqcfg *schmq.Config
 }
 
-type Pool struct {
-    mqcfg *mymq.Config
-}
-
-func NewPool(mqcfg *mymq.Config) *Pool {
-    return &Pool{
+func NewPool(mqcfg *schmq.Config) Pool {
+    return &pool{
         mqcfg: mqcfg,
     }
 }
-func (p *Pool) Acquire() (*PoolClient, error) {
-    cli, err := NewClient(p.mqcfg)
-    if err != nil {
-        return nil, err
-    }
-
-    return &PoolClient{
-        Client: cli,
-        owner: p,
-    }, nil
+func (p *pool) Acquire() (*Client, error) {
+    return NewClient(p.mqcfg)
 }
 
-func (p *Pool) Release(cli *PoolClient) {
-    cli.Client.Close()
+func (p *pool) Release(cli *Client) {
+    cli.Close()
 }
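The manager client additionally gains NewClientWithRoundTripper, so tests can build a Client around a fake transport instead of a live RabbitMQ connection. A possible test-side pool built on top of it (hypothetical; mq.RoundTripper's method set lives in the common mq package and is not shown in this diff, so fakeTransport stands in for whatever satisfies it):

    type fakeManagerPool struct {
        rt mq.RoundTripper // e.g. a fakeTransport test double
    }

    func (p *fakeManagerPool) Acquire() (*manager.Client, error) {
        return manager.NewClientWithRoundTripper(p.rt)
    }

    func (p *fakeManagerPool) Release(cli *manager.Client) {}

    // In a test: schglb.ManagerMQPool = &fakeManagerPool{rt: fakeTransport}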
@@ -43,7 +43,7 @@ func NewExecutorTaskStatus(taskID string, status exectsk.TaskStatus) ExecutorTas
     }
 }
 func (c *Client) ReportExecutorTaskStatus(msg *ReportExecutorTaskStatus, opts ...mq.RequestOption) (*ReportExecutorTaskStatusResp, error) {
-    return mq.Request(Service.ReportExecutorTaskStatus, c.rabbitCli, msg, opts...)
+    return mq.Request(Service.ReportExecutorTaskStatus, c.roundTripper, msg, opts...)
 }
 
 func init() {

@@ -69,5 +69,5 @@ func NewGetImageInfoResp(imageID schsdk.ImageID, packageID int64, importingInfo
     }
 }
 func (c *Client) GetImageInfo(msg *GetImageInfo, opts ...mq.RequestOption) (*GetImageInfoResp, error) {
-    return mq.Request(Service.GetImageInfo, c.rabbitCli, msg, opts...)
+    return mq.Request(Service.GetImageInfo, c.roundTripper, msg, opts...)
 }

@@ -41,7 +41,7 @@ func NewSubmitJobSetResp(jobSetID schsdk.JobSetID) *SubmitJobSetResp {
     }
 }
 func (c *Client) SubmitJobSet(msg *SubmitJobSet, opts ...mq.RequestOption) (*SubmitJobSetResp, error) {
-    return mq.Request(Service.SubmitJobSet, c.rabbitCli, msg, opts...)
+    return mq.Request(Service.SubmitJobSet, c.roundTripper, msg, opts...)
 }
 
 // JobSet中需要使用的一个文件上传完成

@@ -70,7 +70,7 @@ func NewJobSetLocalFileUploadedResp() *JobSetLocalFileUploadedResp {
     return &JobSetLocalFileUploadedResp{}
 }
 func (c *Client) JobSetLocalFileUploaded(msg *JobSetLocalFileUploaded, opts ...mq.RequestOption) (*JobSetLocalFileUploadedResp, error) {
-    return mq.Request(Service.JobSetLocalFileUploaded, c.rabbitCli, msg, opts...)
+    return mq.Request(Service.JobSetLocalFileUploaded, c.roundTripper, msg, opts...)
 }
 
 // 获取任务数据

@@ -94,7 +94,7 @@ func NewGetJobResp(job jobmod.Job) *GetJobResp {
     }
 }
 func (c *Client) GetJob(msg *GetJob, opts ...mq.RequestOption) (*GetJobResp, error) {
-    return mq.Request(Service.GetJob, c.rabbitCli, msg, opts...)
+    return mq.Request(Service.GetJob, c.roundTripper, msg, opts...)
 }
 
 /*

@@ -48,7 +48,7 @@ func (r *Reporter) Serve() error {
     if err != nil {
         return fmt.Errorf("new manager client: %w", err)
     }
-    defer magCli.Close()
+    defer schglb.ManagerMQPool.Release(magCli)
 
     ticker := time.NewTicker(r.reportInterval)
     defer ticker.Stop()

@@ -15,7 +15,7 @@ func (svc *Service) DeleteImage(msg *execmq.DeleteImage) (*execmq.DeleteImageRes
         logger.Warnf("new pcm client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new pcm client failed")
     }
-    defer pcmCli.Close()
+    defer schglb.PCMPool.Release(pcmCli)
 
     resp, err := pcmCli.DeleteImage(pcmsdk.DeleteImageReq{
         SlwNodeID: msg.SlwNodeID,

@@ -34,7 +34,7 @@ func (svc *Service) DeleteTask(msg *execmq.DeleteTask) (*execmq.DeleteTaskResp,
         logger.Warnf("new pcm client, err: %s", err.Error())
         return nil, mq.Failed(errorcode.OperationFailed, "new pcm client failed")
     }
-    defer pcmCli.Close()
+    defer schglb.PCMPool.Release(pcmCli)
 
     resp, err := pcmCli.DeleteTask(pcmsdk.DeleteTaskReq{
         SlwNodeID: msg.SlwNodeID,

@@ -44,7 +44,7 @@ func (t *CacheMovePackage) do(ctx TaskContext) ([]stgsdk.ObjectCacheInfo, error)
     if err != nil {
         return nil, fmt.Errorf("new cloudream storage client: %w", err)
     }
-    defer stgCli.Close()
+    defer schglb.CloudreamStoragePool.Release(stgCli)
 
     resp, err := stgCli.CacheMovePackage(stgsdk.CacheMovePackageReq{
         UserID: t.UserID,

@@ -44,7 +44,7 @@ func (t *PCMScheduleTask) do(taskID string, ctx TaskContext) error {
     if err != nil {
         return fmt.Errorf("new pcm client: %w", err)
     }
-    defer pcmCli.Close()
+    defer schglb.PCMPool.Release(pcmCli)
 
     resp, err := pcmCli.ScheduleTask(pcmsdk.ScheduleTaskReq{
         SlwNodeID: t.SlwNodeID,

@@ -43,7 +43,7 @@ func (t *PCMUploadImage) do(taskID string, ctx TaskContext) error {
     if err != nil {
         return fmt.Errorf("new pcm client: %w", err)
     }
-    defer pcmCli.Close()
+    defer schglb.PCMPool.Release(pcmCli)
 
     resp, err := pcmCli.UploadImage(pcmsdk.UploadImageReq{
         SlwNodeID: t.SlwNodeID,

@@ -43,7 +43,7 @@ func (t *StorageCreatePackage) do(taskID string, ctx TaskContext) error {
     if err != nil {
         return fmt.Errorf("new cloudream storage client: %w", err)
     }
-    defer stgCli.Close()
+    defer schglb.CloudreamStoragePool.Release(stgCli)
 
     resp, err := stgCli.StorageCreatePackage(stgsdk.StorageCreatePackageReq{
         UserID: t.UserID,

@@ -45,7 +45,7 @@ func (t *StorageLoadPackage) do(ctx TaskContext) error {
     if err != nil {
         return fmt.Errorf("new cloudream storage client: %w", err)
     }
-    defer stgCli.Close()
+    defer schglb.CloudreamStoragePool.Release(stgCli)
 
     return stgCli.StorageLoadPackage(stgsdk.StorageLoadPackageReq{
         UserID: t.UserID,

@@ -31,7 +31,7 @@ type OnTimeoutCallbackFn func(jobID schsdk.JobID, fullTaskID string)
 type Manager struct {
     advisors map[schmod.AdvisorID]*AdvisorInfo
     lock sync.Mutex
-    advCli *advmq.PoolClient
+    advCli *advmq.Client
 
     onTaskUpdated OnTaskUpdatedCallbackFn
     onTaskTimeout OnTimeoutCallbackFn

@@ -32,7 +32,7 @@ type OnTimeoutCallbackFn func(jobID schsdk.JobID, fullTaskID string)
 type Manager struct {
     executors map[schmod.ExecutorID]*ExecutorInfo
     lock sync.Mutex
-    exeCli *exemq.PoolClient
+    exeCli *exemq.Client
 
     onTaskUpdated OnTaskUpdatedCallbackFn
     onTaskTimeout OnTimeoutCallbackFn

@@ -57,7 +57,7 @@ func (h *AdjustingHandler) Handle(job jobmod.Job) {
         h.changeJobState(job, jobmod.NewStateFailed(fmt.Sprintf("new collector client: %s", err), job.GetState()))
         return
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     getNodeResp, err := colCli.GetSlwNodeInfo(colmq.NewGetSlwNodeInfo(adjustingState.Scheme.TargetSlwNodeID))
     if err != nil {

@@ -147,7 +147,7 @@ func (h *ExecutingHandler) onResourceJobEvent(evt event.Event, job *executingJob
         h.changeJobState(job.job, jobmod.NewStateFailed(fmt.Sprintf("new collector client: %s", err.Error()), job.state))
         return
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     getNodeResp, err := colCli.GetSlwNodeInfo(colmq.NewGetSlwNodeInfo(tarNorJob.TargetSlwNodeID))
     if err != nil {

@@ -59,7 +59,7 @@ func (h *PreSchedulingHandler) Handle(job jobmod.Job) {
         h.changeJobState(job, jobmod.NewStateFailed(fmt.Sprintf("new collector client: %s", err), job.GetState()))
         return
     }
-    defer colCli.Close()
+    defer schglb.CollectorMQPool.Release(colCli)
 
     getNodeResp, err := colCli.GetSlwNodeInfo(colmq.NewGetSlwNodeInfo(preSchState.Scheme.TargetSlwNodeID))
     if err != nil {