Move coor's types to common

This commit is contained in:
parent e9200bc78c
commit dd23f68926
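In essence, every file in this commit swaps its types import: the old aliases jcsypes (for gitlink.org.cn/cloudream/jcs-pub/common/types) and cortypes (for gitlink.org.cn/cloudream/jcs-pub/coordinator/types) are both replaced by a single jcstypes alias pointing at the common types package, and all type references are rewritten accordingly. A minimal sketch of the resulting import style follows; the main wrapper and variable names are illustrative only and not taken from the repository:

package main

import (
	"fmt"

	// Formerly imported under two aliases: cortypes for the coordinator
	// types package and jcsypes for the common types package. After this
	// commit both collapse into one alias on common/types.
	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func main() {
	var objID jcstypes.ObjectID        // was jcsypes.ObjectID
	var token jcstypes.UserAccessToken // was cortypes.UserAccessToken
	fmt.Println(objID, token)
}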
@@ -7,7 +7,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/async"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type AccessStatEventChan = async.UnboundChannel[AccessStatEvent]
@@ -30,9 +30,9 @@ type AccessStat struct {
}

type entryKey struct {
-objID jcsypes.ObjectID
-pkgID jcsypes.PackageID
-spaceID jcsypes.UserSpaceID
+objID jcstypes.ObjectID
+pkgID jcstypes.PackageID
+spaceID jcstypes.UserSpaceID
}

func NewAccessStat(cfg Config, db *db.DB) *AccessStat {
@@ -44,7 +44,7 @@ func NewAccessStat(cfg Config, db *db.DB) *AccessStat {
}
}

-func (p *AccessStat) AddAccessCounter(objID jcsypes.ObjectID, pkgID jcsypes.PackageID, spaceID jcsypes.UserSpaceID, value float64) {
+func (p *AccessStat) AddAccessCounter(objID jcstypes.ObjectID, pkgID jcstypes.PackageID, spaceID jcstypes.UserSpaceID, value float64) {
p.lock.Lock()
defer p.lock.Unlock()

@@ -15,7 +15,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/accesstoken"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
-cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type KeeperEvent interface {
@@ -30,7 +30,7 @@ type ExitEvent struct {
type Keeper struct {
cfg Config
enabled bool
-token cortypes.UserAccessToken
+token jcstypes.UserAccessToken
priKey ed25519.PrivateKey
lock sync.RWMutex
done chan any
@@ -144,7 +144,7 @@ func (k *Keeper) Stop() {
}
}

-func (k *Keeper) GetToken() cortypes.UserAccessToken {
+func (k *Keeper) GetToken() jcstypes.UserAccessToken {
k.lock.RLock()
defer k.lock.RUnlock()

@@ -6,7 +6,7 @@ import (

"github.com/spf13/cobra"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/config"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
@@ -38,15 +38,15 @@ func migrate(configPath string) {
}
db = db.Set("gorm:table_options", "CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci")

-migrateOne(db, jcsypes.Bucket{})
-migrateOne(db, jcsypes.ObjectAccessStat{})
-migrateOne(db, jcsypes.ObjectBlock{})
-migrateOne(db, jcsypes.Object{})
-migrateOne(db, jcsypes.PackageAccessStat{})
-migrateOne(db, jcsypes.Package{})
-migrateOne(db, jcsypes.PinnedObject{})
-migrateOne(db, jcsypes.UserSpace{})
-migrateOne(db, jcsypes.SpaceSyncTask{})
+migrateOne(db, jcstypes.Bucket{})
+migrateOne(db, jcstypes.ObjectAccessStat{})
+migrateOne(db, jcstypes.ObjectBlock{})
+migrateOne(db, jcstypes.Object{})
+migrateOne(db, jcstypes.PackageAccessStat{})
+migrateOne(db, jcstypes.Package{})
+migrateOne(db, jcstypes.PinnedObject{})
+migrateOne(db, jcstypes.UserSpace{})
+migrateOne(db, jcstypes.SpaceSyncTask{})

fmt.Println("migrate success")
}

@@ -27,7 +27,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gitlink.org.cn/cloudream/jcs-pub/common/types/datamap"
)

@@ -61,7 +61,7 @@ func doTest(svc *services.Service) {

ft = ioswitch2.NewFromTo()
ft.AddFrom(ioswitch2.NewFromShardstore("Full1AE5436AF72D8EF93923486E0E167315CEF0C91898064DADFAC22216FFBC5E3D", *space1, ioswitch2.RawStream()))
-ft.AddTo(ioswitch2.NewToBaseStore(*space2, jcsypes.PathFromComps("test3.txt")))
+ft.AddTo(ioswitch2.NewToBaseStore(*space2, jcstypes.PathFromComps("test3.txt")))
plans := exec.NewPlanBuilder()
parser.Parse(ft, plans)
fmt.Println(plans)

@@ -11,7 +11,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/speedstats"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

const (
@@ -21,18 +21,18 @@ const (
type DownloadIterator = iterator.Iterator[*Downloading]

type DownloadReqeust struct {
-ObjectID jcsypes.ObjectID
+ObjectID jcstypes.ObjectID
Offset int64
Length int64
}

type downloadReqeust2 struct {
-Detail *jcsypes.ObjectDetail
+Detail *jcstypes.ObjectDetail
Raw DownloadReqeust
}

type Downloading struct {
-Object *jcsypes.Object
+Object *jcstypes.Object
File io.ReadCloser // 文件流,如果文件不存在,那么为nil
Request DownloadReqeust
}

@@ -65,7 +65,7 @@ func NewDownloader(cfg Config, conn *connectivity.Collector, stgPool *pool.Pool,
}

func (d *Downloader) DownloadObjects(reqs []DownloadReqeust) DownloadIterator {
-objIDs := make([]jcsypes.ObjectID, len(reqs))
+objIDs := make([]jcstypes.ObjectID, len(reqs))
for i, req := range reqs {
objIDs[i] = req.ObjectID
}
@@ -79,7 +79,7 @@ func (d *Downloader) DownloadObjects(reqs []DownloadReqeust) DownloadIterator {
return iterator.FuseError[*Downloading](fmt.Errorf("request to db: %w", err))
}

-detailsMap := make(map[jcsypes.ObjectID]*jcsypes.ObjectDetail)
+detailsMap := make(map[jcstypes.ObjectID]*jcstypes.ObjectDetail)
for _, detail := range objDetails {
d := detail
detailsMap[detail.Object.ObjectID] = &d
@@ -96,7 +96,7 @@ func (d *Downloader) DownloadObjects(reqs []DownloadReqeust) DownloadIterator {
return NewDownloadObjectIterator(d, req2s)
}

-func (d *Downloader) DownloadObjectByDetail(detail jcsypes.ObjectDetail, off int64, length int64) (*Downloading, error) {
+func (d *Downloader) DownloadObjectByDetail(detail jcstypes.ObjectDetail, off int64, length int64) (*Downloading, error) {
req2s := []downloadReqeust2{{
Detail: &detail,
Raw: DownloadReqeust{
@@ -110,56 +110,56 @@ func (d *Downloader) DownloadObjectByDetail(detail jcsypes.ObjectDetail, off int
return iter.MoveNext()
}

-func (d *Downloader) DownloadPackage(pkgID jcsypes.PackageID, prefix string) (jcsypes.Package, DownloadIterator, error) {
-pkg, details, err := db.DoTx02(d.db, func(tx db.SQLContext) (jcsypes.Package, []jcsypes.ObjectDetail, error) {
+func (d *Downloader) DownloadPackage(pkgID jcstypes.PackageID, prefix string) (jcstypes.Package, DownloadIterator, error) {
+pkg, details, err := db.DoTx02(d.db, func(tx db.SQLContext) (jcstypes.Package, []jcstypes.ObjectDetail, error) {
pkg, err := d.db.Package().GetByID(tx, pkgID)
if err != nil {
-return jcsypes.Package{}, nil, err
+return jcstypes.Package{}, nil, err
}

-var details []jcsypes.ObjectDetail
+var details []jcstypes.ObjectDetail
if prefix != "" {
objs, err := d.db.Object().GetWithPathPrefix(tx, pkgID, prefix)
if err != nil {
-return jcsypes.Package{}, nil, err
+return jcstypes.Package{}, nil, err
}

-objIDs := make([]jcsypes.ObjectID, len(objs))
+objIDs := make([]jcstypes.ObjectID, len(objs))
for i, obj := range objs {
objIDs[i] = obj.ObjectID
}

allBlocks, err := d.db.ObjectBlock().BatchGetByObjectID(tx, objIDs)
if err != nil {
-return jcsypes.Package{}, nil, err
+return jcstypes.Package{}, nil, err
}

allPinnedObjs, err := d.db.PinnedObject().BatchGetByObjectID(tx, objIDs)
if err != nil {
-return jcsypes.Package{}, nil, err
+return jcstypes.Package{}, nil, err

}
-details = make([]jcsypes.ObjectDetail, 0, len(objs))
+details = make([]jcstypes.ObjectDetail, 0, len(objs))
for _, obj := range objs {
-detail := jcsypes.ObjectDetail{
+detail := jcstypes.ObjectDetail{
Object: obj,
}
details = append(details, detail)
}

-jcsypes.DetailsFillObjectBlocks(details, allBlocks)
-jcsypes.DetailsFillPinnedAt(details, allPinnedObjs)
+jcstypes.DetailsFillObjectBlocks(details, allBlocks)
+jcstypes.DetailsFillPinnedAt(details, allPinnedObjs)
} else {
details, err = d.db.Object().GetPackageObjectDetails(tx, pkgID)
if err != nil {
-return jcsypes.Package{}, nil, err
+return jcstypes.Package{}, nil, err
}
}

return pkg, details, nil
})
if err != nil {
-return jcsypes.Package{}, nil, err
+return jcstypes.Package{}, nil, err
}

req2s := make([]downloadReqeust2, len(details))
@@ -180,11 +180,11 @@ func (d *Downloader) DownloadPackage(pkgID jcsypes.PackageID, prefix string) (jc

type ObjectECStrip struct {
Data []byte
-ObjectFileHash jcsypes.FileHash // 添加这条缓存时,Object的FileHash
+ObjectFileHash jcstypes.FileHash // 添加这条缓存时,Object的FileHash
}

type ECStripKey struct {
-ObjectID jcsypes.ObjectID
+ObjectID jcstypes.ObjectID
StripIndex int64
}

@@ -12,14 +12,14 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc/parser"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type LRCStripIterator struct {
downloader *Downloader
-object jcsypes.Object
+object jcstypes.Object
blocks []downloadBlock
-red jcsypes.LRCRedundancy
+red jcstypes.LRCRedundancy
curStripIndex int64
cache *StripCache
dataChan chan dataChanEntry
@@ -28,7 +28,7 @@ type LRCStripIterator struct {
inited bool
}

-func NewLRCStripIterator(downloder *Downloader, object jcsypes.Object, blocks []downloadBlock, red jcsypes.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator {
+func NewLRCStripIterator(downloder *Downloader, object jcstypes.Object, blocks []downloadBlock, red jcstypes.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator {
if maxPrefetch <= 0 {
maxPrefetch = 1
}

@@ -12,13 +12,13 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache"
"gitlink.org.cn/cloudream/jcs-pub/common/consts"
"gitlink.org.cn/cloudream/jcs-pub/common/types"
-cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type Request struct {
Detail types.ObjectDetail
Range math2.Range
-DestLocation cortypes.Location
+DestLocation jcstypes.Location
}

type Strategy interface {
@@ -113,7 +113,7 @@ type downloadBlock struct {
type request2 struct {
Detail types.ObjectDetail
Range math2.Range
-DestLocation cortypes.Location
+DestLocation jcstypes.Location
}

func (s *Selector) selectForNoneOrRep(req request2) (Strategy, error) {

@@ -19,7 +19,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types"
cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
"gitlink.org.cn/cloudream/jcs-pub/common/ecode"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type ObjectService struct {
@@ -113,9 +113,9 @@ func (s *ObjectService) Upload(ctx *gin.Context) {
return
}

-copyToPath := make([]jcsypes.JPath, 0, len(info.CopyToPath))
+copyToPath := make([]jcstypes.JPath, 0, len(info.CopyToPath))
for _, p := range info.CopyToPath {
-copyToPath = append(copyToPath, jcsypes.PathFromJcsPathString(p))
+copyToPath = append(copyToPath, jcstypes.PathFromJcsPathString(p))
}

up, err := s.svc.Uploader.BeginUpdate(info.PackageID, info.Affinity, info.CopyTo, copyToPath)
@@ -147,7 +147,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) {
}
path = filepath.ToSlash(path)

-err = up.Upload(jcsypes.PathFromJcsPathString(path), file)
+err = up.Upload(jcstypes.PathFromJcsPathString(path), file)
if err != nil {
log.Warnf("uploading file: %s", err.Error())
ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("uploading file %v: %v", file.FileName(), err)))
@@ -163,7 +163,7 @@ func (s *ObjectService) Upload(ctx *gin.Context) {
return
}

-uploadeds := make([]jcsypes.Object, len(pathes))
+uploadeds := make([]jcstypes.Object, len(pathes))
for i := range pathes {
uploadeds[i] = ret.Objects[pathes[i]]
}
@@ -398,7 +398,7 @@ func (s *ObjectService) DeleteByPath(ctx *gin.Context) {
return
}

-err = s.svc.ObjectSvc().Delete([]jcsypes.ObjectID{resp.Objects[0].ObjectID})
+err = s.svc.ObjectSvc().Delete([]jcstypes.ObjectID{resp.Objects[0].ObjectID})
if err != nil {
log.Warnf("deleting objects: %s", err.Error())
ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, "delete objects failed"))

@@ -21,7 +21,7 @@ import (
cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
"gitlink.org.cn/cloudream/jcs-pub/common/ecode"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/iterator"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"
)

@@ -151,9 +151,9 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
return
}

-copyToPath := make([]jcsypes.JPath, 0, len(info.CopyToPath))
+copyToPath := make([]jcstypes.JPath, 0, len(info.CopyToPath))
for _, p := range info.CopyToPath {
-copyToPath = append(copyToPath, jcsypes.PathFromJcsPathString(p))
+copyToPath = append(copyToPath, jcstypes.PathFromJcsPathString(p))
}

up, err := s.svc.Uploader.BeginCreateUpload(info.BucketID, info.Name, info.CopyTo, copyToPath)
@@ -184,7 +184,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
}
path = filepath.ToSlash(path)

-err = up.Upload(jcsypes.PathFromJcsPathString(path), file)
+err = up.Upload(jcstypes.PathFromJcsPathString(path), file)
if err != nil {
log.Warnf("uploading file: %s", err.Error())
ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("uploading file %v: %v", file.FileName(), err)))
@@ -200,7 +200,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
return
}

-objs := make([]jcsypes.Object, len(pathes))
+objs := make([]jcstypes.Object, len(pathes))
for i := range pathes {
objs[i] = ret.Objects[pathes[i]]
}
@@ -233,7 +233,7 @@ func (s *PackageService) Download(ctx *gin.Context) {
}
}

-func (s *PackageService) downloadZip(ctx *gin.Context, req cliapi.PackageDownload, pkg jcsypes.Package, iter downloader.DownloadIterator) {
+func (s *PackageService) downloadZip(ctx *gin.Context, req cliapi.PackageDownload, pkg jcstypes.Package, iter downloader.DownloadIterator) {
log := logger.WithField("HTTP", "Package.Download")

ctx.Header("Content-Disposition", "attachment; filename="+url.PathEscape(pkg.Name)+".zip")
@@ -276,7 +276,7 @@ func (s *PackageService) downloadZip(ctx *gin.Context, req cliapi.PackageDownloa
}
}

-func (s *PackageService) downloadTar(ctx *gin.Context, req cliapi.PackageDownload, pkg jcsypes.Package, iter downloader.DownloadIterator) {
+func (s *PackageService) downloadTar(ctx *gin.Context, req cliapi.PackageDownload, pkg jcstypes.Package, iter downloader.DownloadIterator) {
log := logger.WithField("HTTP", "Package.Download")

ctx.Header("Content-Disposition", "attachment; filename="+url.PathEscape(pkg.Name)+".tar")

@@ -15,7 +15,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types"
cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
"gitlink.org.cn/cloudream/jcs-pub/common/ecode"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type PresignedService struct {
@@ -156,9 +156,9 @@ func (s *PresignedService) ObjectUpload(ctx *gin.Context) {
return
}

-copyToPath := make([]jcsypes.JPath, 0, len(req.CopyToPath))
+copyToPath := make([]jcstypes.JPath, 0, len(req.CopyToPath))
for _, p := range req.CopyToPath {
-copyToPath = append(copyToPath, jcsypes.PathFromJcsPathString(p))
+copyToPath = append(copyToPath, jcstypes.PathFromJcsPathString(p))
}

up, err := s.svc.Uploader.BeginUpdate(req.PackageID, req.Affinity, req.CopyTo, copyToPath)
@@ -171,7 +171,7 @@ func (s *PresignedService) ObjectUpload(ctx *gin.Context) {

path := filepath.ToSlash(req.Path)

-err = up.Upload(jcsypes.PathFromJcsPathString(path), ctx.Request.Body)
+err = up.Upload(jcstypes.PathFromJcsPathString(path), ctx.Request.Body)
if err != nil {
log.Warnf("uploading file: %s", err.Error())
ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("uploading file %v: %v", req.Path, err)))

@@ -8,7 +8,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types"
cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
"gitlink.org.cn/cloudream/jcs-pub/common/ecode"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type SpaceSyncerService struct {
@@ -41,21 +41,21 @@ func (s *SpaceSyncerService) CreateTask(ctx *gin.Context) {
return
}

-dests := make([]jcsypes.SpaceSyncDest, 0, len(req.DestUserSpaceIDs))
+dests := make([]jcstypes.SpaceSyncDest, 0, len(req.DestUserSpaceIDs))
for _, id := range req.DestUserSpaceIDs {
-dests = append(dests, jcsypes.SpaceSyncDest{
-DestUserSpaceID: jcsypes.UserSpaceID(id),
-DestPath: jcsypes.PathFromJcsPathString(req.DestPathes[0]),
+dests = append(dests, jcstypes.SpaceSyncDest{
+DestUserSpaceID: jcstypes.UserSpaceID(id),
+DestPath: jcstypes.PathFromJcsPathString(req.DestPathes[0]),
})
}

-info, err := s.svc.SpaceSyncer.CreateTask(jcsypes.SpaceSyncTask{
+info, err := s.svc.SpaceSyncer.CreateTask(jcstypes.SpaceSyncTask{
Trigger: req.Trigger,
Mode: req.Mode,
Filters: req.Filters,
Options: req.Options,
SrcUserSpaceID: req.SrcUserSpaceID,
-SrcPath: jcsypes.PathFromJcsPathString(req.SrcPath),
+SrcPath: jcstypes.PathFromJcsPathString(req.SrcPath),
Dests: dests,
})
if err != nil {

@@ -9,7 +9,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types"
cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1"
"gitlink.org.cn/cloudream/jcs-pub/common/ecode"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type UserSpaceService struct {
@@ -52,7 +52,7 @@ func (s *UserSpaceService) CreatePackage(ctx *gin.Context) {
return
}

-pkg, err := s.svc.Uploader.UserSpaceUpload(req.UserSpaceID, jcsypes.PathFromJcsPathString(req.Path), req.BucketID, req.Name, req.SpaceAffinity)
+pkg, err := s.svc.Uploader.UserSpaceUpload(req.UserSpaceID, jcstypes.PathFromJcsPathString(req.Path), req.BucketID, req.Name, req.SpaceAffinity)
if err != nil {
log.Warnf("userspace create package: %s", err.Error())
ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, fmt.Sprintf("userspace create package: %v", err)))

@@ -8,12 +8,12 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
-cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func (m *MetaCacheHost) AddConnectivity() *Connectivity {
cache := &Connectivity{
-entries: make(map[cortypes.HubID]*ConnectivityEntry),
+entries: make(map[jcstypes.HubID]*ConnectivityEntry),
}

m.caches = append(m.caches, cache)
@@ -22,10 +22,10 @@ func (m *MetaCacheHost) AddConnectivity() *Connectivity {

type Connectivity struct {
lock sync.RWMutex
-entries map[cortypes.HubID]*ConnectivityEntry
+entries map[jcstypes.HubID]*ConnectivityEntry
}

-func (c *Connectivity) Get(from cortypes.HubID, to cortypes.HubID) *time.Duration {
+func (c *Connectivity) Get(from jcstypes.HubID, to jcstypes.HubID) *time.Duration {
for i := 0; i < 2; i++ {
c.lock.RLock()
entry, ok := c.entries[from]
@@ -60,12 +60,12 @@ func (c *Connectivity) ClearOutdated() {
}
}

-func (c *Connectivity) load(hubID cortypes.HubID) {
+func (c *Connectivity) load(hubID jcstypes.HubID) {
coorCli := stgglb.CoordinatorRPCPool.Get()

defer coorCli.Release()

-get, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{hubID}))
+get, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]jcstypes.HubID{hubID}))
if cerr != nil {
logger.Warnf("get hub connectivities: %v", cerr)
return
@@ -76,7 +76,7 @@ func (c *Connectivity) load(hubID cortypes.HubID) {

ce := &ConnectivityEntry{
From: hubID,
-To: make(map[cortypes.HubID]cortypes.HubConnectivity),
+To: make(map[jcstypes.HubID]jcstypes.HubConnectivity),
UpdateTime: time.Now(),
}

@@ -88,7 +88,7 @@ func (c *Connectivity) load(hubID cortypes.HubID) {
}

type ConnectivityEntry struct {
-From cortypes.HubID
-To map[cortypes.HubID]cortypes.HubConnectivity
+From jcstypes.HubID
+To map[jcstypes.HubID]jcstypes.HubConnectivity
UpdateTime time.Time
}

@@ -7,12 +7,12 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
-cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func (m *MetaCacheHost) AddHubMeta() *HubMeta {
meta := &HubMeta{}
-meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cortypes.HubID, cortypes.Hub]{
+meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[jcstypes.HubID, jcstypes.Hub]{
Getter: meta.load,
Expire: time.Minute * 5,
})
@@ -22,10 +22,10 @@ func (m *MetaCacheHost) AddHubMeta() *HubMeta {
}

type HubMeta struct {
-cache *SimpleMetaCache[cortypes.HubID, cortypes.Hub]
+cache *SimpleMetaCache[jcstypes.HubID, jcstypes.Hub]
}

-func (h *HubMeta) Get(hubID cortypes.HubID) *cortypes.Hub {
+func (h *HubMeta) Get(hubID jcstypes.HubID) *jcstypes.Hub {
v, ok := h.cache.Get(hubID)
if ok {
return &v
@@ -33,9 +33,9 @@ func (h *HubMeta) Get(hubID cortypes.HubID) *cortypes.Hub {
return nil
}

-func (h *HubMeta) GetMany(hubIDs []cortypes.HubID) []*cortypes.Hub {
+func (h *HubMeta) GetMany(hubIDs []jcstypes.HubID) []*jcstypes.Hub {
vs, oks := h.cache.GetMany(hubIDs)
-ret := make([]*cortypes.Hub, len(vs))
+ret := make([]*jcstypes.Hub, len(vs))
for i := range vs {
if oks[i] {
ret[i] = &vs[i]
@@ -48,8 +48,8 @@ func (h *HubMeta) ClearOutdated() {
h.cache.ClearOutdated()
}

-func (h *HubMeta) load(keys []cortypes.HubID) ([]cortypes.Hub, []bool) {
-vs := make([]cortypes.Hub, len(keys))
+func (h *HubMeta) load(keys []jcstypes.HubID) ([]jcstypes.Hub, []bool) {
+vs := make([]jcstypes.Hub, len(keys))
oks := make([]bool, len(keys))

coorCli := stgglb.CoordinatorRPCPool.Get()

@@ -8,7 +8,7 @@ import (
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
"gitlink.org.cn/cloudream/jcs-pub/common/types"
-cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func (m *MetaCacheHost) AddStorageMeta() *UserSpaceMeta {
@@ -78,7 +78,7 @@ func (s *UserSpaceMeta) load(keys []types.UserSpaceID) ([]types.UserSpaceDetail,
coorCli := stgglb.CoordinatorRPCPool.Get()
defer coorCli.Release()

-stgs := make([]cortypes.StorageType, len(spaces))
+stgs := make([]jcstypes.StorageType, len(spaces))
for i := range spaces {
stgs[i] = spaces[i].Storage
}

@@ -12,7 +12,7 @@ import (
fuse2 "gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type Mount struct {
@@ -107,14 +107,14 @@ func (m *Mount) StartReclaimSpace() {
m.vfs.ReclaimSpace()
}

-func (m *Mount) NotifyObjectInvalid(obj jcsypes.Object) {
+func (m *Mount) NotifyObjectInvalid(obj jcstypes.Object) {

}

-func (m *Mount) NotifyPackageInvalid(pkg jcsypes.Package) {
+func (m *Mount) NotifyPackageInvalid(pkg jcstypes.Package) {

}

-func (m *Mount) NotifyBucketInvalid(bkt jcsypes.Bucket) {
+func (m *Mount) NotifyBucketInvalid(bkt jcstypes.Bucket) {

}

@@ -8,7 +8,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/config"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type Mount struct {
@@ -43,14 +43,14 @@ func (m *Mount) StartReclaimSpace() {

}

-func (m *Mount) NotifyObjectInvalid(obj jcsypes.Object) {
+func (m *Mount) NotifyObjectInvalid(obj jcstypes.Object) {

}

-func (m *Mount) NotifyPackageInvalid(pkg jcsypes.Package) {
+func (m *Mount) NotifyPackageInvalid(pkg jcstypes.Package) {

}

-func (m *Mount) NotifyBucketInvalid(bkt jcsypes.Bucket) {
+func (m *Mount) NotifyBucketInvalid(bkt jcstypes.Bucket) {

}

@@ -21,7 +21,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/config"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/uploader"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type CacheEntry interface {
@@ -196,7 +196,7 @@ func (c *Cache) CreateFile(pathComps []string) *CacheFile {
// 尝试加载缓存文件,如果文件不存在,则使用obj的信息创建一个新缓存文件,而如果obj为nil,那么会返回nil。
//
// 记得使用Release减少引用计数
-func (c *Cache) LoadFile(pathComps []string, obj *jcsypes.Object) *CacheFile {
+func (c *Cache) LoadFile(pathComps []string, obj *jcstypes.Object) *CacheFile {
c.lock.Lock()
defer c.lock.Unlock()

@@ -490,7 +490,7 @@ func (c *Cache) Move(pathComps []string, newPathComps []string) error {
type syncPackage struct {
bktName string
pkgName string
-pkg jcsypes.Package
+pkg jcstypes.Package
upObjs []*uploadingObject
}

@@ -917,7 +917,7 @@ func (c *Cache) doUpdatingOnly(pkgs []*syncPackage) {
pathes := make([]string, 0, len(p.upObjs))
modTimes := make([]time.Time, 0, len(p.upObjs))
for _, obj := range p.upObjs {
-pathes = append(pathes, jcsypes.JoinObjectPath(obj.pathComps[2:]...))
+pathes = append(pathes, jcstypes.JoinObjectPath(obj.pathComps[2:]...))
modTimes = append(modTimes, obj.modTime)
}

@@ -1008,7 +1008,7 @@ func (c *Cache) doUploading(pkgs []*syncPackage) {

counter := io2.Counter(&rd)

-err = upder.Upload(jcsypes.PathFromComps(o.pathComps[2:]...), counter, uploader.UploadOption{
+err = upder.Upload(jcstypes.PathFromComps(o.pathComps[2:]...), counter, uploader.UploadOption{
CreateTime: o.modTime,
})
if err != nil {
@@ -1036,8 +1036,8 @@ func (c *Cache) doUploading(pkgs []*syncPackage) {
continue
}

-oldPath := jcsypes.JoinObjectPath(o.pathComps[2:]...)
-newPath := jcsypes.JoinObjectPath(o.cache.pathComps[2:]...)
+oldPath := jcstypes.JoinObjectPath(o.pathComps[2:]...)
+newPath := jcstypes.JoinObjectPath(o.cache.pathComps[2:]...)

if o.isDeleted {
upder.CancelObject(oldPath)

@@ -14,7 +14,7 @@ import (
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/utils/serder"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type CacheLevel int
@@ -102,7 +102,7 @@ type CacheFile struct {
cache *Cache
pathComps []string
info FileInfo
-remoteObj *jcsypes.Object
+remoteObj *jcstypes.Object
rwLock *sync.RWMutex
readers []*CacheFileHandle
writers []*CacheFileHandle
@@ -262,7 +262,7 @@ func loadCacheFile(cache *Cache, pathComps []string) (*CacheFile, error) {
return ch, nil
}

-func newCacheFileFromObject(cache *Cache, pathComps []string, obj *jcsypes.Object) (*CacheFile, error) {
+func newCacheFileFromObject(cache *Cache, pathComps []string, obj *jcstypes.Object) (*CacheFile, error) {
metaPath := cache.GetCacheMetaPath(pathComps...)
dataPath := cache.GetCacheDataPath(pathComps...)

@@ -9,7 +9,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"
)

@@ -35,7 +35,7 @@ func child(vfs *Vfs, ctx context.Context, parent FuseNode, name string) (fuse.Fs
return nil
}

-objPath := jcsypes.JoinObjectPath(childPathComps[2:]...)
+objPath := jcstypes.JoinObjectPath(childPathComps[2:]...)
obj, err := d.Object().GetByPath(tx, pkg.PackageID, objPath)
if err == nil {
ret = newFileFromObject(vfs, childPathComps, obj)
@@ -45,7 +45,7 @@ func child(vfs *Vfs, ctx context.Context, parent FuseNode, name string) (fuse.Fs
return err
}

-err = d.Object().HasObjectWithPrefix(tx, pkg.PackageID, objPath+jcsypes.ObjectPathSeparator)
+err = d.Object().HasObjectWithPrefix(tx, pkg.PackageID, objPath+jcstypes.ObjectPathSeparator)
if err == nil {
dir := vfs.cache.LoadDir(childPathComps, &cache.CreateDirOption{
ModTime: time.Now(),
@@ -98,10 +98,10 @@ func listChildren(vfs *Vfs, ctx context.Context, parent FuseNode) ([]fuse.FsEntr
return err
}

-objPath := jcsypes.JoinObjectPath(myPathComps[2:]...)
+objPath := jcstypes.JoinObjectPath(myPathComps[2:]...)
objPrefix := objPath
if objPath != "" {
-objPrefix += jcsypes.ObjectPathSeparator
+objPrefix += jcstypes.ObjectPathSeparator
}

objs, coms, err := d.Object().GetByPrefixGrouped(tx, pkg.PackageID, objPrefix)
@@ -110,8 +110,8 @@ func listChildren(vfs *Vfs, ctx context.Context, parent FuseNode) ([]fuse.FsEntr
}

for _, dir := range coms {
-dir = strings.TrimSuffix(dir, jcsypes.ObjectPathSeparator)
-pathComps := lo2.AppendNew(myPathComps, jcsypes.BaseName(dir))
+dir = strings.TrimSuffix(dir, jcstypes.ObjectPathSeparator)
+pathComps := lo2.AppendNew(myPathComps, jcstypes.BaseName(dir))

cd := vfs.cache.LoadDir(pathComps, &cache.CreateDirOption{
ModTime: time.Now(),
@@ -124,7 +124,7 @@ func listChildren(vfs *Vfs, ctx context.Context, parent FuseNode) ([]fuse.FsEntr
}

for _, obj := range objs {
-pathComps := lo2.AppendNew(myPathComps, jcsypes.BaseName(obj.Path))
+pathComps := lo2.AppendNew(myPathComps, jcstypes.BaseName(obj.Path))
file := newFileFromObject(vfs, pathComps, obj)
dbEntries[file.Name()] = file
}
@@ -179,14 +179,14 @@ func newFile(vfs *Vfs, ctx context.Context, name string, parent FuseNode, flags

func removeChild(vfs *Vfs, ctx context.Context, name string, parent FuseNode) error {
pathComps := lo2.AppendNew(parent.PathComps(), name)
-joinedPath := jcsypes.JoinObjectPath(pathComps[2:]...)
+joinedPath := jcstypes.JoinObjectPath(pathComps[2:]...)
d := vfs.db

// TODO 生成系统事件
return vfs.db.DoTx(func(tx db.SQLContext) error {
pkg, err := d.Package().GetByFullName(tx, pathComps[0], pathComps[1])
if err == nil {
-err := d.Object().HasObjectWithPrefix(tx, pkg.PackageID, joinedPath+jcsypes.ObjectPathSeparator)
+err := d.Object().HasObjectWithPrefix(tx, pkg.PackageID, joinedPath+jcstypes.ObjectPathSeparator)
if err == nil {
return fuse.ErrNotEmpty
}
@@ -211,7 +211,7 @@ func removeChild(vfs *Vfs, ctx context.Context, name string, parent FuseNode) er
func moveChild(vfs *Vfs, ctx context.Context, oldName string, oldParent FuseNode, newName string, newParent FuseNode) error {
newParentPath := newParent.PathComps()
newChildPath := lo2.AppendNew(newParentPath, newName)
-newChildPathJoined := jcsypes.JoinObjectPath(newChildPath[2:]...)
+newChildPathJoined := jcstypes.JoinObjectPath(newChildPath[2:]...)

// 不允许移动任何内容到Package层级以上
if len(newParentPath) < 2 {
@@ -219,7 +219,7 @@ func moveChild(vfs *Vfs, ctx context.Context, oldName string, oldParent FuseNode
}

oldChildPath := lo2.AppendNew(oldParent.PathComps(), oldName)
-oldChildPathJoined := jcsypes.JoinObjectPath(oldChildPath[2:]...)
+oldChildPathJoined := jcstypes.JoinObjectPath(oldChildPath[2:]...)

// 先更新远程,再更新本地,因为远程使用事务更新,可以回滚,而本地不行
return vfs.db.DoTx(func(tx db.SQLContext) error {
@@ -259,7 +259,7 @@ func moveRemote(vfs *Vfs, tx db.SQLContext, oldChildPath []string, newParentPath
return fuse.ErrExists
}

-err = d.Object().HasObjectWithPrefix(tx, newPkg.PackageID, newChildPathJoined+jcsypes.ObjectPathSeparator)
+err = d.Object().HasObjectWithPrefix(tx, newPkg.PackageID, newChildPathJoined+jcstypes.ObjectPathSeparator)
if err == nil {
return fuse.ErrExists
}
@@ -283,17 +283,17 @@
oldObj.PackageID = newPkg.PackageID
oldObj.Path = newChildPathJoined

-return d.Object().BatchUpdate(tx, []jcsypes.Object{oldObj})
+return d.Object().BatchUpdate(tx, []jcstypes.Object{oldObj})
}
if err != gorm.ErrRecordNotFound {
return err
}

-err = d.Object().HasObjectWithPrefix(tx, oldPkg.PackageID, oldChildPathJoined+jcsypes.ObjectPathSeparator)
+err = d.Object().HasObjectWithPrefix(tx, oldPkg.PackageID, oldChildPathJoined+jcstypes.ObjectPathSeparator)
if err == nil {
return d.Object().MoveByPrefix(tx,
-oldPkg.PackageID, oldChildPathJoined+jcsypes.ObjectPathSeparator,
-newPkg.PackageID, newChildPathJoined+jcsypes.ObjectPathSeparator,
+oldPkg.PackageID, oldChildPathJoined+jcstypes.ObjectPathSeparator,
+newPkg.PackageID, newChildPathJoined+jcstypes.ObjectPathSeparator,
)
}
if err == gorm.ErrRecordNotFound {

@@ -9,7 +9,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"
)

@@ -117,7 +117,7 @@ func (r *FuseBucket) listChildren() ([]fuse.FsEntry, error) {
return nil, err
}

-pkgMap := make(map[string]*jcsypes.Package)
+pkgMap := make(map[string]*jcstypes.Package)
for _, pkg := range pkgs {
p := pkg
pkgMap[pkg.Name] = &p

@@ -8,7 +8,7 @@ import (
db2 "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"
)

@@ -108,7 +108,7 @@ func (r *FuseDir) loadCacheDir() *cache.CacheDir {
return err
}

-err = r.vfs.db.Object().HasObjectWithPrefix(tx, pkg.PackageID, jcsypes.JoinObjectPath(r.pathComps[2:]...))
+err = r.vfs.db.Object().HasObjectWithPrefix(tx, pkg.PackageID, jcstypes.JoinObjectPath(r.pathComps[2:]...))
if err == nil {
createOpt = &cache.CreateDirOption{
ModTime: time.Now(),

@@ -6,7 +6,7 @@ import (

"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"
)

@@ -28,7 +28,7 @@ func newFileFromCache(info cache.CacheEntryInfo, vfs *Vfs) *FuseFileNode {
}
}

-func newFileFromObject(vfs *Vfs, pathComps []string, obj jcsypes.Object) *FuseFileNode {
+func newFileFromObject(vfs *Vfs, pathComps []string, obj jcstypes.Object) *FuseFileNode {
return &FuseFileNode{
vfs: vfs,
pathComps: pathComps,
@@ -117,7 +117,7 @@ func (n *FuseFileNode) loadCacheFile() *cache.CacheFile {
return n.vfs.cache.LoadFile(n.pathComps, nil)
}

-cdsObj, err := n.vfs.db.Object().GetByFullPath(n.vfs.db.DefCtx(), n.pathComps[0], n.pathComps[1], jcsypes.JoinObjectPath(n.pathComps[2:]...))
+cdsObj, err := n.vfs.db.Object().GetByFullPath(n.vfs.db.DefCtx(), n.pathComps[0], n.pathComps[1], jcstypes.JoinObjectPath(n.pathComps[2:]...))
if err == nil {
file := n.vfs.cache.LoadFile(n.pathComps, &cdsObj)
if file == nil {

@@ -8,7 +8,7 @@ import (
db2 "gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/fuse"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/mount/vfs/cache"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"
)

@@ -104,7 +104,7 @@ func (r *FuseRoot) listChildren() ([]fuse.FsEntry, error) {
return nil, err
}

-bktMap := make(map[string]*jcsypes.Bucket)
+bktMap := make(map[string]*jcstypes.Bucket)
for _, bkt := range bkts {
b := bkt
bktMap[bkt.Name] = &b

@@ -8,7 +8,7 @@ import (
"time"

"github.com/spf13/cobra"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func init() {
@@ -31,7 +31,7 @@ func init() {
fmt.Printf("Invalid user space ID: %s\n", args[1])
}

-loadByID(cmdCtx, jcsypes.PackageID(pkgID), jcsypes.UserSpaceID(userSpaceID), args[2])
+loadByID(cmdCtx, jcstypes.PackageID(pkgID), jcstypes.UserSpaceID(userSpaceID), args[2])
} else {
loadByPath(cmdCtx, args[0], args[1], args[2])
}
@@ -42,7 +42,7 @@ func init() {
}

func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath string) {
-comps := strings.Split(strings.Trim(pkgPath, jcsypes.ObjectPathSeparator), jcsypes.ObjectPathSeparator)
+comps := strings.Split(strings.Trim(pkgPath, jcstypes.ObjectPathSeparator), jcstypes.ObjectPathSeparator)
if len(comps) != 2 {
fmt.Printf("Package path must be in format of <bucket>/<package>")
return
@@ -63,7 +63,7 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath
loadByID(cmdCtx, pkg.PackageID, stg.StorageID, rootPath)
}

-func loadByID(cmdCtx *CommandContext, pkgID jcsypes.PackageID, stgID jcsypes.StorageID, rootPath string) {
+func loadByID(cmdCtx *CommandContext, pkgID jcstypes.PackageID, stgID jcstypes.StorageID, rootPath string) {
startTime := time.Now()

err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(pkgID, stgID, rootPath)

@@ -7,7 +7,7 @@ import (

"github.com/jedib0t/go-pretty/v6/table"
"github.com/spf13/cobra"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func init() {
@@ -26,7 +26,7 @@ func init() {
return
}

-lspOneByID(cmdCtx, jcsypes.PackageID(id))
+lspOneByID(cmdCtx, jcstypes.PackageID(id))
} else {
lspByPath(cmdCtx, args[0])
}
@@ -40,7 +40,7 @@ func init() {
func lspByPath(cmdCtx *CommandContext, path string) {
db2 := cmdCtx.repl.db

-comps := strings.Split(strings.Trim(path, jcsypes.ObjectPathSeparator), jcsypes.ObjectPathSeparator)
+comps := strings.Split(strings.Trim(path, jcstypes.ObjectPathSeparator), jcstypes.ObjectPathSeparator)
if len(comps) != 2 {
fmt.Printf("Package path must be in format of <bucket>/<package>")
return
@@ -58,7 +58,7 @@ func lspByPath(cmdCtx *CommandContext, path string) {
fmt.Println(wr.Render())
}

-func lspOneByID(cmdCtx *CommandContext, id jcsypes.PackageID) {
+func lspOneByID(cmdCtx *CommandContext, id jcstypes.PackageID) {
db2 := cmdCtx.repl.db

pkg, err := db2.Package().GetByID(db2.DefCtx(), id)

@@ -8,7 +8,7 @@ import (
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gorm.io/gorm"

"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
@@ -31,41 +31,41 @@ func (svc *Service) UserSpaceSvc() *UserSpaceService {
return &UserSpaceService{Service: svc}
}

-func (svc *UserSpaceService) Get(userspaceID jcsypes.UserSpaceID) (jcsypes.UserSpace, error) {
+func (svc *UserSpaceService) Get(userspaceID jcstypes.UserSpaceID) (jcstypes.UserSpace, error) {
return svc.DB.UserSpace().GetByID(svc.DB.DefCtx(), userspaceID)
}

-func (svc *UserSpaceService) GetByName(name string) (jcsypes.UserSpace, error) {
+func (svc *UserSpaceService) GetByName(name string) (jcstypes.UserSpace, error) {
return svc.DB.UserSpace().GetByName(svc.DB.DefCtx(), name)
}

-func (svc *UserSpaceService) GetAll() ([]jcsypes.UserSpace, error) {
+func (svc *UserSpaceService) GetAll() ([]jcstypes.UserSpace, error) {
return svc.DB.UserSpace().GetAll(svc.DB.DefCtx())
}

func (svc *UserSpaceService) Create(req cliapi.UserSpaceCreate) (*cliapi.UserSpaceCreateResp, *ecode.CodeError) {
db2 := svc.DB
-space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcsypes.UserSpace, error) {
+space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcstypes.UserSpace, error) {
space, err := db2.UserSpace().GetByName(tx, req.Name)
if err == nil {
-return jcsypes.UserSpace{}, gorm.ErrDuplicatedKey
+return jcstypes.UserSpace{}, gorm.ErrDuplicatedKey
}
if err != gorm.ErrRecordNotFound {
-return jcsypes.UserSpace{}, err
+return jcstypes.UserSpace{}, err
}

-space = jcsypes.UserSpace{
+space = jcstypes.UserSpace{
Name: req.Name,
Storage: req.Storage,
Credential: req.Credential,
ShardStore: req.ShardStore,
Features: req.Features,
-WorkingDir: jcsypes.PathFromJcsPathString(req.WorkingDir),
+WorkingDir: jcstypes.PathFromJcsPathString(req.WorkingDir),
Revision: 0,
}
err = db2.UserSpace().Create(tx, &space)
if err != nil {
-return jcsypes.UserSpace{}, err
+return jcstypes.UserSpace{}, err
}
return space, nil
})
@@ -80,19 +80,19 @@ func (svc *UserSpaceService) Create(req cliapi.UserSpaceCreate) (*cliapi.UserSpa

func (svc *UserSpaceService) Update(req cliapi.UserSpaceUpdate) (*cliapi.UserSpaceUpdateResp, *ecode.CodeError) {
db2 := svc.DB
-space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcsypes.UserSpace, error) {
+space, err := db.DoTx01(db2, func(tx db.SQLContext) (jcstypes.UserSpace, error) {
space, err := db2.UserSpace().GetByID(tx, req.UserSpaceID)
if err != nil {
-return jcsypes.UserSpace{}, err
+return jcstypes.UserSpace{}, err
}

if space.Name != req.Name {
_, err = db2.UserSpace().GetByName(tx, req.Name)
if err == nil {
-return jcsypes.UserSpace{}, gorm.ErrDuplicatedKey
+return jcstypes.UserSpace{}, gorm.ErrDuplicatedKey
}
if err != gorm.ErrRecordNotFound {
-return jcsypes.UserSpace{}, err
+return jcstypes.UserSpace{}, err
}
}

@@ -110,7 +110,7 @@ func (svc *UserSpaceService) Update(req cliapi.UserSpaceUpdate) (*cliapi.UserSpa
}

// 通知元数据缓存无效
-svc.UserSpaceMeta.Drop([]jcsypes.UserSpaceID{req.UserSpaceID})
+svc.UserSpaceMeta.Drop([]jcstypes.UserSpaceID{req.UserSpaceID})

// 通知存储服务组件池停止组件。TODO 对于在Hub上运行的组件,需要一个机制去定时清理
svc.StgPool.Drop(stgglb.UserID, space.UserSpaceID)
@@ -155,7 +155,7 @@ func (svc *UserSpaceService) Delete(req cliapi.UserSpaceDelete) (*cliapi.UserSpa
}

// 通知元数据缓存无效
-svc.UserSpaceMeta.Drop([]jcsypes.UserSpaceID{req.UserSpaceID})
+svc.UserSpaceMeta.Drop([]jcstypes.UserSpaceID{req.UserSpaceID})

// 通知存储服务组件池停止组件。TODO 对于在Hub上运行的组件,需要一个机制去定时清理
svc.StgPool.Drop(stgglb.UserID, req.UserSpaceID)
@@ -166,13 +166,13 @@ func (svc *UserSpaceService) Delete(req cliapi.UserSpaceDelete) (*cliapi.UserSpa
}

func (svc *UserSpaceService) Test(req cliapi.UserSpaceTest) (*cliapi.UserSpaceTestResp, *ecode.CodeError) {
-detail := jcsypes.UserSpaceDetail{
+detail := jcstypes.UserSpaceDetail{
UserID: stgglb.UserID,
-UserSpace: jcsypes.UserSpace{
+UserSpace: jcstypes.UserSpace{
Name: "test",
Storage: req.Storage,
Credential: req.Credential,
-WorkingDir: jcsypes.PathFromJcsPathString(req.WorikingDir),
+WorkingDir: jcstypes.PathFromJcsPathString(req.WorikingDir),
},
}
blder := factory.GetBuilder(&detail)
@@ -189,7 +189,7 @@ func (svc *UserSpaceService) Test(req cliapi.UserSpaceTest) (*cliapi.UserSpaceTe
return &cliapi.UserSpaceTestResp{}, nil
}

-func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, userspaceID jcsypes.UserSpaceID, rootPath string) error {
+func (svc *UserSpaceService) DownloadPackage(packageID jcstypes.PackageID, userspaceID jcstypes.UserSpaceID, rootPath string) error {
destSpace := svc.UserSpaceMeta.Get(userspaceID)
if destSpace == nil {
return fmt.Errorf("userspace not found: %d", userspaceID)
@@ -208,10 +208,10 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp
}
defer mutex.Unlock()

-rootJPath := jcsypes.PathFromJcsPathString(rootPath)
+rootJPath := jcstypes.PathFromJcsPathString(rootPath)

dIndex := 0
-var pinned []jcsypes.PinnedObject
+var pinned []jcstypes.PinnedObject
for dIndex < len(details) {
plans := exec.NewPlanBuilder()
for i := 0; i < 10 && dIndex < len(details); i++ {
@@ -252,7 +252,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp
return fmt.Errorf("unsupported download strategy: %T", strg)
}

-objPath := jcsypes.PathFromJcsPathString(details[dIndex].Object.Path)
+objPath := jcstypes.PathFromJcsPathString(details[dIndex].Object.Path)
dstPath := rootJPath.ConcatNew(objPath)

newDstSpace := *destSpace
@@ -265,7 +265,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp
// 顺便保存到同存储服务的分片存储中
if destSpace.UserSpace.ShardStore != nil {
ft.AddTo(ioswitch2.NewToShardStore(newDstSpace, ioswitch2.RawStream(), ""))
-pinned = append(pinned, jcsypes.PinnedObject{
+pinned = append(pinned, jcstypes.PinnedObject{
ObjectID: details[dIndex].Object.ObjectID,
UserSpaceID: destSpace.UserSpace.UserSpaceID,
CreateTime: time.Now(),
@@ -297,7 +297,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp
}

err = svc.DB.DoTx(func(tx db.SQLContext) error {
-objIDs := make([]jcsypes.ObjectID, len(pinned))
+objIDs := make([]jcstypes.ObjectID, len(pinned))
for i, obj := range pinned {
objIDs[i] = obj.ObjectID
}
@@ -307,7 +307,7 @@ func (svc *UserSpaceService) DownloadPackage(packageID jcsypes.PackageID, usersp
return err
}

-pinned = lo.Filter(pinned, func(p jcsypes.PinnedObject, idx int) bool { return avaiIDs[p.ObjectID] })
+pinned = lo.Filter(pinned, func(p jcstypes.PinnedObject, idx int) bool { return avaiIDs[p.ObjectID] })
return svc.DB.PinnedObject().BatchTryCreate(svc.DB.DefCtx(), pinned)
})
if err != nil {

@@ -1,22 +1,22 @@
package services

import (
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gitlink.org.cn/cloudream/jcs-pub/common/types/datamap"
)

-func getBlockTypeFromRed(red jcsypes.Redundancy) string {
+func getBlockTypeFromRed(red jcstypes.Redundancy) string {
switch red.(type) {
-case *jcsypes.NoneRedundancy:
+case *jcstypes.NoneRedundancy:
return datamap.BlockTypeRaw

-case *jcsypes.ECRedundancy:
+case *jcstypes.ECRedundancy:
return datamap.BlockTypeEC

-case *jcsypes.LRCRedundancy:
+case *jcstypes.LRCRedundancy:
return datamap.BlockTypeEC

-case *jcsypes.SegmentRedundancy:
+case *jcstypes.SegmentRedundancy:
return datamap.BlockTypeSegment
}
return ""

@@ -12,10 +12,10 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

-func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDiff) {
+func executeDiff(syncer *SpaceSyncer, task *task, mode *jcstypes.SpaceSyncModeDiff) {
log := logger.WithField("Mod", logMod).WithField("TaskID", task.Task.TaskID)

startTime := time.Now()
@@ -112,7 +112,7 @@ func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDif
}

var willSync []stgtypes.DirEntry
-var willMkdirs []jcsypes.JPath
+var willMkdirs []jcstypes.JPath

dirTree.Iterate(func(path []string, node *trie.Node[srcDstDirEntry], isWordNode bool) trie.VisitCtrl {
if node.Value.src == nil {
@@ -123,7 +123,7 @@ func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDif
if node.Value.src.IsDir {
if node.Value.dst == nil {
if node.IsEmpty() {
-willMkdirs = append(willMkdirs, jcsypes.PathFromComps(path...))
+willMkdirs = append(willMkdirs, jcstypes.PathFromComps(path...))
}
}
} else {
@@ -193,7 +193,7 @@ func executeDiff(syncer *SpaceSyncer, task *task, mode *jcsypes.SpaceSyncModeDif
}
}

-func diffCreateSrcNode(tree *trie.Trie[srcDstDirEntry], path jcsypes.JPath, e *stgtypes.DirEntry) {
+func diffCreateSrcNode(tree *trie.Trie[srcDstDirEntry], path jcstypes.JPath, e *stgtypes.DirEntry) {
var ptr = &tree.Root
for _, c := range path.Comps() {
if ptr.Value.src != nil && ptr.Value.src.IsDir {
@@ -205,7 +205,7 @@ func diffCreateSrcNode(tree *trie.Trie[srcDstDirEntry], path jcsypes.JPath, e *s
ptr.Value.src = e
}

-func diffCreateDstNode(tree *trie.Trie[srcDstDirEntry], path jcsypes.JPath, e *stgtypes.DirEntry) {
+func diffCreateDstNode(tree *trie.Trie[srcDstDirEntry], path jcstypes.JPath, e *stgtypes.DirEntry) {
var ptr = &tree.Root
for _, c := range path.Comps() {
if ptr.Value.src != nil && ptr.Value.src.IsDir {
@@ -227,7 +227,7 @@ type srcDstDirEntry struct {
dst *stgtypes.DirEntry
}

-func cmpFile(diff *jcsypes.SpaceSyncModeDiff, src, dst *stgtypes.DirEntry) bool {
+func cmpFile(diff *jcstypes.SpaceSyncModeDiff, src, dst *stgtypes.DirEntry) bool {
if diff.IncludeSize && src.Size != dst.Size {
return false
}

@ -2,7 +2,7 @@ package spacesyncer
|
|||
|
||||
import (
|
||||
stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type FilterFn func(info stgtypes.DirEntry) bool
|
||||
|
@ -11,7 +11,7 @@ func buildFilter(task *task) FilterFn {
|
|||
var fns []FilterFn
|
||||
for _, f := range task.Task.Filters {
|
||||
switch f := f.(type) {
|
||||
case *jcsypes.SpaceSyncFilterSize:
|
||||
case *jcstypes.SpaceSyncFilterSize:
|
||||
fns = append(fns, filterSize(f))
|
||||
}
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ func buildFilter(task *task) FilterFn {
|
|||
}
|
||||
}
|
||||
|
||||
func filterSize(filter *jcsypes.SpaceSyncFilterSize) FilterFn {
|
||||
func filterSize(filter *jcstypes.SpaceSyncFilterSize) FilterFn {
|
||||
return func(info stgtypes.DirEntry) bool {
|
||||
if filter.MinSize > 0 && info.Size < filter.MinSize {
|
||||
return false
|
||||
|
|
|
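buildFilter collects one FilterFn per configured filter, and filterSize rejects entries below MinSize (a MinSize of 0 disables the bound). A small sketch of composing such predicates; allOf is an illustrative helper, not part of the package:

package main

import "fmt"

// Minimal stand-ins for stgtypes.DirEntry and the size filter; the real field sets differ.
type DirEntry struct{ Size int64 }

type FilterFn func(DirEntry) bool

// filterSize mirrors the hunk above: a MinSize of 0 disables the lower bound.
func filterSize(minSize int64) FilterFn {
    return func(e DirEntry) bool {
        if minSize > 0 && e.Size < minSize {
            return false
        }
        return true
    }
}

// allOf folds several FilterFn into one, the way buildFilter presumably combines its fns slice.
func allOf(fns ...FilterFn) FilterFn {
    return func(e DirEntry) bool {
        for _, fn := range fns {
            if !fn(e) {
                return false
            }
        }
        return true
    }
}

func main() {
    keep := allOf(filterSize(1024))
    fmt.Println(keep(DirEntry{Size: 4096})) // true
    fmt.Println(keep(DirEntry{Size: 100}))  // false
}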
@ -10,7 +10,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache"
|
||||
stgpool "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -26,7 +26,7 @@ type SpaceSyncer struct {
|
|||
stgPool *stgpool.Pool
|
||||
spaceMeta *metacache.UserSpaceMeta
|
||||
lock sync.Mutex
|
||||
tasks map[jcsypes.SpaceSyncTaskID]*task
|
||||
tasks map[jcstypes.SpaceSyncTaskID]*task
|
||||
}
|
||||
|
||||
func New(db *db.DB, stgPool *stgpool.Pool, spaceMeta *metacache.UserSpaceMeta) *SpaceSyncer {
|
||||
|
@ -34,7 +34,7 @@ func New(db *db.DB, stgPool *stgpool.Pool, spaceMeta *metacache.UserSpaceMeta) *
|
|||
db: db,
|
||||
stgPool: stgPool,
|
||||
spaceMeta: spaceMeta,
|
||||
tasks: make(map[jcsypes.SpaceSyncTaskID]*task),
|
||||
tasks: make(map[jcstypes.SpaceSyncTaskID]*task),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,7 @@ func (s *SpaceSyncer) Start() *async.UnboundChannel[SpaceSyncerEvent] {
|
|||
if err != nil {
|
||||
log.Warnf("load task from db: %v", err)
|
||||
} else {
|
||||
var rms []jcsypes.SpaceSyncTaskID
|
||||
var rms []jcstypes.SpaceSyncTaskID
|
||||
for _, t := range allTask {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
tsk := task{
|
||||
|
@ -60,14 +60,14 @@ func (s *SpaceSyncer) Start() *async.UnboundChannel[SpaceSyncerEvent] {
|
|||
}
|
||||
|
||||
switch tr := t.Trigger.(type) {
|
||||
case *jcsypes.SpaceSyncTriggerOnce:
|
||||
case *jcstypes.SpaceSyncTriggerOnce:
|
||||
// Once类型的任务没有执行完也不执行了
// Once-type tasks are not re-run if they never finished
|
||||
rms = append(rms, t.TaskID)
|
||||
|
||||
case *jcsypes.SpaceSyncTriggerInterval:
|
||||
case *jcstypes.SpaceSyncTriggerInterval:
|
||||
triggerInterval(s, &tsk, tr)
|
||||
|
||||
case *jcsypes.SpaceSyncTriggerAt:
|
||||
case *jcstypes.SpaceSyncTriggerAt:
|
||||
triggerAt(s, &tsk, tr)
|
||||
}
|
||||
|
||||
|
@ -95,10 +95,10 @@ func (s *SpaceSyncer) Stop() {
|
|||
t.CancelFn()
|
||||
}
|
||||
|
||||
s.tasks = make(map[jcsypes.SpaceSyncTaskID]*task)
|
||||
s.tasks = make(map[jcstypes.SpaceSyncTaskID]*task)
|
||||
}
|
||||
|
||||
func (s *SpaceSyncer) CreateTask(t jcsypes.SpaceSyncTask) (*TaskInfo, error) {
|
||||
func (s *SpaceSyncer) CreateTask(t jcstypes.SpaceSyncTask) (*TaskInfo, error) {
|
||||
log := logger.WithField("Mod", logMod)
|
||||
|
||||
d := s.db
|
||||
|
@ -126,13 +126,13 @@ func (s *SpaceSyncer) CreateTask(t jcsypes.SpaceSyncTask) (*TaskInfo, error) {
|
|||
s.lock.Unlock()
|
||||
|
||||
switch tr := t.Trigger.(type) {
|
||||
case *jcsypes.SpaceSyncTriggerOnce:
|
||||
case *jcstypes.SpaceSyncTriggerOnce:
|
||||
triggerOnce(s, &tsk)
|
||||
|
||||
case *jcsypes.SpaceSyncTriggerInterval:
|
||||
case *jcstypes.SpaceSyncTriggerInterval:
|
||||
triggerInterval(s, &tsk, tr)
|
||||
|
||||
case *jcsypes.SpaceSyncTriggerAt:
|
||||
case *jcstypes.SpaceSyncTriggerAt:
|
||||
triggerAt(s, &tsk, tr)
|
||||
}
|
||||
|
||||
|
@ -143,7 +143,7 @@ func (s *SpaceSyncer) CreateTask(t jcsypes.SpaceSyncTask) (*TaskInfo, error) {
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (s *SpaceSyncer) CancelTask(taskID jcsypes.SpaceSyncTaskID) {
|
||||
func (s *SpaceSyncer) CancelTask(taskID jcstypes.SpaceSyncTaskID) {
|
||||
log := logger.WithField("Mod", logMod)
|
||||
|
||||
s.lock.Lock()
|
||||
|
@ -166,7 +166,7 @@ func (s *SpaceSyncer) CancelTask(taskID jcsypes.SpaceSyncTaskID) {
|
|||
log.Infof("task %v canceled", taskID)
|
||||
}
|
||||
|
||||
func (s *SpaceSyncer) GetTask(taskID jcsypes.SpaceSyncTaskID) *jcsypes.SpaceSyncTask {
|
||||
func (s *SpaceSyncer) GetTask(taskID jcstypes.SpaceSyncTaskID) *jcstypes.SpaceSyncTask {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
|
@ -181,11 +181,11 @@ func (s *SpaceSyncer) GetTask(taskID jcsypes.SpaceSyncTaskID) *jcsypes.SpaceSync
|
|||
}
|
||||
|
||||
type TaskInfo struct {
|
||||
Task jcsypes.SpaceSyncTask
|
||||
Task jcstypes.SpaceSyncTask
|
||||
}
|
||||
|
||||
type task struct {
|
||||
Task jcsypes.SpaceSyncTask
|
||||
Task jcstypes.SpaceSyncTask
|
||||
Context context.Context
|
||||
CancelFn func()
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
|
||||
"gitlink.org.cn/cloudream/common/pkgs/logger"
|
||||
"gitlink.org.cn/cloudream/common/utils/sort2"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
func triggerOnce(syncer *SpaceSyncer, task *task) {
|
||||
|
@ -31,7 +31,7 @@ func triggerOnce(syncer *SpaceSyncer, task *task) {
|
|||
}()
|
||||
}
|
||||
|
||||
func triggerInterval(syncer *SpaceSyncer, task *task, trigger *jcsypes.SpaceSyncTriggerInterval) {
|
||||
func triggerInterval(syncer *SpaceSyncer, task *task, trigger *jcstypes.SpaceSyncTriggerInterval) {
|
||||
go func() {
|
||||
log := logger.WithField("Mod", logMod)
|
||||
|
||||
|
@ -66,7 +66,7 @@ func triggerInterval(syncer *SpaceSyncer, task *task, trigger *jcsypes.SpaceSync
|
|||
}()
|
||||
}
|
||||
|
||||
func triggerAt(syncer *SpaceSyncer, task *task, trigger *jcsypes.SpaceSyncTriggerAt) {
|
||||
func triggerAt(syncer *SpaceSyncer, task *task, trigger *jcstypes.SpaceSyncTriggerAt) {
|
||||
go func() {
|
||||
log := logger.WithField("Mod", logMod)
|
||||
|
||||
|
|
|
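The spacesyncer dispatches on the trigger type (Once, Interval, At) in the hunks above. A sketch of the interval case, under the assumption that triggerInterval is a ticker loop bounded by the task context; the real function also logs and updates task state:

package main

import (
    "context"
    "fmt"
    "time"
)

// runInterval runs the given sync function on a fixed period until ctx is cancelled.
func runInterval(ctx context.Context, interval time.Duration, run func()) {
    go func() {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-ticker.C:
                run()
            }
        }
    }()
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    runInterval(ctx, 50*time.Millisecond, func() { fmt.Println("sync") })
    time.Sleep(180 * time.Millisecond)
    cancel()
    time.Sleep(60 * time.Millisecond) // let the goroutine observe the cancellation
}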
@ -5,7 +5,7 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -121,9 +121,9 @@ func (p *SpeedStats) ShouldAtClient(size int64) bool {
|
|||
return v < prob
|
||||
}
|
||||
|
||||
func (p *SpeedStats) DumpStatus() jcsypes.SpeedStatsStatus {
|
||||
return jcsypes.SpeedStatsStatus{
|
||||
Below100M: []jcsypes.SpeedStatsStatusEntry{
|
||||
func (p *SpeedStats) DumpStatus() jcstypes.SpeedStatsStatus {
|
||||
return jcstypes.SpeedStatsStatus{
|
||||
Below100M: []jcstypes.SpeedStatsStatusEntry{
|
||||
{
|
||||
TotalSize: p.stats100M[0].TotalSize,
|
||||
TotalTime: p.stats100M[0].TotalTime,
|
||||
|
@ -137,7 +137,7 @@ func (p *SpeedStats) DumpStatus() jcsypes.SpeedStatsStatus {
|
|||
LastSpeed: p.stats100M[1].LastSpeed,
|
||||
},
|
||||
},
|
||||
Below1G: []jcsypes.SpeedStatsStatusEntry{
|
||||
Below1G: []jcstypes.SpeedStatsStatusEntry{
|
||||
{
|
||||
TotalSize: p.stats1G[0].TotalSize,
|
||||
TotalTime: p.stats1G[0].TotalTime,
|
||||
|
@ -151,7 +151,7 @@ func (p *SpeedStats) DumpStatus() jcsypes.SpeedStatsStatus {
|
|||
LastSpeed: p.stats1G[1].LastSpeed,
|
||||
},
|
||||
},
|
||||
Above1G: []jcsypes.SpeedStatsStatusEntry{
|
||||
Above1G: []jcstypes.SpeedStatsStatusEntry{
|
||||
{
|
||||
TotalSize: p.statsAbove1G[0].TotalSize,
|
||||
TotalTime: p.statsAbove1G[0].TotalTime,
|
||||
|
|
|
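DumpStatus above exposes three size classes (Below100M, Below1G, Above1G), each with TotalSize, TotalTime and LastSpeed. A sketch of the bucketing this implies; the thresholds are assumptions, and the two-entry split per class visible in the diff is not modelled here:

package main

import (
    "fmt"
    "time"
)

// bucket keeps running totals plus the most recent speed for one size class.
type bucket struct {
    TotalSize int64
    TotalTime time.Duration
    LastSpeed float64 // bytes per second
}

type speedStats struct {
    below100M, below1G, above1G bucket
}

// record picks the size class of a transfer and updates its totals and last speed.
func (s *speedStats) record(size int64, d time.Duration) {
    b := &s.above1G
    switch {
    case size < 100<<20:
        b = &s.below100M
    case size < 1<<30:
        b = &s.below1G
    }
    b.TotalSize += size
    b.TotalTime += d
    if d > 0 {
        b.LastSpeed = float64(size) / d.Seconds()
    }
}

func main() {
    var s speedStats
    s.record(50<<20, 2*time.Second)
    fmt.Printf("%.0f B/s\n", s.below100M.LastSpeed)
}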
@ -8,7 +8,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/common/utils/reflect2"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/types/datamap"
|
||||
)
|
||||
|
||||
|
@ -34,7 +34,7 @@ func (j *ChangeRedundancy) Execute(t *TickTock) {
|
|||
|
||||
ctx := &changeRedundancyContext{
|
||||
ticktock: t,
|
||||
allUserSpaces: make(map[jcsypes.UserSpaceID]*userSpaceUsageInfo),
|
||||
allUserSpaces: make(map[jcstypes.UserSpaceID]*userSpaceUsageInfo),
|
||||
}
|
||||
|
||||
spaceIDs, err := t.db.UserSpace().GetAllIDs(t.db.DefCtx())
|
||||
|
@ -58,7 +58,7 @@ func (j *ChangeRedundancy) Execute(t *TickTock) {
|
|||
return
|
||||
}
|
||||
|
||||
lastPkgID := jcsypes.PackageID(0)
|
||||
lastPkgID := jcstypes.PackageID(0)
|
||||
|
||||
loop:
|
||||
for {
|
||||
|
@ -90,16 +90,16 @@ loop:
|
|||
|
||||
type changeRedundancyContext struct {
|
||||
ticktock *TickTock
|
||||
allUserSpaces map[jcsypes.UserSpaceID]*userSpaceUsageInfo
|
||||
mostBlockStgIDs []jcsypes.UserSpaceID
|
||||
allUserSpaces map[jcstypes.UserSpaceID]*userSpaceUsageInfo
|
||||
mostBlockStgIDs []jcstypes.UserSpaceID
|
||||
}
|
||||
|
||||
type userSpaceUsageInfo struct {
|
||||
UserSpace *jcsypes.UserSpaceDetail
|
||||
UserSpace *jcstypes.UserSpaceDetail
|
||||
AccessAmount float64
|
||||
}
|
||||
|
||||
func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcsypes.PackageDetail) error {
|
||||
func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcstypes.PackageDetail) error {
|
||||
log := logger.WithType[ChangeRedundancy]("TickTock")
|
||||
db2 := ctx.ticktock.db
|
||||
|
||||
|
@ -121,7 +121,7 @@ func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcsypes.P
|
|||
info.AccessAmount = stat.Amount
|
||||
}
|
||||
|
||||
lastObjID := jcsypes.ObjectID(0)
|
||||
lastObjID := jcstypes.ObjectID(0)
|
||||
for {
|
||||
objs, err := db.DoTx31(db2, db2.Object().BatchGetDetailsPaged, pkg.Package.PackageID, lastObjID, BatchGetObjectDetailCount)
|
||||
if err != nil {
|
||||
|
@ -139,7 +139,7 @@ func (j *ChangeRedundancy) changeOne(ctx *changeRedundancyContext, pkg jcsypes.P
|
|||
|
||||
ctx.mostBlockStgIDs = j.summaryRepObjectBlockUserSpaces(ctx, objs, 2)
|
||||
|
||||
var willShrinks []jcsypes.ObjectDetail
|
||||
var willShrinks []jcstypes.ObjectDetail
|
||||
|
||||
for _, obj := range objs {
|
||||
newRed, selectedSpaces := j.chooseRedundancy(ctx, obj)
|
||||
|
|
|
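Both Execute and changeOne above page through the database with a last-seen ID (lastPkgID, lastObjID) and a fixed batch size. A stand-alone sketch of that keyset pagination; fetchPage is a hypothetical stand-in for the paged DB query:

package main

import "fmt"

// fetchPage returns up to limit IDs strictly greater than after, pretending the table holds IDs 1..7.
func fetchPage(after int64, limit int) []int64 {
    var out []int64
    for id := after + 1; id <= 7 && len(out) < limit; id++ {
        out = append(out, id)
    }
    return out
}

func main() {
    last := int64(0)
    const batch = 3
    for {
        ids := fetchPage(last, batch)
        if len(ids) == 0 {
            break
        }
        for _, id := range ids {
            fmt.Println("process", id)
        }
        last = ids[len(ids)-1]
        if len(ids) < batch { // a short batch means we reached the end
            break
        }
    }
}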
@ -10,7 +10,7 @@ import (
|
|||
|
||||
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
|
||||
stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
// CheckShardStore represents a struct that handles agent cache check events
|
||||
|
@ -52,8 +52,8 @@ func (j *CheckShardStore) Execute(t *TickTock) {
|
|||
}
|
||||
}
|
||||
|
||||
func (j *CheckShardStore) checkOne(t *TickTock, space *jcsypes.UserSpaceDetail) error {
|
||||
// addr, ok := space.RecommendHub.Address.(*cortypes.GRPCAddressInfo)
|
||||
func (j *CheckShardStore) checkOne(t *TickTock, space *jcstypes.UserSpaceDetail) error {
|
||||
// addr, ok := space.RecommendHub.Address.(*jcstypes.GRPCAddressInfo)
|
||||
// if !ok {
|
||||
// return fmt.Errorf("master of user space %v has no grpc address", space.UserSpace)
|
||||
// }
|
||||
|
@ -80,9 +80,9 @@ func (j *CheckShardStore) checkOne(t *TickTock, space *jcsypes.UserSpaceDetail)
|
|||
return fmt.Errorf("listing all files: %w", err)
|
||||
}
|
||||
|
||||
fileHashes := lo.Map(infos, func(info stgtypes.FileInfo, _ int) jcsypes.FileHash { return info.Hash })
|
||||
fileHashes := lo.Map(infos, func(info stgtypes.FileInfo, _ int) jcstypes.FileHash { return info.Hash })
|
||||
|
||||
realFileHashes := lo.SliceToMap(fileHashes, func(hash jcsypes.FileHash) (jcsypes.FileHash, bool) { return hash, true })
|
||||
realFileHashes := lo.SliceToMap(fileHashes, func(hash jcstypes.FileHash) (jcstypes.FileHash, bool) { return hash, true })
|
||||
|
||||
// Perform the cache update inside a transaction
|
||||
t.db.DoTx(func(tx db.SQLContext) error {
|
||||
|
@ -95,7 +95,7 @@ func (j *CheckShardStore) checkOne(t *TickTock, space *jcsypes.UserSpaceDetail)
|
|||
}
|
||||
|
||||
// checkPinnedObject compares against the PinnedObject table and deletes records whose actual files no longer exist
|
||||
func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space *jcsypes.UserSpaceDetail, realFileHashes map[jcsypes.FileHash]bool) {
|
||||
func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space *jcstypes.UserSpaceDetail, realFileHashes map[jcstypes.FileHash]bool) {
|
||||
log := logger.WithType[CheckShardStore]("TickTock")
|
||||
|
||||
objs, err := t.db.PinnedObject().GetObjectsByUserSpaceID(tx, space.UserSpace.UserSpaceID)
|
||||
|
@ -104,7 +104,7 @@ func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space *
|
|||
return
|
||||
}
|
||||
|
||||
var rms []jcsypes.ObjectID
|
||||
var rms []jcstypes.ObjectID
|
||||
for _, c := range objs {
|
||||
if realFileHashes[c.FileHash] {
|
||||
continue
|
||||
|
@ -121,7 +121,7 @@ func (*CheckShardStore) checkPinnedObject(t *TickTock, tx db.SQLContext, space *
|
|||
}
|
||||
|
||||
// checkObjectBlock compares against the ObjectBlock table and deletes records whose actual files no longer exist
|
||||
func (*CheckShardStore) checkObjectBlock(t *TickTock, tx db.SQLContext, space *jcsypes.UserSpaceDetail, realFileHashes map[jcsypes.FileHash]bool) {
|
||||
func (*CheckShardStore) checkObjectBlock(t *TickTock, tx db.SQLContext, space *jcstypes.UserSpaceDetail, realFileHashes map[jcstypes.FileHash]bool) {
|
||||
log := logger.WithType[CheckShardStore]("TickTock")
|
||||
|
||||
blocks, err := t.db.ObjectBlock().GetByUserSpaceID(tx, space.UserSpace.UserSpaceID)
|
||||
|
@ -130,7 +130,7 @@ func (*CheckShardStore) checkObjectBlock(t *TickTock, tx db.SQLContext, space *j
|
|||
return
|
||||
}
|
||||
|
||||
var rms []jcsypes.FileHash
|
||||
var rms []jcstypes.FileHash
|
||||
for _, b := range blocks {
|
||||
if realFileHashes[b.FileHash] {
|
||||
continue
|
||||
|
|
|
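checkPinnedObject and checkObjectBlock both reduce to the same set difference: build a lookup from the hashes actually present in the shard store, then collect every DB record whose hash is missing. A minimal version of that step:

package main

import "fmt"

// staleRecords returns the recorded hashes that no longer exist in the store listing.
func staleRecords(realHashes []string, recorded []string) []string {
    present := make(map[string]bool, len(realHashes))
    for _, h := range realHashes {
        present[h] = true
    }
    var rms []string
    for _, h := range recorded {
        if !present[h] {
            rms = append(rms, h)
        }
    }
    return rms
}

func main() {
    onDisk := []string{"aa", "bb"}
    inDB := []string{"aa", "bb", "cc"}
    fmt.Println(staleRecords(onDisk, inDB)) // [cc]
}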
@ -16,56 +16,55 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc"
|
||||
lrcparser "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc/parser"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/types/datamap"
|
||||
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
|
||||
)
|
||||
|
||||
func (t *ChangeRedundancy) chooseRedundancy(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail) (jcsypes.Redundancy, []*userSpaceUsageInfo) {
|
||||
func (t *ChangeRedundancy) chooseRedundancy(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail) (jcstypes.Redundancy, []*userSpaceUsageInfo) {
|
||||
switch obj.Object.Redundancy.(type) {
|
||||
case *jcsypes.NoneRedundancy:
|
||||
case *jcstypes.NoneRedundancy:
|
||||
if obj.Object.Size > ctx.ticktock.cfg.ECFileSizeThreshold {
|
||||
newStgs := t.chooseNewUserSpacesForEC(ctx, &jcsypes.DefaultECRedundancy)
|
||||
return &jcsypes.DefaultECRedundancy, newStgs
|
||||
newStgs := t.chooseNewUserSpacesForEC(ctx, &jcstypes.DefaultECRedundancy)
|
||||
return &jcstypes.DefaultECRedundancy, newStgs
|
||||
}
|
||||
|
||||
return &jcsypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcsypes.DefaultRepRedundancy)
|
||||
return &jcstypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcstypes.DefaultRepRedundancy)
|
||||
|
||||
case *jcsypes.RepRedundancy:
|
||||
case *jcstypes.RepRedundancy:
|
||||
if obj.Object.Size >= ctx.ticktock.cfg.ECFileSizeThreshold {
|
||||
newStgs := t.chooseNewUserSpacesForEC(ctx, &jcsypes.DefaultECRedundancy)
|
||||
return &jcsypes.DefaultECRedundancy, newStgs
|
||||
newStgs := t.chooseNewUserSpacesForEC(ctx, &jcstypes.DefaultECRedundancy)
|
||||
return &jcstypes.DefaultECRedundancy, newStgs
|
||||
}
|
||||
|
||||
newSpaces := t.rechooseUserSpacesForRep(ctx, &jcsypes.DefaultRepRedundancy)
|
||||
newSpaces := t.rechooseUserSpacesForRep(ctx, &jcstypes.DefaultRepRedundancy)
|
||||
for _, s := range newSpaces {
|
||||
if !obj.ContainsBlock(0, s.UserSpace.UserSpace.UserSpaceID) && !obj.ContainsPinned(s.UserSpace.UserSpace.UserSpaceID) {
|
||||
return &jcsypes.DefaultRepRedundancy, newSpaces
|
||||
return &jcstypes.DefaultRepRedundancy, newSpaces
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
|
||||
case *jcsypes.ECRedundancy:
|
||||
case *jcstypes.ECRedundancy:
|
||||
if obj.Object.Size < ctx.ticktock.cfg.ECFileSizeThreshold {
|
||||
return &jcsypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcsypes.DefaultRepRedundancy)
|
||||
return &jcstypes.DefaultRepRedundancy, t.chooseNewUserSpacesForRep(ctx, &jcstypes.DefaultRepRedundancy)
|
||||
}
|
||||
|
||||
newSpaces := t.rechooseUserSpacesForEC(ctx, obj, &jcsypes.DefaultECRedundancy)
|
||||
newSpaces := t.rechooseUserSpacesForEC(ctx, obj, &jcstypes.DefaultECRedundancy)
|
||||
for i, s := range newSpaces {
|
||||
if !obj.ContainsBlock(i, s.UserSpace.UserSpace.UserSpaceID) {
|
||||
return &jcsypes.DefaultECRedundancy, newSpaces
|
||||
return &jcstypes.DefaultECRedundancy, newSpaces
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
|
||||
case *jcsypes.LRCRedundancy:
|
||||
newLRCStgs := t.rechooseUserSpacesForLRC(ctx, obj, &jcsypes.DefaultLRCRedundancy)
|
||||
case *jcstypes.LRCRedundancy:
|
||||
newLRCStgs := t.rechooseUserSpacesForLRC(ctx, obj, &jcstypes.DefaultLRCRedundancy)
|
||||
|
||||
for i, s := range newLRCStgs {
|
||||
if !obj.ContainsBlock(i, s.UserSpace.UserSpace.UserSpaceID) {
|
||||
return &jcsypes.DefaultLRCRedundancy, newLRCStgs
|
||||
return &jcstypes.DefaultLRCRedundancy, newLRCStgs
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -74,7 +73,7 @@ func (t *ChangeRedundancy) chooseRedundancy(ctx *changeRedundancyContext, obj jc
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, newRed jcsypes.Redundancy, selectedUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, newRed jcstypes.Redundancy, selectedUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
log := logger.WithType[ChangeRedundancy]("TickTock")
|
||||
|
||||
var updating *db.UpdatingObjectRedundancy
|
||||
|
@ -82,48 +81,48 @@ func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj
|
|||
var err error
|
||||
|
||||
switch srcRed := obj.Object.Redundancy.(type) {
|
||||
case *jcsypes.NoneRedundancy:
|
||||
case *jcstypes.NoneRedundancy:
|
||||
switch newRed := newRed.(type) {
|
||||
case *jcsypes.RepRedundancy:
|
||||
case *jcstypes.RepRedundancy:
|
||||
log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> rep")
|
||||
updating, evt, err = t.noneToRep(ctx, obj, newRed, selectedUserSpaces)
|
||||
|
||||
case *jcsypes.ECRedundancy:
|
||||
case *jcstypes.ECRedundancy:
|
||||
log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> ec")
|
||||
updating, evt, err = t.noneToEC(ctx, obj, newRed, selectedUserSpaces)
|
||||
|
||||
case *jcsypes.LRCRedundancy:
|
||||
case *jcstypes.LRCRedundancy:
|
||||
log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> lrc")
|
||||
updating, evt, err = t.noneToLRC(ctx, obj, newRed, selectedUserSpaces)
|
||||
|
||||
case *jcsypes.SegmentRedundancy:
|
||||
case *jcstypes.SegmentRedundancy:
|
||||
log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: none -> segment")
|
||||
updating, evt, err = t.noneToSeg(ctx, obj, newRed, selectedUserSpaces)
|
||||
}
|
||||
|
||||
case *jcsypes.RepRedundancy:
|
||||
case *jcstypes.RepRedundancy:
|
||||
switch newRed := newRed.(type) {
|
||||
case *jcsypes.RepRedundancy:
|
||||
case *jcstypes.RepRedundancy:
|
||||
updating, evt, err = t.repToRep(ctx, obj, srcRed, selectedUserSpaces)
|
||||
|
||||
case *jcsypes.ECRedundancy:
|
||||
case *jcstypes.ECRedundancy:
|
||||
log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: rep -> ec")
|
||||
updating, evt, err = t.repToEC(ctx, obj, newRed, selectedUserSpaces)
|
||||
}
|
||||
|
||||
case *jcsypes.ECRedundancy:
|
||||
case *jcstypes.ECRedundancy:
|
||||
switch newRed := newRed.(type) {
|
||||
case *jcsypes.RepRedundancy:
|
||||
case *jcstypes.RepRedundancy:
|
||||
log.WithField("ObjectID", obj.Object.ObjectID).Debugf("redundancy: ec -> rep")
|
||||
updating, evt, err = t.ecToRep(ctx, obj, srcRed, newRed, selectedUserSpaces)
|
||||
|
||||
case *jcsypes.ECRedundancy:
|
||||
case *jcstypes.ECRedundancy:
|
||||
updating, evt, err = t.ecToEC(ctx, obj, srcRed, newRed, selectedUserSpaces)
|
||||
}
|
||||
|
||||
case *jcsypes.LRCRedundancy:
|
||||
case *jcstypes.LRCRedundancy:
|
||||
switch newRed := newRed.(type) {
|
||||
case *jcsypes.LRCRedundancy:
|
||||
case *jcstypes.LRCRedundancy:
|
||||
updating, evt, err = t.lrcToLRC(ctx, obj, srcRed, newRed, selectedUserSpaces)
|
||||
}
|
||||
}
|
||||
|
@ -132,16 +131,16 @@ func (t *ChangeRedundancy) doChangeRedundancy(ctx *changeRedundancyContext, obj
|
|||
}
|
||||
|
||||
// Count the nodes holding each object block and pick at most userspaceCnt nodes with the most blocks
|
||||
func (t *ChangeRedundancy) summaryRepObjectBlockUserSpaces(ctx *changeRedundancyContext, objs []jcsypes.ObjectDetail, userspaceCnt int) []jcsypes.UserSpaceID {
|
||||
func (t *ChangeRedundancy) summaryRepObjectBlockUserSpaces(ctx *changeRedundancyContext, objs []jcstypes.ObjectDetail, userspaceCnt int) []jcstypes.UserSpaceID {
|
||||
type stgBlocks struct {
|
||||
UserSpaceID jcsypes.UserSpaceID
|
||||
UserSpaceID jcstypes.UserSpaceID
|
||||
Count int
|
||||
}
|
||||
|
||||
stgBlocksMap := make(map[jcsypes.UserSpaceID]*stgBlocks)
|
||||
stgBlocksMap := make(map[jcstypes.UserSpaceID]*stgBlocks)
|
||||
for _, obj := range objs {
|
||||
shouldUseEC := obj.Object.Size > ctx.ticktock.cfg.ECFileSizeThreshold
|
||||
if _, ok := obj.Object.Redundancy.(*jcsypes.RepRedundancy); ok && !shouldUseEC {
|
||||
if _, ok := obj.Object.Redundancy.(*jcstypes.RepRedundancy); ok && !shouldUseEC {
|
||||
for _, block := range obj.Blocks {
|
||||
if _, ok := stgBlocksMap[block.UserSpaceID]; !ok {
|
||||
stgBlocksMap[block.UserSpaceID] = &stgBlocks{
|
||||
|
@ -159,14 +158,14 @@ func (t *ChangeRedundancy) summaryRepObjectBlockUserSpaces(ctx *changeRedundancy
|
|||
return right.Count - left.Count
|
||||
})
|
||||
|
||||
ids := lo.Map(userspaces, func(item *stgBlocks, idx int) jcsypes.UserSpaceID { return item.UserSpaceID })
|
||||
ids := lo.Map(userspaces, func(item *stgBlocks, idx int) jcstypes.UserSpaceID { return item.UserSpaceID })
|
||||
if len(ids) > userspaceCnt {
|
||||
ids = ids[:userspaceCnt]
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) chooseNewUserSpacesForRep(ctx *changeRedundancyContext, red *jcsypes.RepRedundancy) []*userSpaceUsageInfo {
|
||||
func (t *ChangeRedundancy) chooseNewUserSpacesForRep(ctx *changeRedundancyContext, red *jcstypes.RepRedundancy) []*userSpaceUsageInfo {
|
||||
sortedUserSpaces := sort2.Sort(lo.Values(ctx.allUserSpaces), func(left *userSpaceUsageInfo, right *userSpaceUsageInfo) int {
|
||||
return sort2.Cmp(right.AccessAmount, left.AccessAmount)
|
||||
})
|
||||
|
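summaryRepObjectBlockUserSpaces, whose tail appears in the hunk above, tallies how many replica blocks each user space holds, sorts descending and keeps at most userspaceCnt spaces. A plain-Go sketch of that tally; the size and redundancy filtering of the real function is omitted:

package main

import (
    "fmt"
    "sort"
)

// topSpaces counts blocks per space ID and returns at most userspaceCnt IDs with the highest counts.
func topSpaces(blockOwners []int, userspaceCnt int) []int {
    counts := map[int]int{}
    for _, spaceID := range blockOwners {
        counts[spaceID]++
    }
    ids := make([]int, 0, len(counts))
    for id := range counts {
        ids = append(ids, id)
    }
    sort.Slice(ids, func(i, j int) bool { return counts[ids[i]] > counts[ids[j]] })
    if len(ids) > userspaceCnt {
        ids = ids[:userspaceCnt]
    }
    return ids
}

func main() {
    owners := []int{1, 1, 2, 3, 3, 3}
    fmt.Println(topSpaces(owners, 2)) // [3 1]
}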
@ -174,7 +173,7 @@ func (t *ChangeRedundancy) chooseNewUserSpacesForRep(ctx *changeRedundancyContex
|
|||
return t.chooseSoManyUserSpaces(red.RepCount, sortedUserSpaces)
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) chooseNewUserSpacesForEC(ctx *changeRedundancyContext, red *jcsypes.ECRedundancy) []*userSpaceUsageInfo {
|
||||
func (t *ChangeRedundancy) chooseNewUserSpacesForEC(ctx *changeRedundancyContext, red *jcstypes.ECRedundancy) []*userSpaceUsageInfo {
|
||||
sortedUserSpaces := sort2.Sort(lo.Values(ctx.allUserSpaces), func(left *userSpaceUsageInfo, right *userSpaceUsageInfo) int {
|
||||
return sort2.Cmp(right.AccessAmount, left.AccessAmount)
|
||||
})
|
||||
|
@ -182,7 +181,7 @@ func (t *ChangeRedundancy) chooseNewUserSpacesForEC(ctx *changeRedundancyContext
|
|||
return t.chooseSoManyUserSpaces(red.N, sortedUserSpaces)
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) chooseNewUserSpacesForLRC(ctx *changeRedundancyContext, red *jcsypes.LRCRedundancy) []*userSpaceUsageInfo {
|
||||
func (t *ChangeRedundancy) chooseNewUserSpacesForLRC(ctx *changeRedundancyContext, red *jcstypes.LRCRedundancy) []*userSpaceUsageInfo {
|
||||
sortedUserSpaces := sort2.Sort(lo.Values(ctx.allUserSpaces), func(left *userSpaceUsageInfo, right *userSpaceUsageInfo) int {
|
||||
return sort2.Cmp(right.AccessAmount, left.AccessAmount)
|
||||
})
|
||||
|
@ -198,7 +197,7 @@ func (t *ChangeRedundancy) chooseNewUserSpacesForSeg(ctx *changeRedundancyContex
|
|||
return t.chooseSoManyUserSpaces(segCount, sortedUserSpaces)
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) rechooseUserSpacesForRep(ctx *changeRedundancyContext, red *jcsypes.RepRedundancy) []*userSpaceUsageInfo {
|
||||
func (t *ChangeRedundancy) rechooseUserSpacesForRep(ctx *changeRedundancyContext, red *jcstypes.RepRedundancy) []*userSpaceUsageInfo {
|
||||
type rechooseUserSpace struct {
|
||||
*userSpaceUsageInfo
|
||||
HasBlock bool
|
||||
|
@ -233,7 +232,7 @@ func (t *ChangeRedundancy) rechooseUserSpacesForRep(ctx *changeRedundancyContext
|
|||
return t.chooseSoManyUserSpaces(red.RepCount, lo.Map(sortedStgs, func(userspace *rechooseUserSpace, idx int) *userSpaceUsageInfo { return userspace.userSpaceUsageInfo }))
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) rechooseUserSpacesForEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.ECRedundancy) []*userSpaceUsageInfo {
|
||||
func (t *ChangeRedundancy) rechooseUserSpacesForEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.ECRedundancy) []*userSpaceUsageInfo {
|
||||
type rechooseStg struct {
|
||||
*userSpaceUsageInfo
|
||||
CachedBlockIndex int
|
||||
|
@ -269,7 +268,7 @@ func (t *ChangeRedundancy) rechooseUserSpacesForEC(ctx *changeRedundancyContext,
|
|||
return t.chooseSoManyUserSpaces(red.N, lo.Map(sortedStgs, func(userspace *rechooseStg, idx int) *userSpaceUsageInfo { return userspace.userSpaceUsageInfo }))
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) rechooseUserSpacesForLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.LRCRedundancy) []*userSpaceUsageInfo {
|
||||
func (t *ChangeRedundancy) rechooseUserSpacesForLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.LRCRedundancy) []*userSpaceUsageInfo {
|
||||
type rechooseStg struct {
|
||||
*userSpaceUsageInfo
|
||||
CachedBlockIndex int
|
||||
|
@ -322,7 +321,7 @@ func (t *ChangeRedundancy) chooseSoManyUserSpaces(count int, stgs []*userSpaceUs
|
|||
var chosen []*userSpaceUsageInfo
|
||||
for len(chosen) < count {
|
||||
// In each round pick nodes from different locations; if there are not enough nodes, run another round
|
||||
chosenLocations := make(map[cortypes.Location]bool)
|
||||
chosenLocations := make(map[jcstypes.Location]bool)
|
||||
for i, stg := range extendStgs {
|
||||
if stg == nil {
|
||||
continue
|
||||
|
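A sketch of the selection policy described by the comment inside chooseSoManyUserSpaces: fill the quota round by round, never reusing a location within a round. candidate is a hypothetical simplification of userSpaceUsageInfo plus its Location; the real routine works over an extended, nil-marked copy of the sorted list, so details differ:

package main

import "fmt"

type candidate struct {
    ID       int
    Location string
}

// chooseSoMany picks up to count candidates, preferring distinct locations within each round.
func chooseSoMany(count int, sorted []candidate) []candidate {
    remaining := append([]candidate(nil), sorted...)
    var chosen []candidate
    for len(chosen) < count && len(remaining) > 0 {
        usedLoc := make(map[string]bool)
        var next []candidate
        for _, c := range remaining {
            if len(chosen) < count && !usedLoc[c.Location] {
                usedLoc[c.Location] = true
                chosen = append(chosen, c)
            } else {
                next = append(next, c) // try again in a later round
            }
        }
        remaining = next
    }
    return chosen
}

func main() {
    cands := []candidate{{1, "bj"}, {2, "bj"}, {3, "sh"}, {4, "gz"}}
    fmt.Println(chooseSoMany(3, cands)) // [{1 bj} {3 sh} {4 gz}]
}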
@ -341,7 +340,7 @@ func (t *ChangeRedundancy) chooseSoManyUserSpaces(count int, stgs []*userSpaceUs
|
|||
return chosen
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
if len(obj.Blocks) == 0 {
|
||||
return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to rep")
|
||||
}
|
||||
|
@ -352,7 +351,7 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
}
|
||||
|
||||
// If the chosen replica nodes are all the same one, only a single upload is needed
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
|
||||
ft := ioswitch2.NewFromTo()
|
||||
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream()))
|
||||
|
@ -374,11 +373,11 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
return nil, nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var blocks []jcsypes.ObjectBlock
|
||||
var blocks []jcstypes.ObjectBlock
|
||||
var blockChgs []datamap.BlockChange
|
||||
for i, stg := range uploadStgs {
|
||||
r := ret.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue)
|
||||
blocks = append(blocks, jcsypes.ObjectBlock{
|
||||
blocks = append(blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: 0,
|
||||
UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -412,7 +411,7 @@ func (t *ChangeRedundancy) noneToRep(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.ECRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.ECRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
if len(obj.Blocks) == 0 {
|
||||
return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to ec")
|
||||
}
|
||||
|
@ -441,12 +440,12 @@ func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcsypes.Ob
|
|||
return nil, nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var blocks []jcsypes.ObjectBlock
|
||||
var blocks []jcstypes.ObjectBlock
|
||||
var evtTargetBlocks []datamap.Block
|
||||
var evtBlockTrans []datamap.DataTransfer
|
||||
for i := 0; i < red.N; i++ {
|
||||
r := ioRet.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue)
|
||||
blocks = append(blocks, jcsypes.ObjectBlock{
|
||||
blocks = append(blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: i,
|
||||
UserSpaceID: uploadStgs[i].UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -494,7 +493,7 @@ func (t *ChangeRedundancy) noneToEC(ctx *changeRedundancyContext, obj jcsypes.Ob
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.LRCRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.LRCRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
if len(obj.Blocks) == 0 {
|
||||
return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to ec")
|
||||
}
|
||||
|
@ -522,12 +521,12 @@ func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
return nil, nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var blocks []jcsypes.ObjectBlock
|
||||
var blocks []jcstypes.ObjectBlock
|
||||
var evtTargetBlocks []datamap.Block
|
||||
var evtBlockTrans []datamap.DataTransfer
|
||||
for i := 0; i < red.N; i++ {
|
||||
r := ioRet.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue)
|
||||
blocks = append(blocks, jcsypes.ObjectBlock{
|
||||
blocks = append(blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: i,
|
||||
UserSpaceID: uploadStgs[i].UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -576,7 +575,7 @@ func (t *ChangeRedundancy) noneToLRC(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.SegmentRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.SegmentRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
if len(obj.Blocks) == 0 {
|
||||
return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to rep")
|
||||
}
|
||||
|
@ -587,7 +586,7 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
}
|
||||
|
||||
// If the chosen replica nodes are all the same one, only a single upload is needed
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
|
||||
ft := ioswitch2.NewFromTo()
|
||||
ft.SegmentParam = red
|
||||
|
@ -610,12 +609,12 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
return nil, nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var blocks []jcsypes.ObjectBlock
|
||||
var blocks []jcstypes.ObjectBlock
|
||||
var evtTargetBlocks []datamap.Block
|
||||
var evtBlockTrans []datamap.DataTransfer
|
||||
for i, stg := range uploadStgs {
|
||||
r := ret.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue)
|
||||
blocks = append(blocks, jcsypes.ObjectBlock{
|
||||
blocks = append(blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: i,
|
||||
UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -664,7 +663,7 @@ func (t *ChangeRedundancy) noneToSeg(ctx *changeRedundancyContext, obj jcsypes.O
|
|||
nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
if len(obj.Blocks) == 0 {
|
||||
return nil, nil, fmt.Errorf("object is not cached on any userspaces, cannot change its redundancy to rep")
|
||||
}
|
||||
|
@ -675,7 +674,7 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.Ob
|
|||
}
|
||||
|
||||
// If the chosen replica nodes are all the same one, only a single upload is needed
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
|
||||
ft := ioswitch2.NewFromTo()
|
||||
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.UserSpace, ioswitch2.RawStream()))
|
||||
|
@ -697,11 +696,11 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.Ob
|
|||
return nil, nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var blocks []jcsypes.ObjectBlock
|
||||
var blocks []jcstypes.ObjectBlock
|
||||
var blockChgs []datamap.BlockChange
|
||||
for i, stg := range uploadStgs {
|
||||
r := ret.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue)
|
||||
blocks = append(blocks, jcsypes.ObjectBlock{
|
||||
blocks = append(blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: 0,
|
||||
UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -737,14 +736,14 @@ func (t *ChangeRedundancy) repToRep(ctx *changeRedundancyContext, obj jcsypes.Ob
|
|||
nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) repToEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, red *jcsypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) repToEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, red *jcstypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
return t.noneToEC(ctx, obj, red, uploadUserSpaces)
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, srcRed *jcsypes.ECRedundancy, tarRed *jcsypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
var chosenBlocks []jcsypes.GrouppedObjectBlock
|
||||
func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, srcRed *jcstypes.ECRedundancy, tarRed *jcstypes.RepRedundancy, uploadStgs []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
var chosenBlocks []jcstypes.GrouppedObjectBlock
|
||||
var chosenBlockIndexes []int
|
||||
var chosenBlockStg []jcsypes.UserSpaceDetail
|
||||
var chosenBlockStg []jcstypes.UserSpaceDetail
|
||||
for _, block := range obj.GroupBlocks() {
|
||||
if len(block.UserSpaceIDs) > 0 {
|
||||
// TODO consider choosing the optimal node
|
||||
|
@ -768,7 +767,7 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.Obj
|
|||
}
|
||||
|
||||
// If the chosen replica nodes are all the same one, only a single upload is needed
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcsypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
uploadStgs = lo.UniqBy(uploadStgs, func(item *userSpaceUsageInfo) jcstypes.UserSpaceID { return item.UserSpace.UserSpace.UserSpaceID })
|
||||
|
||||
planBlder := exec.NewPlanBuilder()
|
||||
ft := ioswitch2.NewFromTo()
|
||||
|
@ -795,11 +794,11 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.Obj
|
|||
return nil, nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var blocks []jcsypes.ObjectBlock
|
||||
var blocks []jcstypes.ObjectBlock
|
||||
|
||||
for i := range uploadStgs {
|
||||
r := ioRet.Get(fmt.Sprintf("%d", i)).(*ops2.FileInfoValue)
|
||||
blocks = append(blocks, jcsypes.ObjectBlock{
|
||||
blocks = append(blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: 0,
|
||||
UserSpaceID: uploadStgs[i].UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -866,11 +865,11 @@ func (t *ChangeRedundancy) ecToRep(ctx *changeRedundancyContext, obj jcsypes.Obj
|
|||
nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, srcRed *jcsypes.ECRedundancy, tarRed *jcsypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, srcRed *jcstypes.ECRedundancy, tarRed *jcstypes.ECRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
grpBlocks := obj.GroupBlocks()
|
||||
|
||||
var chosenBlocks []jcsypes.GrouppedObjectBlock
|
||||
var chosenBlockStg []jcsypes.UserSpaceDetail
|
||||
var chosenBlocks []jcstypes.GrouppedObjectBlock
|
||||
var chosenBlockStg []jcstypes.UserSpaceDetail
|
||||
for _, block := range grpBlocks {
|
||||
if len(block.UserSpaceIDs) > 0 {
|
||||
stg, ok := ctx.allUserSpaces[block.UserSpaceIDs[0]]
|
||||
|
@ -909,16 +908,16 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.Obje
|
|||
})
|
||||
}
|
||||
|
||||
var newBlocks []jcsypes.ObjectBlock
|
||||
var newBlocks []jcstypes.ObjectBlock
|
||||
shouldUpdateBlocks := false
|
||||
for i, stg := range uploadUserSpaces {
|
||||
newBlock := jcsypes.ObjectBlock{
|
||||
newBlock := jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: i,
|
||||
UserSpaceID: stg.UserSpace.UserSpace.UserSpaceID,
|
||||
}
|
||||
|
||||
grp, ok := lo.Find(grpBlocks, func(grp jcsypes.GrouppedObjectBlock) bool { return grp.Index == i })
|
||||
grp, ok := lo.Find(grpBlocks, func(grp jcstypes.GrouppedObjectBlock) bool { return grp.Index == i })
|
||||
|
||||
// If the newly chosen node is already recorded in the Block table, no change is needed
|
||||
if ok && lo.Contains(grp.UserSpaceIDs, stg.UserSpace.UserSpace.UserSpaceID) {
|
||||
|
@ -988,7 +987,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.Obje
|
|||
|
||||
var blockChgs []datamap.BlockChange
|
||||
for _, block := range obj.Blocks {
|
||||
keep := lo.ContainsBy(newBlocks, func(newBlock jcsypes.ObjectBlock) bool {
|
||||
keep := lo.ContainsBy(newBlocks, func(newBlock jcstypes.ObjectBlock) bool {
|
||||
return newBlock.Index == block.Index && newBlock.UserSpaceID == block.UserSpaceID
|
||||
})
|
||||
if !keep {
|
||||
|
@ -1019,7 +1018,7 @@ func (t *ChangeRedundancy) ecToEC(ctx *changeRedundancyContext, obj jcsypes.Obje
|
|||
nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) lrcToLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, srcRed *jcsypes.LRCRedundancy, tarRed *jcsypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) lrcToLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, srcRed *jcstypes.LRCRedundancy, tarRed *jcstypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
|
||||
blocksGrpByIndex := obj.GroupBlocks()
|
||||
|
||||
|
@ -1062,8 +1061,8 @@ func (t *ChangeRedundancy) lrcToLRC(ctx *changeRedundancyContext, obj jcsypes.Ob
|
|||
/*
|
||||
TODO2: fix this block of code
|
||||
|
||||
func (t *ChangeRedundancy) groupReconstructLRC(obj jcsypes.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []jcsypes.GrouppedObjectBlock, red *jcsypes.LRCRedundancy, uploadUserSpaces []*UserSpaceLoadInfo) (*db.UpdatingObjectRedundancy, error) {
|
||||
grped := make(map[int]jcsypes.GrouppedObjectBlock)
|
||||
func (t *ChangeRedundancy) groupReconstructLRC(obj jcstypes.ObjectDetail, lostBlocks []int, lostBlockGrps []int, grpedBlocks []jcstypes.GrouppedObjectBlock, red *jcstypes.LRCRedundancy, uploadUserSpaces []*UserSpaceLoadInfo) (*db.UpdatingObjectRedundancy, error) {
|
||||
grped := make(map[int]jcstypes.GrouppedObjectBlock)
|
||||
for _, b := range grpedBlocks {
|
||||
grped[b.Index] = b
|
||||
}
|
||||
|
@ -1098,9 +1097,9 @@ TODO2 修复这一块的代码
|
|||
return nil, fmt.Errorf("executing io plan: %w", err)
|
||||
}
|
||||
|
||||
var newBlocks []jcsypes.ObjectBlock
|
||||
var newBlocks []jcstypes.ObjectBlock
|
||||
for _, i := range lostBlocks {
|
||||
newBlocks = append(newBlocks, jcsypes.ObjectBlock{
|
||||
newBlocks = append(newBlocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: i,
|
||||
UserSpaceID: uploadUserSpaces[i].UserSpace.UserSpace.UserSpaceID,
|
||||
|
@ -1109,7 +1108,7 @@ TODO2 修复这一块的代码
|
|||
}
|
||||
for _, b := range grpedBlocks {
|
||||
for _, hubID := range b.UserSpaceIDs {
|
||||
newBlocks = append(newBlocks, jcsypes.ObjectBlock{
|
||||
newBlocks = append(newBlocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: b.Index,
|
||||
UserSpaceID: hubID,
|
||||
|
@ -1125,9 +1124,9 @@ TODO2 修复这一块的代码
|
|||
}, nil
|
||||
}
|
||||
*/
|
||||
func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj jcsypes.ObjectDetail, grpBlocks []jcsypes.GrouppedObjectBlock, red *jcsypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
var chosenBlocks []jcsypes.GrouppedObjectBlock
|
||||
var chosenBlockStg []jcsypes.UserSpaceDetail
|
||||
func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj jcstypes.ObjectDetail, grpBlocks []jcstypes.GrouppedObjectBlock, red *jcstypes.LRCRedundancy, uploadUserSpaces []*userSpaceUsageInfo) (*db.UpdatingObjectRedundancy, datamap.SysEventBody, error) {
|
||||
var chosenBlocks []jcstypes.GrouppedObjectBlock
|
||||
var chosenBlockStg []jcstypes.UserSpaceDetail
|
||||
|
||||
for _, block := range grpBlocks {
|
||||
if len(block.UserSpaceIDs) > 0 && block.Index < red.M() {
|
||||
|
@ -1154,16 +1153,16 @@ func (t *ChangeRedundancy) reconstructLRC(ctx *changeRedundancyContext, obj jcsy
|
|||
|
||||
var froms []ioswitchlrc.From
|
||||
var toes []ioswitchlrc.To
|
||||
var newBlocks []jcsypes.ObjectBlock
|
||||
var newBlocks []jcstypes.ObjectBlock
|
||||
shouldUpdateBlocks := false
|
||||
for i, userspace := range uploadUserSpaces {
|
||||
newBlock := jcsypes.ObjectBlock{
|
||||
newBlock := jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: i,
|
||||
UserSpaceID: userspace.UserSpace.UserSpace.UserSpaceID,
|
||||
}
|
||||
|
||||
grp, ok := lo.Find(grpBlocks, func(grp jcsypes.GrouppedObjectBlock) bool { return grp.Index == i })
|
||||
grp, ok := lo.Find(grpBlocks, func(grp jcstypes.GrouppedObjectBlock) bool { return grp.Index == i })
|
||||
|
||||
// If the newly chosen node is already recorded in the Block table, no change is needed
|
||||
if ok && lo.Contains(grp.UserSpaceIDs, userspace.UserSpace.UserSpace.UserSpaceID) {
|
||||
|
|
|
@ -21,14 +21,14 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/types/datamap"
|
||||
)
|
||||
|
||||
func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg jcsypes.PackageDetail, objs []jcsypes.ObjectDetail, reen *publock.Reentrant) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) {
|
||||
func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext, pkg jcstypes.PackageDetail, objs []jcstypes.ObjectDetail, reen *publock.Reentrant) ([]db.UpdatingObjectRedundancy, []datamap.SysEventBody, error) {
|
||||
log := logger.WithType[ChangeRedundancy]("TickTock")
|
||||
|
||||
var readerStgIDs []jcsypes.UserSpaceID
|
||||
var readerStgIDs []jcstypes.UserSpaceID
|
||||
for _, space := range execCtx.allUserSpaces {
|
||||
// TODO this could be made configurable
|
||||
if space.AccessAmount >= float64(pkg.ObjectCount/2) {
|
||||
|
@ -37,17 +37,17 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext,
|
|||
}
|
||||
|
||||
// Only EC and rep objects are handled
|
||||
var ecObjects []jcsypes.ObjectDetail
|
||||
var repObjects []jcsypes.ObjectDetail
|
||||
var ecObjects []jcstypes.ObjectDetail
|
||||
var repObjects []jcstypes.ObjectDetail
|
||||
for _, obj := range objs {
|
||||
if _, ok := obj.Object.Redundancy.(*jcsypes.ECRedundancy); ok {
|
||||
if _, ok := obj.Object.Redundancy.(*jcstypes.ECRedundancy); ok {
|
||||
ecObjects = append(ecObjects, obj)
|
||||
} else if _, ok := obj.Object.Redundancy.(*jcsypes.RepRedundancy); ok {
|
||||
} else if _, ok := obj.Object.Redundancy.(*jcstypes.RepRedundancy); ok {
|
||||
repObjects = append(repObjects, obj)
|
||||
}
|
||||
}
|
||||
|
||||
planningStgIDs := make(map[jcsypes.UserSpaceID]bool)
|
||||
planningStgIDs := make(map[jcstypes.UserSpaceID]bool)
|
||||
|
||||
var sysEvents []datamap.SysEventBody
|
||||
|
||||
|
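doRedundancyShrink above only processes EC and rep objects, splitting them with type assertions on the Redundancy field. A stand-alone sketch of that partition with stub types in place of jcstypes:

package main

import "fmt"

type Redundancy interface{ isRedundancy() }

type ECRedundancy struct{ K, N int }
type RepRedundancy struct{ RepCount int }

func (*ECRedundancy) isRedundancy()  {}
func (*RepRedundancy) isRedundancy() {}

type Object struct {
    ID         int
    Redundancy Redundancy
}

// partition splits objects into EC and replica groups; other schemes are skipped by the shrink pass.
func partition(objs []Object) (ec, rep []Object) {
    for _, obj := range objs {
        switch obj.Redundancy.(type) {
        case *ECRedundancy:
            ec = append(ec, obj)
        case *RepRedundancy:
            rep = append(rep, obj)
        }
    }
    return
}

func main() {
    ec, rep := partition([]Object{
        {1, &ECRedundancy{K: 3, N: 5}},
        {2, &RepRedundancy{RepCount: 2}},
    })
    fmt.Println(len(ec), len(rep)) // 1 1
}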
@ -80,7 +80,7 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext,
|
|||
// For EC objects, run the annealing algorithm on each object individually
|
||||
var ecObjectsUpdating []db.UpdatingObjectRedundancy
|
||||
for i, obj := range ecObjects {
|
||||
ecRed := obj.Object.Redundancy.(*jcsypes.ECRedundancy)
|
||||
ecRed := obj.Object.Redundancy.(*jcstypes.ECRedundancy)
|
||||
solu := t.startAnnealing(execCtx, readerStgIDs, annealingObject{
|
||||
totalBlockCount: ecRed.N,
|
||||
minBlockCnt: ecRed.K,
|
||||
|
@ -105,15 +105,15 @@ func (t *ChangeRedundancy) doRedundancyShrink(execCtx *changeRedundancyContext,
|
|||
return append(repObjectsUpdating, ecObjectsUpdating...), sysEvents, nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []jcsypes.ObjectDetail) []jcsypes.UserSpaceID {
|
||||
func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []jcstypes.ObjectDetail) []jcstypes.UserSpaceID {
|
||||
type stgBlocks struct {
|
||||
UserSpaceID jcsypes.UserSpaceID
|
||||
UserSpaceID jcstypes.UserSpaceID
|
||||
Count int
|
||||
}
|
||||
|
||||
stgBlocksMap := make(map[jcsypes.UserSpaceID]*stgBlocks)
|
||||
stgBlocksMap := make(map[jcstypes.UserSpaceID]*stgBlocks)
|
||||
for _, obj := range objs {
|
||||
cacheBlockStgs := make(map[jcsypes.UserSpaceID]bool)
|
||||
cacheBlockStgs := make(map[jcstypes.UserSpaceID]bool)
|
||||
for _, block := range obj.Blocks {
|
||||
if _, ok := stgBlocksMap[block.UserSpaceID]; !ok {
|
||||
stgBlocksMap[block.UserSpaceID] = &stgBlocks{
|
||||
|
@ -153,17 +153,17 @@ func (t *ChangeRedundancy) summaryRepObjectBlockNodes(objs []jcsypes.ObjectDetai
|
|||
}
|
||||
}
|
||||
|
||||
return lo.Map(stgs, func(item *stgBlocks, idx int) jcsypes.UserSpaceID { return item.UserSpaceID })
|
||||
return lo.Map(stgs, func(item *stgBlocks, idx int) jcstypes.UserSpaceID { return item.UserSpaceID })
|
||||
}
|
||||
|
||||
type annealingState struct {
|
||||
ctx *changeRedundancyContext
|
||||
readerStgIDs []jcsypes.UserSpaceID // 近期可能访问此对象的节点
|
||||
stgsSortedByReader map[jcsypes.UserSpaceID][]stgDist // 拥有数据的节点到每个可能访问对象的节点按距离排序
|
||||
object annealingObject // 进行退火的对象
|
||||
blockList []objectBlock // 排序后的块分布情况
|
||||
stgBlockBitmaps map[jcsypes.UserSpaceID]*bitmap.Bitmap64 // 用位图的形式表示每一个节点上有哪些块
|
||||
stgCombTree combinatorialTree // 节点组合树,用于加速计算容灾度
|
||||
readerStgIDs []jcstypes.UserSpaceID // 近期可能访问此对象的节点
|
||||
stgsSortedByReader map[jcstypes.UserSpaceID][]stgDist // 拥有数据的节点到每个可能访问对象的节点按距离排序
|
||||
object annealingObject // 进行退火的对象
|
||||
blockList []objectBlock // 排序后的块分布情况
|
||||
stgBlockBitmaps map[jcstypes.UserSpaceID]*bitmap.Bitmap64 // 用位图的形式表示每一个节点上有哪些块
|
||||
stgCombTree combinatorialTree // 节点组合树,用于加速计算容灾度
|
||||
|
||||
maxScore float64 // 搜索过程中得到过的最大分数
|
||||
maxScoreRmBlocks []bool // 最大分数对应的删除方案
|
||||
|
@ -178,30 +178,30 @@ type annealingState struct {
|
|||
|
||||
type objectBlock struct {
|
||||
Index int
|
||||
UserSpaceID jcsypes.UserSpaceID
|
||||
HasEntity bool // 节点拥有实际的文件数据块
|
||||
HasShadow bool // 如果节点拥有完整文件数据,那么认为这个节点拥有所有块,这些块被称为影子块
|
||||
FileHash jcsypes.FileHash // 只有在拥有实际文件数据块时,这个字段才有值
|
||||
Size int64 // 块大小
|
||||
UserSpaceID jcstypes.UserSpaceID
|
||||
HasEntity bool // 节点拥有实际的文件数据块
|
||||
HasShadow bool // 如果节点拥有完整文件数据,那么认为这个节点拥有所有块,这些块被称为影子块
|
||||
FileHash jcstypes.FileHash // 只有在拥有实际文件数据块时,这个字段才有值
|
||||
Size int64 // 块大小
|
||||
}
|
||||
|
||||
type stgDist struct {
|
||||
UserSpaceID jcsypes.UserSpaceID
|
||||
UserSpaceID jcstypes.UserSpaceID
|
||||
Distance float64
|
||||
}
|
||||
|
||||
type combinatorialTree struct {
|
||||
nodes []combinatorialTreeNode
|
||||
blocksMaps map[int]bitmap.Bitmap64
|
||||
stgIDToLocalStgID map[jcsypes.UserSpaceID]int
|
||||
localStgIDToStgID []jcsypes.UserSpaceID
|
||||
stgIDToLocalStgID map[jcstypes.UserSpaceID]int
|
||||
localStgIDToStgID []jcstypes.UserSpaceID
|
||||
}
|
||||
|
||||
type annealingObject struct {
|
||||
totalBlockCount int
|
||||
minBlockCnt int
|
||||
pinnedAt []jcsypes.UserSpaceID
|
||||
blocks []jcsypes.ObjectBlock
|
||||
pinnedAt []jcstypes.UserSpaceID
|
||||
blocks []jcstypes.ObjectBlock
|
||||
}
|
||||
|
||||
const (
|
||||
|
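The annealing state above keeps one block bitmap per user space plus a combinatorial tree over those bitmaps to speed up the fault-tolerance check. A toy illustration of the underlying test only: a set of spaces can rebuild the object when the union of their bitmaps covers at least minBlockCnt distinct blocks. bitmap.Bitmap64 is replaced by a plain uint64 here, and the tree itself is not modelled:

package main

import (
    "fmt"
    "math/bits"
)

// canRecover reports whether the union of the per-space block bitmaps covers enough distinct blocks.
// Bit i set in a bitmap means that space holds block i.
func canRecover(spaceBitmaps []uint64, minBlockCnt int) bool {
    var union uint64
    for _, m := range spaceBitmaps {
        union |= m
    }
    return bits.OnesCount64(union) >= minBlockCnt
}

func main() {
    // Three spaces holding blocks {0,1}, {1,2} and {3}; assume the object needs any 3 of its blocks.
    spaces := []uint64{0b0011, 0b0110, 0b1000}
    fmt.Println(canRecover(spaces, 3))     // true: the union covers blocks 0,1,2,3
    fmt.Println(canRecover(spaces[:2], 3)) // true: blocks 0,1,2
    fmt.Println(canRecover(spaces[2:], 3)) // false: only block 3
}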
@ -210,10 +210,10 @@ const (
|
|||
iterActionBreak = 2
|
||||
)
|
||||
|
||||
func newCombinatorialTree(stgBlocksMaps map[jcsypes.UserSpaceID]*bitmap.Bitmap64) combinatorialTree {
|
||||
func newCombinatorialTree(stgBlocksMaps map[jcstypes.UserSpaceID]*bitmap.Bitmap64) combinatorialTree {
|
||||
tree := combinatorialTree{
|
||||
blocksMaps: make(map[int]bitmap.Bitmap64),
|
||||
stgIDToLocalStgID: make(map[jcsypes.UserSpaceID]int),
|
||||
stgIDToLocalStgID: make(map[jcstypes.UserSpaceID]int),
|
||||
}
|
||||
|
||||
tree.nodes = make([]combinatorialTreeNode, (1 << len(stgBlocksMaps)))
|
||||
|
@ -271,7 +271,7 @@ func (t *combinatorialTree) GetDepth(index int) int {
|
|||
|
||||
// Update the block-distribution bitmap of one compute-center node, together with all child nodes of its corresponding combinatorial-tree node.
|
||||
// If a node already has K blocks when it is reached, its child nodes are not updated further
|
||||
func (t *combinatorialTree) UpdateBitmap(stgID jcsypes.UserSpaceID, mp bitmap.Bitmap64, k int) {
|
||||
func (t *combinatorialTree) UpdateBitmap(stgID jcstypes.UserSpaceID, mp bitmap.Bitmap64, k int) {
|
||||
t.blocksMaps[t.stgIDToLocalStgID[stgID]] = mp
|
||||
// First, define two ways of moving between tree nodes during traversal:
|
||||
// 1. Vertical move (depth increases): move from a node to its leftmost child; each step increases index by 1
|
||||
|
@ -416,13 +416,13 @@ type annealingSolution struct {
|
|||
minAccessCost float64 // 本方案的最小访问费用
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerStgIDs []jcsypes.UserSpaceID, object annealingObject) annealingSolution {
|
||||
func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerStgIDs []jcstypes.UserSpaceID, object annealingObject) annealingSolution {
|
||||
state := &annealingState{
|
||||
ctx: ctx,
|
||||
readerStgIDs: readerStgIDs,
|
||||
stgsSortedByReader: make(map[jcsypes.UserSpaceID][]stgDist),
|
||||
stgsSortedByReader: make(map[jcstypes.UserSpaceID][]stgDist),
|
||||
object: object,
|
||||
stgBlockBitmaps: make(map[jcsypes.UserSpaceID]*bitmap.Bitmap64),
|
||||
stgBlockBitmaps: make(map[jcstypes.UserSpaceID]*bitmap.Bitmap64),
|
||||
}
|
||||
|
||||
t.initBlockList(state)
|
||||
|
@ -486,7 +486,7 @@ func (t *ChangeRedundancy) startAnnealing(ctx *changeRedundancyContext, readerSt
|
|||
}
|
||||
|
||||
func (t *ChangeRedundancy) initBlockList(ctx *annealingState) {
|
||||
blocksMap := make(map[jcsypes.UserSpaceID][]objectBlock)
|
||||
blocksMap := make(map[jcstypes.UserSpaceID][]objectBlock)
|
||||
|
||||
// First generate all the shadow blocks
|
||||
for _, pinned := range ctx.object.pinnedAt {
|
||||
|
@ -680,7 +680,7 @@ func (t *ChangeRedundancy) alwaysAccept(curTemp float64, dScore float64, cooling
|
|||
return v > rand.Float64()
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcsypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcsypes.UserSpaceID]bool) db.UpdatingObjectRedundancy {
|
||||
func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcstypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcstypes.UserSpaceID]bool) db.UpdatingObjectRedundancy {
|
||||
entry := db.UpdatingObjectRedundancy{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
FileHash: obj.Object.FileHash,
|
||||
|
@ -694,8 +694,8 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s
|
|||
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg, ioswitch2.RawStream()))
|
||||
|
||||
for i, f := range solu.rmBlocks {
|
||||
hasCache := lo.ContainsBy(obj.Blocks, func(b jcsypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
|
||||
lo.ContainsBy(obj.PinnedAt, func(n jcsypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID })
|
||||
hasCache := lo.ContainsBy(obj.Blocks, func(b jcstypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
|
||||
lo.ContainsBy(obj.PinnedAt, func(n jcstypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID })
|
||||
willRm := f
|
||||
|
||||
if !willRm {
|
||||
|
@ -706,7 +706,7 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s
|
|||
|
||||
planningHubIDs[solu.blockList[i].UserSpaceID] = true
|
||||
}
|
||||
entry.Blocks = append(entry.Blocks, jcsypes.ObjectBlock{
|
||||
entry.Blocks = append(entry.Blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: solu.blockList[i].Index,
|
||||
UserSpaceID: solu.blockList[i].UserSpaceID,
|
||||
|
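alwaysAccept, shown at the top of the hunk above, ends in "return v > rand.Float64()", which is the usual Metropolis-style acceptance test. A sketch under the assumption that v = exp(dScore / (curTemp * coolingRate)); how the real function derives v is not visible in this hunk:

package main

import (
    "fmt"
    "math"
    "math/rand"
)

// accept takes every improvement and accepts regressions with a probability that
// shrinks as the score drop grows and as the temperature falls.
func accept(dScore, curTemp, coolingRate float64) bool {
    if dScore >= 0 {
        return true
    }
    v := math.Exp(dScore / (curTemp * coolingRate))
    return v > rand.Float64()
}

func main() {
    accepted := 0
    for i := 0; i < 1000; i++ {
        if accept(-5, 100, 1) {
            accepted++
        }
    }
    fmt.Printf("~%d of 1000 regressions accepted at temperature 100\n", accepted)
}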
@ -724,12 +724,12 @@ func (t *ChangeRedundancy) makePlansForRepObject(ctx *changeRedundancyContext, s
|
|||
return entry
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution, obj jcsypes.ObjectDetail) []datamap.SysEventBody {
|
||||
func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution, obj jcstypes.ObjectDetail) []datamap.SysEventBody {
|
||||
var blockChgs []datamap.BlockChange
|
||||
|
||||
for i, f := range solu.rmBlocks {
|
||||
hasCache := lo.ContainsBy(obj.Blocks, func(b jcsypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
|
||||
lo.ContainsBy(obj.PinnedAt, func(n jcsypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID })
|
||||
hasCache := lo.ContainsBy(obj.Blocks, func(b jcstypes.ObjectBlock) bool { return b.UserSpaceID == solu.blockList[i].UserSpaceID }) ||
|
||||
lo.ContainsBy(obj.PinnedAt, func(n jcstypes.UserSpaceID) bool { return n == solu.blockList[i].UserSpaceID })
|
||||
willRm := f
|
||||
|
||||
if !willRm {
|
||||
|
@ -782,7 +782,7 @@ func (t *ChangeRedundancy) generateSysEventForRepObject(solu annealingSolution,
|
|||
return []datamap.SysEventBody{transEvt, distEvt}
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcsypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcsypes.UserSpaceID]bool) db.UpdatingObjectRedundancy {
|
||||
func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, solu annealingSolution, obj jcstypes.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[jcstypes.UserSpaceID]bool) db.UpdatingObjectRedundancy {
|
||||
entry := db.UpdatingObjectRedundancy{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
FileHash: obj.Object.FileHash,
|
||||
|
@ -790,11 +790,11 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so
|
|||
Redundancy: obj.Object.Redundancy,
|
||||
}
|
||||
|
||||
reconstrct := make(map[jcsypes.UserSpaceID]*[]int)
|
||||
reconstrct := make(map[jcstypes.UserSpaceID]*[]int)
|
||||
for i, f := range solu.rmBlocks {
|
||||
block := solu.blockList[i]
|
||||
if !f {
|
||||
entry.Blocks = append(entry.Blocks, jcsypes.ObjectBlock{
|
||||
entry.Blocks = append(entry.Blocks, jcstypes.ObjectBlock{
|
||||
ObjectID: obj.Object.ObjectID,
|
||||
Index: block.Index,
|
||||
UserSpaceID: block.UserSpaceID,
|
||||
|
@ -815,7 +815,7 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so
|
|||
}
|
||||
}
|
||||
|
||||
ecRed := obj.Object.Redundancy.(*jcsypes.ECRedundancy)
|
||||
ecRed := obj.Object.Redundancy.(*jcstypes.ECRedundancy)
|
||||
|
||||
for id, idxs := range reconstrct {
|
||||
// 依次生成每个节点上的执行计划,因为如果放到一个计划里一起生成,不能保证每个节点上的块用的都是本节点上的副本
|
||||
|
@ -838,10 +838,10 @@ func (t *ChangeRedundancy) makePlansForECObject(ctx *changeRedundancyContext, so
|
|||
return entry
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, obj jcsypes.ObjectDetail) []datamap.SysEventBody {
|
||||
func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, obj jcstypes.ObjectDetail) []datamap.SysEventBody {
|
||||
var blockChgs []datamap.BlockChange
|
||||
|
||||
reconstrct := make(map[jcsypes.UserSpaceID]*[]int)
|
||||
reconstrct := make(map[jcstypes.UserSpaceID]*[]int)
|
||||
for i, f := range solu.rmBlocks {
|
||||
block := solu.blockList[i]
|
||||
if !f {
|
||||
|
@ -917,7 +917,7 @@ func (t *ChangeRedundancy) generateSysEventForECObject(solu annealingSolution, o
|
|||
return []datamap.SysEventBody{transEvt, distEvt}
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningSpaceIDs map[jcsypes.UserSpaceID]bool, reen *publock.Reentrant) (exec.PlanResult, error) {
|
||||
func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *exec.PlanBuilder, planningSpaceIDs map[jcstypes.UserSpaceID]bool, reen *publock.Reentrant) (exec.PlanResult, error) {
|
||||
reqBlder := reqbuilder.NewBuilder()
|
||||
for id, _ := range planningSpaceIDs {
|
||||
reqBlder.UserSpace().Buzy(id)
|
||||
|
@ -955,7 +955,7 @@ func (t *ChangeRedundancy) executePlans(ctx *changeRedundancyContext, planBld *e
|
|||
return ioSwRets, nil
|
||||
}
|
||||
|
||||
func (t *ChangeRedundancy) populateECObjectEntry(entry *db.UpdatingObjectRedundancy, obj jcsypes.ObjectDetail, ioRets exec.PlanResult) {
|
||||
func (t *ChangeRedundancy) populateECObjectEntry(entry *db.UpdatingObjectRedundancy, obj jcstypes.ObjectDetail, ioRets exec.PlanResult) {
|
||||
for i := range entry.Blocks {
|
||||
if entry.Blocks[i].FileHash != "" {
|
||||
continue
|
||||
|
|
|
@ -22,7 +22,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type Uploader struct {
|
||||
|
@ -43,7 +43,7 @@ func NewUploader(pubLock *publock.Service, connectivity *connectivity.Collector,
|
|||
}
|
||||
}
|
||||
|
||||
func (u *Uploader) BeginUpdate(pkgID jcsypes.PackageID, affinity jcsypes.UserSpaceID, copyTo []jcsypes.UserSpaceID, copyToPath []jcsypes.JPath) (*UpdateUploader, error) {
|
||||
func (u *Uploader) BeginUpdate(pkgID jcstypes.PackageID, affinity jcstypes.UserSpaceID, copyTo []jcstypes.UserSpaceID, copyToPath []jcstypes.JPath) (*UpdateUploader, error) {
|
||||
spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting user space ids: %w", err)
|
||||
|
@ -84,9 +84,9 @@ func (u *Uploader) BeginUpdate(pkgID jcsypes.PackageID, affinity jcsypes.UserSpa
|
|||
return nil, fmt.Errorf("user no available userspaces")
|
||||
}
|
||||
|
||||
copyToSpaces := make([]jcsypes.UserSpaceDetail, len(copyTo))
|
||||
copyToSpaces := make([]jcstypes.UserSpaceDetail, len(copyTo))
|
||||
for i, spaceID := range copyTo {
|
||||
space, ok := lo.Find(spaceDetails, func(space *jcsypes.UserSpaceDetail) bool {
|
||||
space, ok := lo.Find(spaceDetails, func(space *jcstypes.UserSpaceDetail) bool {
|
||||
return space.UserSpace.UserSpaceID == spaceID
|
||||
})
|
||||
if !ok {
|
||||
|
@ -118,7 +118,7 @@ func (u *Uploader) BeginUpdate(pkgID jcsypes.PackageID, affinity jcsypes.UserSpa
|
|||
// 1. 选择设置了亲和性的节点
|
||||
// 2. 从与当前客户端相同地域的节点中随机选一个
|
||||
// 3. 没有的话从所有节点选择延迟最低的节点
|
||||
func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity jcsypes.UserSpaceID) UploadSpaceInfo {
|
||||
func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity jcstypes.UserSpaceID) UploadSpaceInfo {
|
||||
if spaceAffinity > 0 {
|
||||
aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool { return space.Space.UserSpace.UserSpaceID == spaceAffinity })
|
||||
if ok {
|
||||
|
@ -137,10 +137,10 @@ func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity j
|
|||
return spaces[0]
|
||||
}
|
||||
|
||||
func (u *Uploader) BeginCreateUpload(bktID jcsypes.BucketID, pkgName string, copyTo []jcsypes.UserSpaceID, copyToPath []jcsypes.JPath) (*CreateUploader, error) {
|
||||
func (u *Uploader) BeginCreateUpload(bktID jcstypes.BucketID, pkgName string, copyTo []jcstypes.UserSpaceID, copyToPath []jcstypes.JPath) (*CreateUploader, error) {
|
||||
getSpaces := u.spaceMeta.GetMany(copyTo)
|
||||
|
||||
spacesStgs := make([]jcsypes.UserSpaceDetail, len(copyTo))
|
||||
spacesStgs := make([]jcstypes.UserSpaceDetail, len(copyTo))
|
||||
for i, stg := range getSpaces {
|
||||
if stg == nil {
|
||||
return nil, fmt.Errorf("storage %v not found", copyTo[i])
|
||||
|
@ -148,10 +148,10 @@ func (u *Uploader) BeginCreateUpload(bktID jcsypes.BucketID, pkgName string, cop
|
|||
spacesStgs[i] = *stg
|
||||
}
|
||||
|
||||
pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcsypes.Package, error) {
|
||||
pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcstypes.Package, error) {
|
||||
_, err := u.db.Bucket().GetByID(tx, bktID)
|
||||
if err != nil {
|
||||
return jcsypes.Package{}, err
|
||||
return jcstypes.Package{}, err
|
||||
}
|
||||
|
||||
return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName, time.Now())
|
||||
|
@ -178,19 +178,19 @@ func (u *Uploader) BeginCreateUpload(bktID jcsypes.BucketID, pkgName string, cop
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (u *Uploader) UploadPart(objID jcsypes.ObjectID, index int, stream io.Reader) error {
|
||||
func (u *Uploader) UploadPart(objID jcstypes.ObjectID, index int, stream io.Reader) error {
|
||||
detail, err := u.db.Object().GetDetail(u.db.DefCtx(), objID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting object detail: %w", err)
|
||||
}
|
||||
|
||||
objDe := detail
|
||||
_, ok := objDe.Object.Redundancy.(*jcsypes.MultipartUploadRedundancy)
|
||||
_, ok := objDe.Object.Redundancy.(*jcstypes.MultipartUploadRedundancy)
|
||||
if !ok {
|
||||
return fmt.Errorf("object %v is not a multipart upload", objID)
|
||||
}
|
||||
|
||||
var space jcsypes.UserSpaceDetail
|
||||
var space jcstypes.UserSpaceDetail
|
||||
if len(objDe.Blocks) > 0 {
|
||||
cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID)
|
||||
if cstg == nil {
|
||||
|
@ -272,7 +272,7 @@ func (u *Uploader) UploadPart(objID jcsypes.ObjectID, index int, stream io.Reade
|
|||
|
||||
shardInfo := ret.Get("shard").(*ops2.FileInfoValue)
|
||||
err = u.db.DoTx(func(tx db.SQLContext) error {
|
||||
return u.db.Object().AppendPart(tx, jcsypes.ObjectBlock{
|
||||
return u.db.Object().AppendPart(tx, jcstypes.ObjectBlock{
|
||||
ObjectID: objID,
|
||||
Index: index,
|
||||
UserSpaceID: space.UserSpace.UserSpaceID,
|
||||
|
|
|
@ -17,20 +17,19 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder"
|
||||
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcsypes.JPath, targetBktID jcsypes.BucketID, newPkgName string, uploadAffinity jcsypes.UserSpaceID) (*jcsypes.Package, error) {
|
||||
func (u *Uploader) UserSpaceUpload(userSpaceID jcstypes.UserSpaceID, rootPath jcstypes.JPath, targetBktID jcstypes.BucketID, newPkgName string, uploadAffinity jcstypes.UserSpaceID) (*jcstypes.Package, error) {
|
||||
srcSpace := u.spaceMeta.Get(userSpaceID)
|
||||
if srcSpace == nil {
|
||||
return nil, fmt.Errorf("user space %d not found", userSpaceID)
|
||||
}
|
||||
|
||||
pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcsypes.Package, error) {
|
||||
pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (jcstypes.Package, error) {
|
||||
_, err := u.db.Bucket().GetByID(tx, targetBktID)
|
||||
if err != nil {
|
||||
return jcsypes.Package{}, err
|
||||
return jcstypes.Package{}, err
|
||||
}
|
||||
|
||||
return u.db.Package().Create(tx, targetBktID, newPkgName, time.Now())
|
||||
|
@ -49,7 +48,7 @@ func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcs
|
|||
}
|
||||
|
||||
spaceDetails := u.spaceMeta.GetMany(spaceIDs)
|
||||
spaceDetails = lo.Filter(spaceDetails, func(e *jcsypes.UserSpaceDetail, i int) bool {
|
||||
spaceDetails = lo.Filter(spaceDetails, func(e *jcstypes.UserSpaceDetail, i int) bool {
|
||||
return e != nil && e.UserSpace.ShardStore != nil
|
||||
})
|
||||
|
||||
|
@ -59,13 +58,13 @@ func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcs
|
|||
coorCli := stgglb.CoordinatorRPCPool.Get()
|
||||
defer coorCli.Release()
|
||||
|
||||
resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]cortypes.HubID{srcSpace.RecommendHub.HubID}))
|
||||
resp, cerr := coorCli.GetHubConnectivities(context.Background(), corrpc.ReqGetHubConnectivities([]jcstypes.HubID{srcSpace.RecommendHub.HubID}))
|
||||
if cerr != nil {
|
||||
delPkg()
|
||||
return nil, fmt.Errorf("getting hub connectivities: %w", cerr.ToError())
|
||||
}
|
||||
|
||||
cons := make(map[cortypes.HubID]cortypes.HubConnectivity)
|
||||
cons := make(map[jcstypes.HubID]jcstypes.HubConnectivity)
|
||||
for _, c := range resp.Connectivities {
|
||||
cons[c.ToHubID] = c
|
||||
}
|
||||
|
@ -153,7 +152,7 @@ func (u *Uploader) UserSpaceUpload(userSpaceID jcsypes.UserSpaceID, rootPath jcs
|
|||
return &pkg, nil
|
||||
}
|
||||
|
||||
func (u *Uploader) uploadFromBaseStore(srcSpace *jcsypes.UserSpaceDetail, targetSpace *jcsypes.UserSpaceDetail, entries []types.DirEntry, rootPath jcsypes.JPath) ([]db.AddObjectEntry, error) {
|
||||
func (u *Uploader) uploadFromBaseStore(srcSpace *jcstypes.UserSpaceDetail, targetSpace *jcstypes.UserSpaceDetail, entries []types.DirEntry, rootPath jcstypes.JPath) ([]db.AddObjectEntry, error) {
|
||||
ft := ioswitch2.FromTo{}
|
||||
|
||||
for _, e := range entries {
|
||||
|
@ -198,7 +197,7 @@ func (u *Uploader) uploadFromBaseStore(srcSpace *jcsypes.UserSpaceDetail, target
|
|||
Size: info.Size,
|
||||
FileHash: info.Hash,
|
||||
CreateTime: time.Now(),
|
||||
UserSpaceIDs: []jcsypes.UserSpaceID{targetSpace.UserSpace.UserSpaceID},
|
||||
UserSpaceIDs: []jcstypes.UserSpaceID{targetSpace.UserSpace.UserSpaceID},
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"net/http"
|
||||
|
||||
"gitlink.org.cn/cloudream/common/sdks"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type BucketService struct {
|
||||
|
@ -18,7 +18,7 @@ func (c *Client) Bucket() *BucketService {
|
|||
const BucketGetPath = "/bucket/get"
|
||||
|
||||
type BucketGet struct {
|
||||
BucketID jcsypes.BucketID `json:"bucketID" binding:"required"`
|
||||
BucketID jcstypes.BucketID `json:"bucketID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *BucketGet) MakeParam() *sdks.RequestParam {
|
||||
|
@ -26,7 +26,7 @@ func (r *BucketGet) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type BucketGetResp struct {
|
||||
Bucket jcsypes.Bucket `json:"bucket"`
|
||||
Bucket jcstypes.Bucket `json:"bucket"`
|
||||
}
|
||||
|
||||
func (r *BucketGetResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -48,7 +48,7 @@ func (r *BucketGetByName) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type BucketGetByNameResp struct {
|
||||
Bucket jcsypes.Bucket `json:"bucket"`
|
||||
Bucket jcstypes.Bucket `json:"bucket"`
|
||||
}
|
||||
|
||||
func (r *BucketGetByNameResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -70,7 +70,7 @@ func (r *BucketCreate) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type BucketCreateResp struct {
|
||||
Bucket jcsypes.Bucket `json:"bucket"`
|
||||
Bucket jcstypes.Bucket `json:"bucket"`
|
||||
}
|
||||
|
||||
func (r *BucketCreateResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -84,7 +84,7 @@ func (c *BucketService) Create(req BucketCreate) (*BucketCreateResp, error) {
|
|||
const BucketDeletePath = "/bucket/delete"
|
||||
|
||||
type BucketDelete struct {
|
||||
BucketID jcsypes.BucketID `json:"bucketID" binding:"required"`
|
||||
BucketID jcstypes.BucketID `json:"bucketID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *BucketDelete) MakeParam() *sdks.RequestParam {
|
||||
|
@ -111,7 +111,7 @@ func (r *BucketListAll) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type BucketListAllResp struct {
|
||||
Buckets []jcsypes.Bucket `json:"buckets"`
|
||||
Buckets []jcstypes.Bucket `json:"buckets"`
|
||||
}
|
||||
|
||||
func (r *BucketListAllResp) ParseResponse(resp *http.Response) error {
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/common/sdks"
|
||||
"gitlink.org.cn/cloudream/common/utils/http2"
|
||||
"gitlink.org.cn/cloudream/common/utils/serder"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type PackageService struct {
|
||||
|
@ -27,7 +27,7 @@ func (c *Client) Package() *PackageService {
|
|||
const PackageGetPath = "/package/get"
|
||||
|
||||
type PackageGet struct {
|
||||
PackageID jcsypes.PackageID `form:"packageID" url:"packageID" binding:"required"`
|
||||
PackageID jcstypes.PackageID `form:"packageID" url:"packageID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *PackageGet) MakeParam() *sdks.RequestParam {
|
||||
|
@ -35,7 +35,7 @@ func (r *PackageGet) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type PackageGetResp struct {
|
||||
Package jcsypes.Package `json:"package"`
|
||||
Package jcstypes.Package `json:"package"`
|
||||
}
|
||||
|
||||
func (r *PackageGetResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -58,7 +58,7 @@ func (r *PackageGetByFullName) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type PackageGetByFullNameResp struct {
|
||||
Package jcsypes.Package `json:"package"`
|
||||
Package jcstypes.Package `json:"package"`
|
||||
}
|
||||
|
||||
func (r *PackageGetByFullNameResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -72,8 +72,8 @@ func (c *PackageService) GetByFullName(req PackageGetByFullName) (*PackageGetByF
|
|||
const PackageCreatePath = "/package/create"
|
||||
|
||||
type PackageCreate struct {
|
||||
BucketID jcsypes.BucketID `json:"bucketID"`
|
||||
Name string `json:"name"`
|
||||
BucketID jcstypes.BucketID `json:"bucketID"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
func (r *PackageCreate) MakeParam() *sdks.RequestParam {
|
||||
|
@ -81,7 +81,7 @@ func (r *PackageCreate) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type PackageCreateResp struct {
|
||||
Package jcsypes.Package `json:"package"`
|
||||
Package jcstypes.Package `json:"package"`
|
||||
}
|
||||
|
||||
func (r *PackageCreateResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -99,14 +99,14 @@ type PackageCreateUpload struct {
|
|||
Files UploadObjectIterator `json:"-"`
|
||||
}
|
||||
type PackageCreateUploadInfo struct {
|
||||
BucketID jcsypes.BucketID `json:"bucketID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
CopyTo []jcsypes.UserSpaceID `json:"copyTo"`
|
||||
CopyToPath []string `json:"copyToPath"`
|
||||
BucketID jcstypes.BucketID `json:"bucketID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
CopyTo []jcstypes.UserSpaceID `json:"copyTo"`
|
||||
CopyToPath []string `json:"copyToPath"`
|
||||
}
|
||||
type PackageCreateUploadResp struct {
|
||||
Package jcsypes.Package `json:"package"`
|
||||
Objects []jcsypes.Object `json:"objects"`
|
||||
Package jcstypes.Package `json:"package"`
|
||||
Objects []jcstypes.Object `json:"objects"`
|
||||
}
|
||||
|
||||
func (c *PackageService) CreateUpload(req PackageCreateUpload) (*PackageCreateUploadResp, error) {
|
||||
|
@ -148,10 +148,10 @@ func (c *PackageService) CreateUpload(req PackageCreateUpload) (*PackageCreateUp
|
|||
const PackageDownloadPath = "/package/download"
|
||||
|
||||
type PackageDownload struct {
|
||||
PackageID jcsypes.PackageID `url:"packageID" form:"packageID" binding:"required"`
|
||||
Prefix string `url:"prefix" form:"prefix"`
|
||||
NewPrefix *string `url:"newPrefix,omitempty" form:"newPrefix"`
|
||||
Zip bool `url:"zip,omitempty" form:"zip"`
|
||||
PackageID jcstypes.PackageID `url:"packageID" form:"packageID" binding:"required"`
|
||||
Prefix string `url:"prefix" form:"prefix"`
|
||||
NewPrefix *string `url:"newPrefix,omitempty" form:"newPrefix"`
|
||||
Zip bool `url:"zip,omitempty" form:"zip"`
|
||||
}
|
||||
|
||||
func (r *PackageDownload) MakeParam() *sdks.RequestParam {
|
||||
|
@ -208,7 +208,7 @@ func (c *PackageService) Download(req PackageDownload) (*DownloadingPackage, err
|
|||
const PackageDeletePath = "/package/delete"
|
||||
|
||||
type PackageDelete struct {
|
||||
PackageID jcsypes.PackageID `json:"packageID" binding:"required"`
|
||||
PackageID jcstypes.PackageID `json:"packageID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *PackageDelete) MakeParam() *sdks.RequestParam {
|
||||
|
@ -228,9 +228,9 @@ func (c *PackageService) Delete(req PackageDelete) error {
|
|||
const PackageClonePath = "/package/clone"
|
||||
|
||||
type PackageClone struct {
|
||||
PackageID jcsypes.PackageID `json:"packageID" binding:"required"`
|
||||
BucketID jcsypes.BucketID `json:"bucketID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
PackageID jcstypes.PackageID `json:"packageID" binding:"required"`
|
||||
BucketID jcstypes.BucketID `json:"bucketID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *PackageClone) MakeParam() *sdks.RequestParam {
|
||||
|
@ -238,7 +238,7 @@ func (r *PackageClone) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type PackageCloneResp struct {
|
||||
Package jcsypes.Package `json:"package"`
|
||||
Package jcstypes.Package `json:"package"`
|
||||
}
|
||||
|
||||
func (r *PackageCloneResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -252,7 +252,7 @@ func (c *PackageService) Clone(req PackageClone) (*PackageCloneResp, error) {
|
|||
const PackageListBucketPackagesPath = "/package/listBucketPackages"
|
||||
|
||||
type PackageListBucketPackages struct {
|
||||
BucketID jcsypes.BucketID `form:"bucketID" url:"bucketID" binding:"required"`
|
||||
BucketID jcstypes.BucketID `form:"bucketID" url:"bucketID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *PackageListBucketPackages) MakeParam() *sdks.RequestParam {
|
||||
|
@ -260,7 +260,7 @@ func (r *PackageListBucketPackages) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type PackageListBucketPackagesResp struct {
|
||||
Packages []jcsypes.Package `json:"packages"`
|
||||
Packages []jcstypes.Package `json:"packages"`
|
||||
}
|
||||
|
||||
func (r *PackageListBucketPackagesResp) ParseResponse(resp *http.Response) error {
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
|
||||
"github.com/google/go-querystring/query"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/client/sdk/signer"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type PresignedService struct {
|
||||
|
@ -33,10 +33,10 @@ func (c *PresignedService) ObjectListByPath(req PresignedObjectListByPath, expir
|
|||
const PresignedObjectDownloadByPathPath = "/presigned/object/downloadByPath"
|
||||
|
||||
type PresignedObjectDownloadByPath struct {
|
||||
PackageID jcsypes.PackageID `form:"packageID" url:"packageID" binding:"required"`
|
||||
Path string `form:"path" url:"path" binding:"required"`
|
||||
Offset int64 `form:"offset" url:"offset,omitempty"`
|
||||
Length *int64 `form:"length" url:"length,omitempty"`
|
||||
PackageID jcstypes.PackageID `form:"packageID" url:"packageID" binding:"required"`
|
||||
Path string `form:"path" url:"path" binding:"required"`
|
||||
Offset int64 `form:"offset" url:"offset,omitempty"`
|
||||
Length *int64 `form:"length" url:"length,omitempty"`
|
||||
}
|
||||
|
||||
func (c *PresignedService) ObjectDownloadByPath(req PresignedObjectDownloadByPath, expireIn int) (string, error) {
|
||||
|
@ -46,9 +46,9 @@ func (c *PresignedService) ObjectDownloadByPath(req PresignedObjectDownloadByPat
|
|||
const PresignedObjectDownloadPath = "/presigned/object/download"
|
||||
|
||||
type PresignedObjectDownload struct {
|
||||
ObjectID jcsypes.ObjectID `form:"objectID" url:"objectID" binding:"required"`
|
||||
Offset int64 `form:"offset" url:"offset,omitempty"`
|
||||
Length *int64 `form:"length" url:"length,omitempty"`
|
||||
ObjectID jcstypes.ObjectID `form:"objectID" url:"objectID" binding:"required"`
|
||||
Offset int64 `form:"offset" url:"offset,omitempty"`
|
||||
Length *int64 `form:"length" url:"length,omitempty"`
|
||||
}
|
||||
|
||||
func (c *PresignedService) ObjectDownload(req PresignedObjectDownload, expireIn int) (string, error) {
|
||||
|
@ -58,15 +58,15 @@ func (c *PresignedService) ObjectDownload(req PresignedObjectDownload, expireIn
|
|||
const PresignedObjectUploadPath = "/presigned/object/upload"
|
||||
|
||||
type PresignedObjectUpload struct {
|
||||
PackageID jcsypes.PackageID `form:"packageID" binding:"required" url:"packageID"`
|
||||
Path string `form:"path" binding:"required" url:"path"`
|
||||
Affinity jcsypes.UserSpaceID `form:"affinity" url:"affinity,omitempty"`
|
||||
CopyTo []jcsypes.UserSpaceID `form:"copyTo" url:"copyTo,omitempty"`
|
||||
CopyToPath []string `form:"copyToPath" url:"copyToPath,omitempty"`
|
||||
PackageID jcstypes.PackageID `form:"packageID" binding:"required" url:"packageID"`
|
||||
Path string `form:"path" binding:"required" url:"path"`
|
||||
Affinity jcstypes.UserSpaceID `form:"affinity" url:"affinity,omitempty"`
|
||||
CopyTo []jcstypes.UserSpaceID `form:"copyTo" url:"copyTo,omitempty"`
|
||||
CopyToPath []string `form:"copyToPath" url:"copyToPath,omitempty"`
|
||||
}
|
||||
|
||||
type PresignedObjectUploadResp struct {
|
||||
Object jcsypes.Object `json:"object"`
|
||||
Object jcstypes.Object `json:"object"`
|
||||
}
|
||||
|
||||
func (c *PresignedService) ObjectUpload(req PresignedObjectUpload, expireIn int) (string, error) {
|
||||
|
@ -76,12 +76,12 @@ func (c *PresignedService) ObjectUpload(req PresignedObjectUpload, expireIn int)
|
|||
const PresignedObjectNewMultipartUploadPath = "/presigned/object/newMultipartUpload"
|
||||
|
||||
type PresignedObjectNewMultipartUpload struct {
|
||||
PackageID jcsypes.PackageID `form:"packageID" binding:"required" url:"packageID"`
|
||||
Path string `form:"path" binding:"required" url:"path"`
|
||||
PackageID jcstypes.PackageID `form:"packageID" binding:"required" url:"packageID"`
|
||||
Path string `form:"path" binding:"required" url:"path"`
|
||||
}
|
||||
|
||||
type PresignedObjectNewMultipartUploadResp struct {
|
||||
Object jcsypes.Object `json:"object"`
|
||||
Object jcstypes.Object `json:"object"`
|
||||
}
|
||||
|
||||
func (c *PresignedService) ObjectNewMultipartUpload(req PresignedObjectNewMultipartUpload, expireIn int) (string, error) {
|
||||
|
@ -91,8 +91,8 @@ func (c *PresignedService) ObjectNewMultipartUpload(req PresignedObjectNewMultip
|
|||
const PresignedObjectUploadPartPath = "/presigned/object/uploadPart"
|
||||
|
||||
type PresignedObjectUploadPart struct {
|
||||
ObjectID jcsypes.ObjectID `form:"objectID" binding:"required" url:"objectID"`
|
||||
Index int `form:"index" binding:"required" url:"index"`
|
||||
ObjectID jcstypes.ObjectID `form:"objectID" binding:"required" url:"objectID"`
|
||||
Index int `form:"index" binding:"required" url:"index"`
|
||||
}
|
||||
|
||||
type PresignedUploadPartResp struct{}
|
||||
|
@ -104,12 +104,12 @@ func (c *PresignedService) ObjectUploadPart(req PresignedObjectUploadPart, expir
|
|||
const PresignedObjectCompleteMultipartUploadPath = "/presigned/object/completeMultipartUpload"
|
||||
|
||||
type PresignedObjectCompleteMultipartUpload struct {
|
||||
ObjectID jcsypes.ObjectID `form:"objectID" binding:"required" url:"objectID"`
|
||||
Indexes []int `form:"indexes" binding:"required" url:"indexes"`
|
||||
ObjectID jcstypes.ObjectID `form:"objectID" binding:"required" url:"objectID"`
|
||||
Indexes []int `form:"indexes" binding:"required" url:"indexes"`
|
||||
}
|
||||
|
||||
type PresignedObjectCompleteMultipartUploadResp struct {
|
||||
Object jcsypes.Object `json:"object"`
|
||||
Object jcstypes.Object `json:"object"`
|
||||
}
|
||||
|
||||
func (c *PresignedService) ObjectCompleteMultipartUpload(req PresignedObjectCompleteMultipartUpload, expireIn int) (string, error) {
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"net/http"
|
||||
|
||||
"gitlink.org.cn/cloudream/common/sdks"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type SpaceSyncerService struct {
|
||||
|
@ -20,14 +20,14 @@ func (c *Client) SpaceSyncer() *SpaceSyncerService {
|
|||
const SpaceSyncerCreateTaskPath = "/spaceSyncer/createTask"
|
||||
|
||||
type SpaceSyncerCreateTask struct {
|
||||
Trigger jcsypes.SpaceSyncTrigger `json:"trigger" binding:"required"`
|
||||
Mode jcsypes.SpaceSyncMode `json:"mode" binding:"required"`
|
||||
Filters []jcsypes.SpaceSyncFilter `json:"filters"`
|
||||
Options jcsypes.SpaceSyncOptions `json:"options" binding:"required"`
|
||||
SrcUserSpaceID jcsypes.UserSpaceID `json:"srcUserSpaceID" binding:"required"`
|
||||
SrcPath string `json:"srcPath"`
|
||||
DestUserSpaceIDs []jcsypes.UserSpaceID `json:"destUserSpaceIDs" binding:"required"`
|
||||
DestPathes []string `json:"destPathes" binding:"required"`
|
||||
Trigger jcstypes.SpaceSyncTrigger `json:"trigger" binding:"required"`
|
||||
Mode jcstypes.SpaceSyncMode `json:"mode" binding:"required"`
|
||||
Filters []jcstypes.SpaceSyncFilter `json:"filters"`
|
||||
Options jcstypes.SpaceSyncOptions `json:"options" binding:"required"`
|
||||
SrcUserSpaceID jcstypes.UserSpaceID `json:"srcUserSpaceID" binding:"required"`
|
||||
SrcPath string `json:"srcPath"`
|
||||
DestUserSpaceIDs []jcstypes.UserSpaceID `json:"destUserSpaceIDs" binding:"required"`
|
||||
DestPathes []string `json:"destPathes" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *SpaceSyncerCreateTask) MakeParam() *sdks.RequestParam {
|
||||
|
@ -35,7 +35,7 @@ func (r *SpaceSyncerCreateTask) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type SpaceSyncerCreateTaskResp struct {
|
||||
Task jcsypes.SpaceSyncTask `json:"task"`
|
||||
Task jcstypes.SpaceSyncTask `json:"task"`
|
||||
}
|
||||
|
||||
func (r *SpaceSyncerCreateTaskResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -49,7 +49,7 @@ func (c *SpaceSyncerService) CreateTask(req SpaceSyncerCreateTask) (*SpaceSyncer
|
|||
const SpaceSyncerGetTaskPath = "/spaceSyncer/getTask"
|
||||
|
||||
type SpaceSyncerGetTask struct {
|
||||
TaskID jcsypes.SpaceSyncTaskID `url:"taskID" binding:"required"`
|
||||
TaskID jcstypes.SpaceSyncTaskID `url:"taskID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *SpaceSyncerGetTask) MakeParam() *sdks.RequestParam {
|
||||
|
@ -57,7 +57,7 @@ func (r *SpaceSyncerGetTask) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type SpaceSyncerGetTaskResp struct {
|
||||
Task jcsypes.SpaceSyncTask `json:"task"`
|
||||
Task jcstypes.SpaceSyncTask `json:"task"`
|
||||
}
|
||||
|
||||
func (r *SpaceSyncerGetTaskResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -71,7 +71,7 @@ func (c *SpaceSyncerService) GetTask(req SpaceSyncerGetTask) (*SpaceSyncerGetTas
|
|||
const SpaceSyncerCancelTaskPath = "/spaceSyncer/cancelTask"
|
||||
|
||||
type SpaceSyncerCancelTask struct {
|
||||
TaskID jcsypes.SpaceSyncTaskID `json:"taskID" binding:"required"`
|
||||
TaskID jcstypes.SpaceSyncTaskID `json:"taskID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *SpaceSyncerCancelTask) MakeParam() *sdks.RequestParam {
|
||||
|
|
|
@ -10,7 +10,7 @@ import (
. "github.com/smartystreets/goconvey/convey"
"gitlink.org.cn/cloudream/common/pkgs/iterator"
"gitlink.org.cn/cloudream/jcs-pub/client/sdk/api"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

func Test_PackageGet(t *testing.T) {

@ -74,7 +74,7 @@ func Test_Object(t *testing.T) {
fileData[i] = byte(i)
}

stgAff := jcsypes.UserSpaceID(2)
stgAff := jcstypes.UserSpaceID(2)

pkgName := uuid.NewString()
createResp, err := cli.Package().Create(PackageCreate{
|
|
@ -4,7 +4,7 @@ import (
"net/http"

"gitlink.org.cn/cloudream/common/sdks"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type SystemService struct {

@ -25,7 +25,7 @@ func (r *SystemStatus) MakeParam() *sdks.RequestParam {
}

type SystemStatusResp struct {
SpeedStats jcsypes.SpeedStatsStatus `json:"speedStats"`
SpeedStats jcstypes.SpeedStatsStatus `json:"speedStats"`
}

func (r *SystemStatusResp) ParseResponse(resp *http.Response) error {
|
|
|
@ -4,8 +4,7 @@ import (
|
|||
"net/http"
|
||||
|
||||
"gitlink.org.cn/cloudream/common/sdks"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type UserSpaceService struct {
|
||||
|
@ -19,9 +18,9 @@ func (c *Client) UserSpace() *UserSpaceService {
|
|||
const UserSpaceDownloadPackagePath = "/userSpace/downloadPackage"
|
||||
|
||||
type UserSpaceDownloadPackageReq struct {
|
||||
PackageID jcsypes.PackageID `json:"packageID" binding:"required"`
|
||||
UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
RootPath string `json:"rootPath"`
|
||||
PackageID jcstypes.PackageID `json:"packageID" binding:"required"`
|
||||
UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
RootPath string `json:"rootPath"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceDownloadPackageReq) MakeParam() *sdks.RequestParam {
|
||||
|
@ -41,11 +40,11 @@ func (c *UserSpaceService) DownloadPackage(req UserSpaceDownloadPackageReq) (*Us
|
|||
const UserSpaceCreatePackagePath = "/userSpace/createPackage"
|
||||
|
||||
type UserSpaceCreatePackageReq struct {
|
||||
UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
Path string `json:"path" binding:"required"`
|
||||
BucketID jcsypes.BucketID `json:"bucketID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
SpaceAffinity jcsypes.UserSpaceID `json:"spaceAffinity"`
|
||||
UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
Path string `json:"path" binding:"required"`
|
||||
BucketID jcstypes.BucketID `json:"bucketID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
SpaceAffinity jcstypes.UserSpaceID `json:"spaceAffinity"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceCreatePackageReq) MakeParam() *sdks.RequestParam {
|
||||
|
@ -53,7 +52,7 @@ func (r *UserSpaceCreatePackageReq) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type UserSpaceCreatePackageResp struct {
|
||||
Package jcsypes.Package `json:"package"`
|
||||
Package jcstypes.Package `json:"package"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceCreatePackageResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -67,7 +66,7 @@ func (c *UserSpaceService) CreatePackage(req UserSpaceCreatePackageReq) (*UserSp
|
|||
const UserSpaceGetPath = "/userSpace/get"
|
||||
|
||||
type UserSpaceGet struct {
|
||||
UserSpaceID jcsypes.UserSpaceID `form:"userSpaceID" url:"userSpaceID" binding:"required"`
|
||||
UserSpaceID jcstypes.UserSpaceID `form:"userSpaceID" url:"userSpaceID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceGet) MakeParam() *sdks.RequestParam {
|
||||
|
@ -75,7 +74,7 @@ func (r *UserSpaceGet) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type UserSpaceGetResp struct {
|
||||
UserSpace jcsypes.UserSpace `json:"userSpace"`
|
||||
UserSpace jcstypes.UserSpace `json:"userSpace"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceGetResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -97,7 +96,7 @@ func (r *UserSpaceGetByName) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type UserSpaceGetByNameResp struct {
|
||||
UserSpace jcsypes.UserSpace `json:"userSpace"`
|
||||
UserSpace jcstypes.UserSpace `json:"userSpace"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceGetByNameResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -117,7 +116,7 @@ func (r *UserSpaceGetAll) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type UserSpaceGetAllResp struct {
|
||||
UserSpaces []jcsypes.UserSpace `json:"userSpaces"`
|
||||
UserSpaces []jcstypes.UserSpace `json:"userSpaces"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceGetAllResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -134,10 +133,10 @@ const UserSpaceCreatePath = "/userSpace/create"
|
|||
|
||||
type UserSpaceCreate struct {
|
||||
Name string `json:"name" binding:"required"`
|
||||
Storage cortypes.StorageType `json:"storage" binding:"required"`
|
||||
Credential cortypes.StorageCredential `json:"credential" binding:"required"`
|
||||
ShardStore *cortypes.ShardStoreUserConfig `json:"shardStore"`
|
||||
Features []cortypes.StorageFeature `json:"features"`
|
||||
Storage jcstypes.StorageType `json:"storage" binding:"required"`
|
||||
Credential jcstypes.StorageCredential `json:"credential" binding:"required"`
|
||||
ShardStore *jcstypes.ShardStoreUserConfig `json:"shardStore"`
|
||||
Features []jcstypes.StorageFeature `json:"features"`
|
||||
WorkingDir string `json:"workingDir"`
|
||||
}
|
||||
|
||||
|
@ -146,7 +145,7 @@ func (r *UserSpaceCreate) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type UserSpaceCreateResp struct {
|
||||
UserSpace jcsypes.UserSpace `json:"userSpace"`
|
||||
UserSpace jcstypes.UserSpace `json:"userSpace"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceCreateResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -161,10 +160,10 @@ func (c *UserSpaceService) Create(req UserSpaceCreate) (*UserSpaceCreateResp, er
|
|||
const UserSpaceUpdatePath = "/userSpace/update"
|
||||
|
||||
type UserSpaceUpdate struct {
|
||||
UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
Credential cortypes.StorageCredential `json:"credential" binding:"required"`
|
||||
Features []cortypes.StorageFeature `json:"features"`
|
||||
Credential jcstypes.StorageCredential `json:"credential" binding:"required"`
|
||||
Features []jcstypes.StorageFeature `json:"features"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceUpdate) MakeParam() *sdks.RequestParam {
|
||||
|
@ -172,7 +171,7 @@ func (r *UserSpaceUpdate) MakeParam() *sdks.RequestParam {
|
|||
}
|
||||
|
||||
type UserSpaceUpdateResp struct {
|
||||
UserSpace jcsypes.UserSpace `json:"userSpace"`
|
||||
UserSpace jcstypes.UserSpace `json:"userSpace"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceUpdateResp) ParseResponse(resp *http.Response) error {
|
||||
|
@ -187,7 +186,7 @@ func (c *UserSpaceService) Update(req UserSpaceUpdate) (*UserSpaceUpdateResp, er
|
|||
const UserSpaceDeletePath = "/userSpace/delete"
|
||||
|
||||
type UserSpaceDelete struct {
|
||||
UserSpaceID jcsypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID" binding:"required"`
|
||||
}
|
||||
|
||||
func (r *UserSpaceDelete) MakeParam() *sdks.RequestParam {
|
||||
|
@ -208,8 +207,8 @@ func (c *UserSpaceService) Delete(req UserSpaceDelete) (*UserSpaceDeleteResp, er
|
|||
const UserSpaceTestPath = "/userSpace/test"
|
||||
|
||||
type UserSpaceTest struct {
|
||||
Storage cortypes.StorageType `json:"storage" binding:"required"`
|
||||
Credential cortypes.StorageCredential `json:"credential" binding:"required"`
|
||||
Storage jcstypes.StorageType `json:"storage" binding:"required"`
|
||||
Credential jcstypes.StorageCredential `json:"credential" binding:"required"`
|
||||
WorikingDir string `json:"workingDir"`
|
||||
}
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
package stgglb

import (
"gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
"gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type LocalMachineInfo struct {
|
|
|
@ -1,9 +1,9 @@
package stgglb

import cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
import jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"

// 根据当前节点与目标地址的距离关系,选择合适的地址
func SelectGRPCAddress(hub *cortypes.Hub, addr *cortypes.GRPCAddressInfo) (string, int) {
func SelectGRPCAddress(hub *jcstypes.Hub, addr *jcstypes.GRPCAddressInfo) (string, int) {
// TODO 重新设计选择LocalIP的策略
return addr.ExternalIP, addr.ExternalGRPCPort
}
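SelectGRPCAddress keeps its call shape; only the type package changes. A minimal usage sketch, assuming the caller only needs a dial target (the dialTarget helper is hypothetical, not part of this commit):

import (
	"fmt"

	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

// dialTarget joins the (ip, port) pair returned by SelectGRPCAddress into a
// "host:port" string. The helper name and its use are assumptions for illustration.
func dialTarget(hub *jcstypes.Hub, addr *jcstypes.GRPCAddressInfo) string {
	ip, port := stgglb.SelectGRPCAddress(hub, addr)
	return fmt.Sprintf("%s:%d", ip, port)
}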
|
|
|
@ -10,7 +10,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/async"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type CacheEvent interface {

@ -23,17 +23,17 @@ type ExitEvent struct {
}

type CacheKey struct {
UserID cortypes.UserID
TokenID cortypes.AccessTokenID
UserID jcstypes.UserID
TokenID jcstypes.AccessTokenID
}

var ErrTokenNotFound = fmt.Errorf("token not found")

type AccessTokenLoader func(key CacheKey) (cortypes.UserAccessToken, error)
type AccessTokenLoader func(key CacheKey) (jcstypes.UserAccessToken, error)

type CacheEntry struct {
IsTokenValid bool
Token cortypes.UserAccessToken
Token jcstypes.UserAccessToken
PublicKey ed25519.PublicKey
LoadedAt time.Time
LastUsedAt time.Time

@ -227,6 +227,6 @@ func (mc *Cache) Verify(authInfo rpc.AccessTokenAuthInfo) bool {
return ed25519.Verify(token.PublicKey, []byte(MakeStringToSign(authInfo.UserID, authInfo.AccessTokenID, authInfo.Nonce)), []byte(sig))
}

func MakeStringToSign(userID cortypes.UserID, tokenID cortypes.AccessTokenID, nonce string) string {
func MakeStringToSign(userID jcstypes.UserID, tokenID jcstypes.AccessTokenID, nonce string) string {
return fmt.Sprintf("%v.%v.%v", userID, tokenID, nonce)
}
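Verify above checks an ed25519 signature over the "userID.tokenID.nonce" string built by MakeStringToSign. A minimal sketch of the signing side this implies; only the string format and the ed25519 usage come from the code above, the function name and its placement are assumptions:

import (
	"crypto/ed25519"
	"fmt"

	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

// signNonce mirrors MakeStringToSign and produces the signature that
// Cache.Verify validates with the token's ed25519 public key.
func signNonce(priKey ed25519.PrivateKey, userID jcstypes.UserID, tokenID jcstypes.AccessTokenID, nonce string) []byte {
	msg := fmt.Sprintf("%v.%v.%v", userID, tokenID, nonce)
	return ed25519.Sign(priKey, []byte(msg))
}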
|
|
|
@ -11,7 +11,7 @@ import (
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

type CollectorEvent interface {

@ -28,7 +28,7 @@ type CollectedEvent struct {
}

type Connectivity struct {
ToHubID cortypes.HubID
ToHubID jcstypes.HubID
Latency *time.Duration
TestTime time.Time
}

@ -38,7 +38,7 @@ type Collector struct {
enabled bool
collectNow chan any
done chan any
connectivities map[cortypes.HubID]Connectivity
connectivities map[jcstypes.HubID]Connectivity
lock *sync.RWMutex
}

@ -48,7 +48,7 @@ func NewEnabled(cfg Config) *Collector {
enabled: true,
collectNow: make(chan any, 1),
done: make(chan any, 1),
connectivities: make(map[cortypes.HubID]Connectivity),
connectivities: make(map[jcstypes.HubID]Connectivity),
lock: &sync.RWMutex{},
}
return &rpt

@ -58,16 +58,16 @@ func NewDisabled() *Collector {
enabled: false,
collectNow: make(chan any, 1),
done: make(chan any, 1),
connectivities: make(map[cortypes.HubID]Connectivity),
connectivities: make(map[jcstypes.HubID]Connectivity),
lock: &sync.RWMutex{},
}
}

func (r *Collector) GetAll() map[cortypes.HubID]Connectivity {
func (r *Collector) GetAll() map[jcstypes.HubID]Connectivity {
r.lock.RLock()
defer r.lock.RUnlock()

ret := make(map[cortypes.HubID]Connectivity)
ret := make(map[jcstypes.HubID]Connectivity)
for k, v := range r.connectivities {
ret[k] = v
}

@ -170,7 +170,7 @@ func (r *Collector) testing() bool {

r.lock.Lock()
// 删除所有hub的记录,然后重建,避免hub数量变化时导致残余数据
r.connectivities = make(map[cortypes.HubID]Connectivity)
r.connectivities = make(map[jcstypes.HubID]Connectivity)
for _, con := range cons {
r.connectivities[con.ToHubID] = con
}

@ -179,13 +179,13 @@ func (r *Collector) testing() bool {
return true
}

func (r *Collector) ping(hub cortypes.Hub) Connectivity {
func (r *Collector) ping(hub jcstypes.Hub) Connectivity {
log := logger.WithType[Collector]("").WithField("HubID", hub.HubID)

var ip string
var port int
switch addr := hub.Address.(type) {
case *cortypes.GRPCAddressInfo:
case *jcstypes.GRPCAddressInfo:
// TODO 重新设计选择LocalIP的策略
ip = addr.ExternalIP
port = addr.ExternalGRPCPort
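GetAll hands back a copy of the connectivity map keyed by jcstypes.HubID. A sketch of one way a caller might use it, picking the hub with the lowest measured latency; the helper is illustrative, assumed to live alongside Collector, and is not part of this commit:

// lowestLatencyHub scans the map returned by Collector.GetAll and returns the
// hub ID with the smallest measured latency; entries with a nil Latency are skipped.
func lowestLatencyHub(cons map[jcstypes.HubID]Connectivity) (jcstypes.HubID, bool) {
	var best jcstypes.HubID
	var bestLatency time.Duration
	found := false
	for id, c := range cons {
		if c.Latency == nil {
			continue
		}
		if !found || *c.Latency < bestLatency {
			best, bestLatency, found = id, *c.Latency, true
		}
	}
	return best, found
}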
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/common/utils/math2"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
type From interface {
|
||||
|
@ -69,9 +69,9 @@ type FromTos []FromTo
|
|||
|
||||
type FromTo struct {
|
||||
// 如果输入或者输出用到了EC编码的流,则需要提供EC参数。
|
||||
ECParam *jcsypes.ECRedundancy
|
||||
ECParam *jcstypes.ECRedundancy
|
||||
// 同上
|
||||
SegmentParam *jcsypes.SegmentRedundancy
|
||||
SegmentParam *jcstypes.SegmentRedundancy
|
||||
Froms []From
|
||||
Toes []To
|
||||
}
|
||||
|
@ -110,12 +110,12 @@ func (f *FromDriver) GetStreamIndex() StreamIndex {
|
|||
}
|
||||
|
||||
type FromShardStore struct {
|
||||
FileHash jcsypes.FileHash
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
FileHash jcstypes.FileHash
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
StreamIndex StreamIndex
|
||||
}
|
||||
|
||||
func NewFromShardstore(fileHash jcsypes.FileHash, space jcsypes.UserSpaceDetail, strIdx StreamIndex) *FromShardStore {
|
||||
func NewFromShardstore(fileHash jcstypes.FileHash, space jcstypes.UserSpaceDetail, strIdx StreamIndex) *FromShardStore {
|
||||
return &FromShardStore{
|
||||
FileHash: fileHash,
|
||||
UserSpace: space,
|
||||
|
@ -128,11 +128,11 @@ func (f *FromShardStore) GetStreamIndex() StreamIndex {
|
|||
}
|
||||
|
||||
type FromBaseStore struct {
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
Path jcsypes.JPath
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Path jcstypes.JPath
|
||||
}
|
||||
|
||||
func NewFromBaseStore(space jcsypes.UserSpaceDetail, path jcsypes.JPath) *FromBaseStore {
|
||||
func NewFromBaseStore(space jcstypes.UserSpaceDetail, path jcstypes.JPath) *FromBaseStore {
|
||||
return &FromBaseStore{
|
||||
UserSpace: space,
|
||||
Path: path,
|
||||
|
@ -177,13 +177,13 @@ func (t *ToDriver) GetRange() math2.Range {
|
|||
}
|
||||
|
||||
type ToShardStore struct {
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
StreamIndex StreamIndex
|
||||
Range math2.Range
|
||||
ResultStoreKey string
|
||||
}
|
||||
|
||||
func NewToShardStore(space jcsypes.UserSpaceDetail, strIdx StreamIndex, retStoreKey string) *ToShardStore {
|
||||
func NewToShardStore(space jcstypes.UserSpaceDetail, strIdx StreamIndex, retStoreKey string) *ToShardStore {
|
||||
return &ToShardStore{
|
||||
UserSpace: space,
|
||||
StreamIndex: strIdx,
|
||||
|
@ -191,7 +191,7 @@ func NewToShardStore(space jcsypes.UserSpaceDetail, strIdx StreamIndex, retStore
|
|||
}
|
||||
}
|
||||
|
||||
func NewToShardStoreWithRange(space jcsypes.UserSpaceDetail, streamIndex StreamIndex, retStoreKey string, rng math2.Range) *ToShardStore {
|
||||
func NewToShardStoreWithRange(space jcstypes.UserSpaceDetail, streamIndex StreamIndex, retStoreKey string, rng math2.Range) *ToShardStore {
|
||||
return &ToShardStore{
|
||||
UserSpace: space,
|
||||
StreamIndex: streamIndex,
|
||||
|
@ -209,12 +209,12 @@ func (t *ToShardStore) GetRange() math2.Range {
|
|||
}
|
||||
|
||||
type ToBaseStore struct {
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
ObjectPath jcsypes.JPath
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
ObjectPath jcstypes.JPath
|
||||
Option types.WriteOption
|
||||
}
|
||||
|
||||
func NewToBaseStore(space jcsypes.UserSpaceDetail, objectPath jcsypes.JPath) *ToBaseStore {
|
||||
func NewToBaseStore(space jcstypes.UserSpaceDetail, objectPath jcstypes.JPath) *ToBaseStore {
|
||||
return &ToBaseStore{
|
||||
UserSpace: space,
|
||||
ObjectPath: objectPath,
|
||||
|
|
|
@ -9,16 +9,16 @@ import (
"gitlink.org.cn/cloudream/common/utils/io2"
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
hubapi "gitlink.org.cn/cloudream/jcs-pub/hub/sdk/api"
)

type HttpHubWorker struct {
Hub cortypes.Hub
Hub jcstypes.Hub
}

func (w *HttpHubWorker) NewClient() (exec.WorkerClient, error) {
addressInfo := w.Hub.Address.(*cortypes.HttpAddressInfo)
addressInfo := w.Hub.Address.(*jcstypes.HttpAddressInfo)
baseUrl := "http://" + addressInfo.ExternalIP + ":" + strconv.Itoa(addressInfo.Port)
config := hubapi.Config{
URL: baseUrl,

@ -47,7 +47,7 @@ func (w *HttpHubWorker) Equals(worker exec.WorkerInfo) bool {
}

type HttpHubWorkerClient struct {
hubID cortypes.HubID
hubID jcstypes.HubID
cli *hubapi.Client
}
|
|
|
@ -11,7 +11,7 @@ import (
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.WorkerInfo](

@ -20,8 +20,8 @@ var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.Wo
)))

type HubWorker struct {
Hub cortypes.Hub
Address cortypes.GRPCAddressInfo
Hub jcstypes.Hub
Address jcstypes.GRPCAddressInfo
}

func (w *HubWorker) NewClient() (exec.WorkerClient, error) {

@ -43,7 +43,7 @@ func (w *HubWorker) Equals(worker exec.WorkerInfo) bool {
}

type HubWorkerClient struct {
hubID cortypes.HubID
hubID jcstypes.HubID
cli *hubrpc.Client
}
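HubWorker and HttpHubWorker both wrap a jcstypes.Hub and differ only in transport. A sketch of selecting one by the hub's address kind, assuming both types sit in the same package and satisfy exec.WorkerInfo as their NewClient/Equals methods suggest; the helper itself is not part of this commit:

// workerForHub is an illustrative helper: gRPC hubs get a HubWorker, HTTP hubs
// get an HttpHubWorker (mirroring the type assertion in HttpHubWorker.NewClient).
func workerForHub(hub jcstypes.Hub) exec.WorkerInfo {
	switch addr := hub.Address.(type) {
	case *jcstypes.GRPCAddressInfo:
		return &HubWorker{Hub: hub, Address: *addr}
	case *jcstypes.HttpAddressInfo:
		return &HttpHubWorker{Hub: hub}
	default:
		return nil
	}
}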
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -43,8 +43,8 @@ func (v *BaseReadStatsValue) Clone() exec.VarValue {
|
|||
|
||||
type BaseRead struct {
|
||||
Output exec.VarID
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
Path jcsypes.JPath
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Path jcstypes.JPath
|
||||
Option types.OpenOption
|
||||
}
|
||||
|
||||
|
@ -101,7 +101,7 @@ func (o *BaseRead) String() string {
|
|||
}
|
||||
|
||||
type BaseReadDyn struct {
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Output exec.VarID
|
||||
FileInfo exec.VarID
|
||||
Option types.OpenOption
|
||||
|
@ -167,8 +167,8 @@ func (o *BaseReadDyn) String() string {
|
|||
|
||||
type BaseWrite struct {
|
||||
Input exec.VarID
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
Path jcsypes.JPath
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Path jcstypes.JPath
|
||||
FileInfo exec.VarID
|
||||
Option types.WriteOption
|
||||
}
|
||||
|
@ -213,12 +213,12 @@ func (o *BaseWrite) String() string {
|
|||
type BaseReadNode struct {
|
||||
dag.NodeBase
|
||||
From ioswitch2.From
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
Path jcsypes.JPath
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Path jcstypes.JPath
|
||||
Option types.OpenOption
|
||||
}
|
||||
|
||||
func (b *GraphNodeBuilder) NewBaseRead(from ioswitch2.From, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.OpenOption) *BaseReadNode {
|
||||
func (b *GraphNodeBuilder) NewBaseRead(from ioswitch2.From, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.OpenOption) *BaseReadNode {
|
||||
node := &BaseReadNode{
|
||||
From: from,
|
||||
UserSpace: userSpace,
|
||||
|
@ -254,11 +254,11 @@ func (t *BaseReadNode) GenerateOp() (exec.Op, error) {
|
|||
type BaseReadDynNode struct {
|
||||
dag.NodeBase
|
||||
From ioswitch2.From
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Option types.OpenOption
|
||||
}
|
||||
|
||||
func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitch2.From, userSpace jcsypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode {
|
||||
func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitch2.From, userSpace jcstypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode {
|
||||
node := &BaseReadDynNode{
|
||||
From: from,
|
||||
UserSpace: userSpace,
|
||||
|
@ -301,12 +301,12 @@ func (t *BaseReadDynNode) GenerateOp() (exec.Op, error) {
|
|||
type BaseWriteNode struct {
|
||||
dag.NodeBase
|
||||
To ioswitch2.To
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
Path jcsypes.JPath
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Path jcstypes.JPath
|
||||
Option types.WriteOption
|
||||
}
|
||||
|
||||
func (b *GraphNodeBuilder) NewBaseWrite(to ioswitch2.To, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.WriteOption) *BaseWriteNode {
|
||||
func (b *GraphNodeBuilder) NewBaseWrite(to ioswitch2.To, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.WriteOption) *BaseWriteNode {
|
||||
node := &BaseWriteNode{
|
||||
To: to,
|
||||
UserSpace: userSpace,
|
||||
|
|
|
@ -7,7 +7,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -17,8 +17,8 @@ func init() {
|
|||
|
||||
// 旁路Http读取
|
||||
type GetShardHTTPRequest struct {
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
FileHash jcsypes.FileHash
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
FileHash jcstypes.FileHash
|
||||
Output exec.VarID
|
||||
}
|
||||
|
||||
|
@ -64,11 +64,11 @@ func (o *GetShardHTTPRequest) String() string {
|
|||
// 旁路Http读取
|
||||
type GetShardHTTPRequestNode struct {
|
||||
dag.NodeBase
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
FileHash jcsypes.FileHash
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
FileHash jcstypes.FileHash
|
||||
}
|
||||
|
||||
func (b *GraphNodeBuilder) NewGetShardHTTPRequest(userSpace jcsypes.UserSpaceDetail, fileHash jcsypes.FileHash) *GetShardHTTPRequestNode {
|
||||
func (b *GraphNodeBuilder) NewGetShardHTTPRequest(userSpace jcstypes.UserSpaceDetail, fileHash jcstypes.FileHash) *GetShardHTTPRequestNode {
|
||||
node := &GetShardHTTPRequestNode{
|
||||
UserSpace: userSpace,
|
||||
FileHash: fileHash,
|
||||
|
|
|
@ -13,7 +13,7 @@ import (
|
|||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/utils"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
|
||||
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
|
||||
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -146,7 +146,7 @@ func (o *ECMultiply) String() string {
|
|||
}
|
||||
|
||||
type CallECMultiplier struct {
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
Coef [][]byte
|
||||
Inputs []exec.VarID
|
||||
Outputs []exec.VarID
|
||||
|
@ -203,12 +203,12 @@ func (o *CallECMultiplier) String() string {
|
|||
|
||||
type ECMultiplyNode struct {
|
||||
dag.NodeBase
|
||||
EC jcsypes.ECRedundancy
|
||||
EC jcstypes.ECRedundancy
|
||||
InputIndexes []int
|
||||
OutputIndexes []int
|
||||
}
|
||||
|
||||
func (b *GraphNodeBuilder) NewECMultiply(ec jcsypes.ECRedundancy) *ECMultiplyNode {
|
||||
func (b *GraphNodeBuilder) NewECMultiply(ec jcstypes.ECRedundancy) *ECMultiplyNode {
|
||||
node := &ECMultiplyNode{
|
||||
EC: ec,
|
||||
}
|
||||
|
@ -257,13 +257,13 @@ func (t *ECMultiplyNode) GenerateOp() (exec.Op, error) {
|
|||
|
||||
type CallECMultiplierNode struct {
|
||||
dag.NodeBase
|
||||
UserSpace jcsypes.UserSpaceDetail
|
||||
EC jcsypes.ECRedundancy
|
||||
UserSpace jcstypes.UserSpaceDetail
|
||||
EC jcstypes.ECRedundancy
|
||||
InputIndexes []int
|
||||
OutputIndexes []int
|
||||
}
|
||||
|
||||
func (b *GraphNodeBuilder) NewCallECMultiplier(userSpace jcsypes.UserSpaceDetail) *CallECMultiplierNode {
|
||||
func (b *GraphNodeBuilder) NewCallECMultiplier(userSpace jcstypes.UserSpaceDetail) *CallECMultiplierNode {
|
||||
node := &CallECMultiplierNode{
|
||||
UserSpace: userSpace,
|
||||
}
|
||||
|
|
|
@@ -9,7 +9,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
@@ -40,7 +40,7 @@ func (v *UploadedPartInfoValue) Clone() exec.VarValue {
}
type MultipartInitiator struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
UploadArgs exec.VarID
UploadedParts []exec.VarID
FileOutput exec.VarID // 分片上传之后的临时文件的路径
@@ -99,7 +99,7 @@ func (o *MultipartInitiator) String() string {
}
type MultipartUpload struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
UploadArgs exec.VarID
UploadResult exec.VarID
PartStream exec.VarID
@@ -149,10 +149,10 @@ func (o *MultipartUpload) String() string {
type MultipartInitiatorNode struct {
dag.NodeBase
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
}
func (b *GraphNodeBuilder) NewMultipartInitiator(userSpace jcsypes.UserSpaceDetail) *MultipartInitiatorNode {
func (b *GraphNodeBuilder) NewMultipartInitiator(userSpace jcstypes.UserSpaceDetail) *MultipartInitiatorNode {
node := &MultipartInitiatorNode{
UserSpace: userSpace,
}
@@ -194,12 +194,12 @@ func (n *MultipartInitiatorNode) GenerateOp() (exec.Op, error) {
type MultipartUploadNode struct {
dag.NodeBase
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
PartNumber int
PartSize int64
}
func (b *GraphNodeBuilder) NewMultipartUpload(userSpace jcsypes.UserSpaceDetail, partNumber int, partSize int64) *MultipartUploadNode {
func (b *GraphNodeBuilder) NewMultipartUpload(userSpace jcstypes.UserSpaceDetail, partNumber int, partSize int64) *MultipartUploadNode {
node := &MultipartUploadNode{
UserSpace: userSpace,
PartNumber: partNumber,

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
@@ -15,10 +15,10 @@ func init() {
}
type S2STransfer struct {
SrcSpace jcsypes.UserSpaceDetail
SrcPath jcsypes.JPath
DstSpace jcsypes.UserSpaceDetail
DstPath jcsypes.JPath
SrcSpace jcstypes.UserSpaceDetail
SrcPath jcstypes.JPath
DstSpace jcstypes.UserSpaceDetail
DstPath jcstypes.JPath
Output exec.VarID
}
@@ -55,10 +55,10 @@ func (o *S2STransfer) String() string {
}
type S2STransferDyn struct {
SrcSpace jcsypes.UserSpaceDetail
SrcSpace jcstypes.UserSpaceDetail
SrcFileInfo exec.VarID
DstSpace jcsypes.UserSpaceDetail
DstPath jcsypes.JPath
DstSpace jcstypes.UserSpaceDetail
DstPath jcstypes.JPath
Output exec.VarID
}
@@ -101,13 +101,13 @@ func (o *S2STransferDyn) String() string {
type S2STransferNode struct {
dag.NodeBase
SrcSpace jcsypes.UserSpaceDetail
SrcPath jcsypes.JPath
DstSpace jcsypes.UserSpaceDetail
DstPath jcsypes.JPath
SrcSpace jcstypes.UserSpaceDetail
SrcPath jcstypes.JPath
DstSpace jcstypes.UserSpaceDetail
DstPath jcstypes.JPath
}
func (b *GraphNodeBuilder) NewS2STransfer(srcSpace jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstSpace jcsypes.UserSpaceDetail, dstPath jcsypes.JPath) *S2STransferNode {
func (b *GraphNodeBuilder) NewS2STransfer(srcSpace jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstSpace jcstypes.UserSpaceDetail, dstPath jcstypes.JPath) *S2STransferNode {
n := &S2STransferNode{
SrcSpace: srcSpace,
SrcPath: srcPath,
@@ -139,12 +139,12 @@ func (n *S2STransferNode) GenerateOp() (exec.Op, error) {
type S2STransferDynNode struct {
dag.NodeBase
SrcSpace jcsypes.UserSpaceDetail
DstSpace jcsypes.UserSpaceDetail
DstPath jcsypes.JPath
SrcSpace jcstypes.UserSpaceDetail
DstSpace jcstypes.UserSpaceDetail
DstPath jcstypes.JPath
}
func (b *GraphNodeBuilder) NewS2STransferDyn(srcSpace jcsypes.UserSpaceDetail, dstSpace jcsypes.UserSpaceDetail, dstPath jcsypes.JPath) *S2STransferDynNode {
func (b *GraphNodeBuilder) NewS2STransferDyn(srcSpace jcstypes.UserSpaceDetail, dstSpace jcstypes.UserSpaceDetail, dstPath jcstypes.JPath) *S2STransferDynNode {
n := &S2STransferDynNode{
SrcSpace: srcSpace,
DstSpace: dstSpace,

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
@@ -15,8 +15,8 @@ func init() {
}
type GetShardInfo struct {
UserSpace jcsypes.UserSpaceDetail
FileHash jcsypes.FileHash
UserSpace jcstypes.UserSpaceDetail
FileHash jcstypes.FileHash
ShardInfo exec.VarID
}
@@ -46,7 +46,7 @@ func (o *GetShardInfo) String() string {
}
type StoreShard struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
FileInfo exec.VarID
ShardInfo exec.VarID
}
@@ -84,11 +84,11 @@ func (o *StoreShard) String() string {
type GetShardInfoNode struct {
dag.NodeBase
UserSpace jcsypes.UserSpaceDetail
FileHash jcsypes.FileHash
UserSpace jcstypes.UserSpaceDetail
FileHash jcstypes.FileHash
}
func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcsypes.UserSpaceDetail, fileHash jcsypes.FileHash) *GetShardInfoNode {
func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcstypes.UserSpaceDetail, fileHash jcstypes.FileHash) *GetShardInfoNode {
node := &GetShardInfoNode{
UserSpace: userSpace,
FileHash: fileHash,
@@ -116,11 +116,11 @@ func (n *GetShardInfoNode) GenerateOp() (exec.Op, error) {
type StoreShardNode struct {
dag.NodeBase
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
ShardInfoKey string
}
func (b *GraphNodeBuilder) NewStoreShard(userSpace jcsypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode {
func (b *GraphNodeBuilder) NewStoreShard(userSpace jcstypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode {
node := &StoreShardNode{
UserSpace: userSpace,
ShardInfoKey: shardInfoKey,

@@ -12,8 +12,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser/state"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
// 检查使用不同编码时参数是否设置到位
@@ -385,17 +384,17 @@ func buildToNode(ctx *state.GenerateState, t ioswitch2.To) (ops2.ToNode, error)
}
}
func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error {
func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error {
if space.RecommendHub == nil {
n.Env().ToEnvDriver(true)
return nil
}
switch addr := space.RecommendHub.Address.(type) {
case *cortypes.HttpAddressInfo:
case *jcstypes.HttpAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true)
case *cortypes.GRPCAddressInfo:
case *jcstypes.GRPCAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true)
default:

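setEnvBySpace (here and in the later copies of the same helper) chooses an execution environment by switching on the concrete type of the recommended hub's address. A self-contained sketch of that type-switch pattern, using simplified stand-in types instead of the real jcstypes/ioswitch2 ones:

package main

import "fmt"

// Stand-ins for the address variants; the real ones are jcstypes.HttpAddressInfo
// and jcstypes.GRPCAddressInfo.
type AddressInfo interface{ isAddress() }

type HttpAddressInfo struct{ URL string }

type GRPCAddressInfo struct {
	Host string
	Port int
}

func (*HttpAddressInfo) isAddress() {}
func (*GRPCAddressInfo) isAddress() {}

// pickWorker mirrors the switch in setEnvBySpace: an HTTP worker for HTTP hubs,
// a gRPC worker for gRPC hubs, and an error for any unknown variant.
func pickWorker(addr AddressInfo) (string, error) {
	switch a := addr.(type) {
	case *HttpAddressInfo:
		return "http worker at " + a.URL, nil
	case *GRPCAddressInfo:
		return fmt.Sprintf("grpc worker at %s:%d", a.Host, a.Port), nil
	default:
		return "", fmt.Errorf("unsupported address type %T", addr)
	}
}

func main() {
	w, _ := pickWorker(&GRPCAddressInfo{Host: "hub-1", Port: 5010})
	fmt.Println(w)
}
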
@@ -5,21 +5,20 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error {
func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error {
if space.RecommendHub == nil {
n.Env().ToEnvDriver(true)
return nil
}
switch addr := space.RecommendHub.Address.(type) {
case *cortypes.HttpAddressInfo:
case *jcstypes.HttpAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true)
case *cortypes.GRPCAddressInfo:
case *jcstypes.GRPCAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true)
default:

@@ -8,10 +8,10 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/plan"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func CompleteMultipart(blocks []jcsypes.ObjectBlock, blockSpaces []jcsypes.UserSpaceDetail, targetSpace jcsypes.UserSpaceDetail, shardInfoKey string, blder *exec.PlanBuilder) error {
func CompleteMultipart(blocks []jcstypes.ObjectBlock, blockSpaces []jcstypes.UserSpaceDetail, targetSpace jcstypes.UserSpaceDetail, shardInfoKey string, blder *exec.PlanBuilder) error {
da := ops2.NewGraphNodeBuilder()
sizes := make([]int64, len(blocks))

@@ -5,21 +5,20 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error {
func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error {
if space.RecommendHub == nil {
n.Env().ToEnvDriver(true)
return nil
}
switch addr := space.RecommendHub.Address.(type) {
case *cortypes.HttpAddressInfo:
case *jcstypes.HttpAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true)
case *cortypes.GRPCAddressInfo:
case *jcstypes.GRPCAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true)
default:

@@ -4,7 +4,7 @@ import (
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type From interface {
@@ -39,12 +39,12 @@ func (f *FromDriver) GetDataIndex() int {
}
type FromNode struct {
FileHash jcsypes.FileHash
UserSpace jcsypes.UserSpaceDetail
FileHash jcstypes.FileHash
UserSpace jcstypes.UserSpaceDetail
DataIndex int
}
func NewFromStorage(fileHash jcsypes.FileHash, space jcsypes.UserSpaceDetail, dataIndex int) *FromNode {
func NewFromStorage(fileHash jcstypes.FileHash, space jcstypes.UserSpaceDetail, dataIndex int) *FromNode {
return &FromNode{
FileHash: fileHash,
DataIndex: dataIndex,
@@ -88,14 +88,14 @@ func (t *ToDriver) GetRange() math2.Range {
}
type ToNode struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
DataIndex int
Range math2.Range
FileHashStoreKey string
Option types.WriteOption
}
func NewToStorage(space jcsypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string) *ToNode {
func NewToStorage(space jcstypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string) *ToNode {
return &ToNode{
UserSpace: space,
DataIndex: dataIndex,
@@ -103,7 +103,7 @@ func NewToStorage(space jcsypes.UserSpaceDetail, dataIndex int, fileHashStoreKey
}
}
func NewToStorageWithRange(space jcsypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode {
func NewToStorageWithRange(space jcstypes.UserSpaceDetail, dataIndex int, fileHashStoreKey string, rng math2.Range) *ToNode {
return &ToNode{
UserSpace: space,
DataIndex: dataIndex,

@@ -7,7 +7,7 @@ import (
stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
// var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.WorkerInfo](
@@ -15,8 +15,8 @@ import (
// )))
type HubWorker struct {
Hub cortypes.Hub
Address cortypes.GRPCAddressInfo
Hub jcstypes.Hub
Address jcstypes.GRPCAddressInfo
}
func (w *HubWorker) NewClient() (exec.WorkerClient, error) {

@@ -12,7 +12,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
@@ -23,8 +23,8 @@ func init() {
type BaseRead struct {
Output exec.VarID
UserSpace jcsypes.UserSpaceDetail
Path jcsypes.JPath
UserSpace jcstypes.UserSpaceDetail
Path jcstypes.JPath
Option types.OpenOption
}
@@ -67,7 +67,7 @@ func (o *BaseRead) String() string {
}
type BaseReadDyn struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
Output exec.VarID
Path exec.VarID
Option types.OpenOption
@@ -118,8 +118,8 @@ func (o *BaseReadDyn) String() string {
type BaseWrite struct {
Input exec.VarID
UserSpace jcsypes.UserSpaceDetail
Path jcsypes.JPath
UserSpace jcstypes.UserSpaceDetail
Path jcstypes.JPath
WriteResult exec.VarID
Option types.WriteOption
}
@@ -164,12 +164,12 @@ func (o *BaseWrite) String() string {
type BaseReadNode struct {
dag.NodeBase
From ioswitchlrc.From
UserSpace jcsypes.UserSpaceDetail
Path jcsypes.JPath
UserSpace jcstypes.UserSpaceDetail
Path jcstypes.JPath
Option types.OpenOption
}
func (b *GraphNodeBuilder) NewBaseRead(from ioswitchlrc.From, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.OpenOption) *BaseReadNode {
func (b *GraphNodeBuilder) NewBaseRead(from ioswitchlrc.From, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.OpenOption) *BaseReadNode {
node := &BaseReadNode{
From: from,
UserSpace: userSpace,
@@ -205,11 +205,11 @@ func (t *BaseReadNode) GenerateOp() (exec.Op, error) {
type BaseReadDynNode struct {
dag.NodeBase
From ioswitchlrc.From
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
Option types.OpenOption
}
func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitchlrc.From, userSpace jcsypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode {
func (b *GraphNodeBuilder) NewBaseReadDyn(from ioswitchlrc.From, userSpace jcstypes.UserSpaceDetail, opt types.OpenOption) *BaseReadDynNode {
node := &BaseReadDynNode{
From: from,
UserSpace: userSpace,
@@ -252,12 +252,12 @@ func (t *BaseReadDynNode) GenerateOp() (exec.Op, error) {
type BaseWriteNode struct {
dag.NodeBase
To ioswitchlrc.To
UserSpace jcsypes.UserSpaceDetail
Path jcsypes.JPath
UserSpace jcstypes.UserSpaceDetail
Path jcstypes.JPath
Option types.WriteOption
}
func (b *GraphNodeBuilder) NewBaseWrite(to ioswitchlrc.To, userSpace jcsypes.UserSpaceDetail, path jcsypes.JPath, opt types.WriteOption) *BaseWriteNode {
func (b *GraphNodeBuilder) NewBaseWrite(to ioswitchlrc.To, userSpace jcstypes.UserSpaceDetail, path jcstypes.JPath, opt types.WriteOption) *BaseWriteNode {
node := &BaseWriteNode{
To: to,
UserSpace: userSpace,

@@ -12,7 +12,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/utils"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
@@ -114,12 +114,12 @@ func (o *GalMultiply) String() string {
type LRCConstructAnyNode struct {
dag.NodeBase
LRC jcsypes.LRCRedundancy
LRC jcstypes.LRCRedundancy
InputIndexes []int
OutputIndexes []int
}
func (b *GraphNodeBuilder) NewLRCConstructAny(lrc jcsypes.LRCRedundancy) *LRCConstructAnyNode {
func (b *GraphNodeBuilder) NewLRCConstructAny(lrc jcstypes.LRCRedundancy) *LRCConstructAnyNode {
node := &LRCConstructAnyNode{
LRC: lrc,
}
@@ -168,11 +168,11 @@ func (t *LRCConstructAnyNode) GenerateOp() (exec.Op, error) {
type LRCConstructGroupNode struct {
dag.NodeBase
LRC jcsypes.LRCRedundancy
LRC jcstypes.LRCRedundancy
TargetBlockIndex int
}
func (b *GraphNodeBuilder) NewLRCConstructGroup(lrc jcsypes.LRCRedundancy) *LRCConstructGroupNode {
func (b *GraphNodeBuilder) NewLRCConstructGroup(lrc jcstypes.LRCRedundancy) *LRCConstructGroupNode {
node := &LRCConstructGroupNode{
LRC: lrc,
}

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
@@ -15,8 +15,8 @@ func init() {
}
type GetShardInfo struct {
UserSpace jcsypes.UserSpaceDetail
FileHash jcsypes.FileHash
UserSpace jcstypes.UserSpaceDetail
FileHash jcstypes.FileHash
ShardInfo exec.VarID
}
@@ -46,7 +46,7 @@ func (o *GetShardInfo) String() string {
}
type StoreShard struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
FileInfo exec.VarID
ShardInfo exec.VarID
}
@@ -84,11 +84,11 @@ func (o *StoreShard) String() string {
type GetShardInfoNode struct {
dag.NodeBase
UserSpace jcsypes.UserSpaceDetail
FileHash jcsypes.FileHash
UserSpace jcstypes.UserSpaceDetail
FileHash jcstypes.FileHash
}
func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcsypes.UserSpaceDetail, fileHash jcsypes.FileHash) *GetShardInfoNode {
func (b *GraphNodeBuilder) NewGetShardInfo(userSpace jcstypes.UserSpaceDetail, fileHash jcstypes.FileHash) *GetShardInfoNode {
node := &GetShardInfoNode{
UserSpace: userSpace,
FileHash: fileHash,
@@ -116,11 +116,11 @@ func (n *GetShardInfoNode) GenerateOp() (exec.Op, error) {
type StoreShardNode struct {
dag.NodeBase
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
ShardInfoKey string
}
func (b *GraphNodeBuilder) NewStoreShard(userSpace jcsypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode {
func (b *GraphNodeBuilder) NewStoreShard(userSpace jcstypes.UserSpaceDetail, shardInfoKey string) *StoreShardNode {
node := &StoreShardNode{
UserSpace: userSpace,
ShardInfoKey: shardInfoKey,

@@ -9,11 +9,11 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/plan"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitchlrc/ops2"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type GenerateContext struct {
LRC jcsypes.LRCRedundancy
LRC jcstypes.LRCRedundancy
DAG *ops2.GraphNodeBuilder
To []ioswitchlrc.To
ToNodes map[ioswitchlrc.To]ops2.ToNode
@@ -27,7 +27,7 @@ func Encode(fr ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder)
}
ctx := GenerateContext{
LRC: jcsypes.DefaultLRCRedundancy,
LRC: jcstypes.DefaultLRCRedundancy,
DAG: ops2.NewGraphNodeBuilder(),
To: toes,
ToNodes: make(map[ioswitchlrc.To]ops2.ToNode),
@@ -124,7 +124,7 @@ func buildDAGEncode(ctx *GenerateContext, fr ioswitchlrc.From, toes []ioswitchlr
// 提供数据块+编码块中的k个块,重建任意块,包括完整文件。
func ReconstructAny(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder) error {
ctx := GenerateContext{
LRC: jcsypes.DefaultLRCRedundancy,
LRC: jcstypes.DefaultLRCRedundancy,
DAG: ops2.NewGraphNodeBuilder(),
To: toes,
ToNodes: make(map[ioswitchlrc.To]ops2.ToNode),
@@ -245,7 +245,7 @@ func buildDAGReconstructAny(ctx *GenerateContext, frs []ioswitchlrc.From, toes [
// 输入同一组的多个块,恢复出剩下缺少的一个块。
func ReconstructGroup(frs []ioswitchlrc.From, toes []ioswitchlrc.To, blder *exec.PlanBuilder) error {
ctx := GenerateContext{
LRC: jcsypes.DefaultLRCRedundancy,
LRC: jcstypes.DefaultLRCRedundancy,
DAG: ops2.NewGraphNodeBuilder(),
To: toes,
ToNodes: make(map[ioswitchlrc.To]ops2.ToNode),

@@ -5,21 +5,20 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func setEnvBySpace(n dag.Node, space *jcsypes.UserSpaceDetail) error {
func setEnvBySpace(n dag.Node, space *jcstypes.UserSpaceDetail) error {
if space.RecommendHub == nil {
n.Env().ToEnvDriver(true)
return nil
}
switch addr := space.RecommendHub.Address.(type) {
case *cortypes.HttpAddressInfo:
case *jcstypes.HttpAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Hub: *space.RecommendHub}, true)
case *cortypes.GRPCAddressInfo:
case *jcstypes.GRPCAddressInfo:
n.Env().ToEnvWorker(&ioswitch2.HubWorker{Hub: *space.RecommendHub, Address: *addr}, true)
default:

@@ -5,7 +5,7 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/lockprovider"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type UserSpaceLockReqBuilder struct {
@@ -15,7 +15,7 @@ type UserSpaceLockReqBuilder struct {
func (b *LockRequestBuilder) UserSpace() *UserSpaceLockReqBuilder {
return &UserSpaceLockReqBuilder{LockRequestBuilder: b}
}
func (b *UserSpaceLockReqBuilder) Buzy(spaceID jcsypes.UserSpaceID) *UserSpaceLockReqBuilder {
func (b *UserSpaceLockReqBuilder) Buzy(spaceID jcstypes.UserSpaceID) *UserSpaceLockReqBuilder {
b.locks = append(b.locks, types.Lock{
Path: b.makePath(spaceID),
Name: lockprovider.UserSpaceBuzyLock,
@@ -24,7 +24,7 @@ func (b *UserSpaceLockReqBuilder) Buzy(spaceID jcsypes.UserSpaceID) *UserSpaceLo
return b
}
func (b *UserSpaceLockReqBuilder) GC(spaceID jcsypes.UserSpaceID) *UserSpaceLockReqBuilder {
func (b *UserSpaceLockReqBuilder) GC(spaceID jcstypes.UserSpaceID) *UserSpaceLockReqBuilder {
b.locks = append(b.locks, types.Lock{
Path: b.makePath(spaceID),
Name: lockprovider.UserSpaceGCLock,
@@ -33,6 +33,6 @@ func (b *UserSpaceLockReqBuilder) GC(spaceID jcsypes.UserSpaceID) *UserSpaceLock
return b
}
func (b *UserSpaceLockReqBuilder) makePath(hubID jcsypes.UserSpaceID) []string {
func (b *UserSpaceLockReqBuilder) makePath(hubID jcstypes.UserSpaceID) []string {
return []string{lockprovider.UserSpaceLockPathPrefix, strconv.FormatInt(int64(hubID), 10)}
}

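The Buzy/GC methods above accumulate locks keyed by a user space ID and return the builder so calls can be chained. A minimal, self-contained sketch of that builder pattern — simplified types and string literals stand in for the real publock/lockprovider definitions:

package main

import (
	"fmt"
	"strconv"
)

// Simplified stand-ins for publock types.Lock and the lockprovider constants.
type Lock struct {
	Path []string
	Name string
}

type UserSpaceLockReqBuilder struct{ locks []Lock }

func (b *UserSpaceLockReqBuilder) makePath(spaceID int64) []string {
	return []string{"UserSpace", strconv.FormatInt(spaceID, 10)}
}

// Buzy and GC mirror the two lock kinds in the hunks above.
func (b *UserSpaceLockReqBuilder) Buzy(spaceID int64) *UserSpaceLockReqBuilder {
	b.locks = append(b.locks, Lock{Path: b.makePath(spaceID), Name: "Buzy"})
	return b
}

func (b *UserSpaceLockReqBuilder) GC(spaceID int64) *UserSpaceLockReqBuilder {
	b.locks = append(b.locks, Lock{Path: b.makePath(spaceID), Name: "GC"})
	return b
}

func main() {
	b := &UserSpaceLockReqBuilder{}
	b.Buzy(1).GC(2)
	fmt.Println(b.locks)
}
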
@@ -5,7 +5,7 @@ import (
"fmt"
"strconv"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -27,8 +27,8 @@ const (
)
type AccessTokenAuthInfo struct {
UserID cortypes.UserID
AccessTokenID cortypes.AccessTokenID
UserID jcstypes.UserID
AccessTokenID jcstypes.AccessTokenID
Nonce string
Signature string
}
@@ -125,8 +125,8 @@ func (s *ServerBase) authUnary(
}
authInfo := AccessTokenAuthInfo{
UserID: cortypes.UserID(userID),
AccessTokenID: cortypes.AccessTokenID(accessTokenIDs[0]),
UserID: jcstypes.UserID(userID),
AccessTokenID: jcstypes.AccessTokenID(accessTokenIDs[0]),
Nonce: nonce[0],
Signature: signature[0],
}
@@ -200,8 +200,8 @@ func (s *ServerBase) authStream(
}
authInfo := AccessTokenAuthInfo{
UserID: cortypes.UserID(userID),
AccessTokenID: cortypes.AccessTokenID(accessTokenIDs[0]),
UserID: jcstypes.UserID(userID),
AccessTokenID: jcstypes.AccessTokenID(accessTokenIDs[0]),
Nonce: nonce[0],
Signature: signature[0],
}

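authUnary/authStream build an AccessTokenAuthInfo from values carried in the incoming gRPC metadata. A small sketch of that step, using the standard grpc metadata API; the header name "nonce" is an assumption, not necessarily what the real interceptor uses:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// firstMeta pulls a single metadata value out of an incoming gRPC context,
// which is the general shape of what the auth interceptors do before
// assembling the AccessTokenAuthInfo for signature verification.
func firstMeta(ctx context.Context, key string) (string, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", fmt.Errorf("missing metadata")
	}
	vs := md.Get(key)
	if len(vs) == 0 {
		return "", fmt.Errorf("missing metadata key %q", key)
	}
	return vs[0], nil
}

func main() {
	ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("nonce", "abc123"))
	v, err := firstMeta(ctx, "nonce")
	fmt.Println(v, err)
}
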
@@ -4,7 +4,7 @@ import (
context "context"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type HubService interface {
@@ -18,18 +18,18 @@ type HubService interface {
}
type GetHubConfig struct {
HubID cortypes.HubID `json:"hubID"`
HubID jcstypes.HubID `json:"hubID"`
}
type GetHubConfigResp struct {
Hub cortypes.Hub `json:"hub"`
Hub jcstypes.Hub `json:"hub"`
}
func ReqGetHubConfig(hubID cortypes.HubID) *GetHubConfig {
func ReqGetHubConfig(hubID jcstypes.HubID) *GetHubConfig {
return &GetHubConfig{
HubID: hubID,
}
}
func RespGetHubConfig(hub cortypes.Hub) *GetHubConfigResp {
func RespGetHubConfig(hub jcstypes.Hub) *GetHubConfigResp {
return &GetHubConfigResp{
Hub: hub,
}
@@ -43,23 +43,23 @@ func (s *Server) GetHubConfig(ctx context.Context, req *rpc.Request) (*rpc.Respo
// 获取指定节点的信息。如果HubIDs为nil,则返回所有Hub
type GetHubs struct {
HubIDs []cortypes.HubID `json:"hubIDs"`
HubIDs []jcstypes.HubID `json:"hubIDs"`
}
type GetHubsResp struct {
Hubs []*cortypes.Hub `json:"hubs"`
Hubs []*jcstypes.Hub `json:"hubs"`
}
func NewGetHubs(hubIDs []cortypes.HubID) *GetHubs {
func NewGetHubs(hubIDs []jcstypes.HubID) *GetHubs {
return &GetHubs{
HubIDs: hubIDs,
}
}
func NewGetHubsResp(hubs []*cortypes.Hub) *GetHubsResp {
func NewGetHubsResp(hubs []*jcstypes.Hub) *GetHubsResp {
return &GetHubsResp{
Hubs: hubs,
}
}
func (r *GetHubsResp) GetHub(id cortypes.HubID) *cortypes.Hub {
func (r *GetHubsResp) GetHub(id jcstypes.HubID) *jcstypes.Hub {
for _, n := range r.Hubs {
if n.HubID == id {
return n
@@ -78,18 +78,18 @@ func (s *Server) GetHubs(ctx context.Context, req *rpc.Request) (*rpc.Response,
// 获取节点连通性信息
type GetHubConnectivities struct {
HubIDs []cortypes.HubID `json:"hubIDs"`
HubIDs []jcstypes.HubID `json:"hubIDs"`
}
type GetHubConnectivitiesResp struct {
Connectivities []cortypes.HubConnectivity `json:"hubs"`
Connectivities []jcstypes.HubConnectivity `json:"hubs"`
}
func ReqGetHubConnectivities(hubIDs []cortypes.HubID) *GetHubConnectivities {
func ReqGetHubConnectivities(hubIDs []jcstypes.HubID) *GetHubConnectivities {
return &GetHubConnectivities{
HubIDs: hubIDs,
}
}
func RespGetHubConnectivities(cons []cortypes.HubConnectivity) *GetHubConnectivitiesResp {
func RespGetHubConnectivities(cons []jcstypes.HubConnectivity) *GetHubConnectivitiesResp {
return &GetHubConnectivitiesResp{
Connectivities: cons,
}
@@ -103,7 +103,7 @@ func (s *Server) GetHubConnectivities(ctx context.Context, req *rpc.Request) (*r
// 上报节点连通性信息
type ReportHubConnectivity struct {
Connecttivities []cortypes.HubConnectivity
Connecttivities []jcstypes.HubConnectivity
}
type ReportHubConnectivityResp struct {
}

@@ -4,7 +4,7 @@ import (
context "context"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type StorageService interface {
@@ -13,10 +13,10 @@ type StorageService interface {
// 为指定的Storage选择一个适合通信的Hub
type SelectStorageHub struct {
Storages []cortypes.StorageType
Storages []jcstypes.StorageType
}
type SelectStorageHubResp struct {
Hubs []*cortypes.Hub
Hubs []*jcstypes.Hub
}
var _ = TokenAuth(Coordinator_SelectStorageHub_FullMethodName)

@@ -4,7 +4,7 @@ import (
context "context"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type UserService interface {
@@ -23,7 +23,7 @@ type UserLogin struct {
Password string
}
type UserLoginResp struct {
Token cortypes.UserAccessToken
Token jcstypes.UserAccessToken
PrivateKey string
}
@@ -42,7 +42,7 @@ func (s *Server) UserLogin(ctx context.Context, req *rpc.Request) (*rpc.Response
// 客户端刷新Token,原始Token会继续有效。
type UserRefreshToken struct{}
type UserRefreshTokenResp struct {
Token cortypes.UserAccessToken
Token jcstypes.UserAccessToken
PrivateKey string
}
@@ -76,12 +76,12 @@ func (s *Server) UserLogout(ctx context.Context, req *rpc.Request) (*rpc.Respons
// Hub服务加载AccessToken
type HubLoadAccessToken struct {
HubID cortypes.HubID
UserID cortypes.UserID
TokenID cortypes.AccessTokenID
HubID jcstypes.HubID
UserID jcstypes.UserID
TokenID jcstypes.AccessTokenID
}
type HubLoadAccessTokenResp struct {
Token cortypes.UserAccessToken
Token jcstypes.UserAccessToken
}
func (c *Client) HubLoadAccessToken(ctx context.Context, msg *HubLoadAccessToken) (*HubLoadAccessTokenResp, *rpc.CodeError) {

@@ -4,7 +4,7 @@ package hubrpc
import (
"context"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
)
@@ -15,10 +15,10 @@ type CacheSvc interface {
// 获取Cache中文件列表
type CheckCache struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
}
type CheckCacheResp struct {
FileHashes []jcsypes.FileHash
FileHashes []jcstypes.FileHash
}
func (c *Client) CheckCache(ctx context.Context, req *CheckCache) (*CheckCacheResp, *rpc.CodeError) {
@@ -33,8 +33,8 @@ func (s *Server) CheckCache(ctx context.Context, req *rpc.Request) (*rpc.Respons
// 清理Cache中不用的文件
type CacheGC struct {
UserSpace jcsypes.UserSpaceDetail
Availables []jcsypes.FileHash
UserSpace jcstypes.UserSpaceDetail
Availables []jcstypes.FileHash
}
type CacheGCResp struct{}

@@ -4,7 +4,7 @@ import (
context "context"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type UserSvc interface {
@@ -13,8 +13,8 @@ type UserSvc interface {
// 通知用户的Token登出
type NotifyUserAccessTokenInvalid struct {
UserID cortypes.UserID
TokenID cortypes.AccessTokenID
UserID jcstypes.UserID
TokenID jcstypes.AccessTokenID
}
type NotifyUserAccessTokenInvalidResp struct{}

@@ -4,7 +4,7 @@ package hubrpc
import (
"context"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc"
stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
)
@@ -16,7 +16,7 @@ type UserSpaceSvc interface {
// 列出指定BaseStore的指定位置内的所有文件
type BaseStoreListAll struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
Path string
}
type BaseStoreListAllResp struct {
@@ -35,7 +35,7 @@ func (s *Server) BaseStoreListAll(ctx context.Context, req *rpc.Request) (*rpc.R
// 批量在指定BaseStore中创建文件夹
type BaseStoreMkdirs struct {
UserSpace jcsypes.UserSpaceDetail
UserSpace jcstypes.UserSpaceDetail
Pathes []string
}

@@ -6,22 +6,22 @@ import (
"time"
"gitlink.org.cn/cloudream/common/utils/math2"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type HubStorageTransferStats struct {
data HubStorageTransferStatsData
fromHubID cortypes.HubID
fromHubID jcstypes.HubID
lock *sync.Mutex
}
type HubStorageTransferStatsData struct {
Entries map[cortypes.StorageID]*HubStorageTransferStatsEntry
Entries map[jcstypes.StorageID]*HubStorageTransferStatsEntry
StartTime time.Time
}
type HubStorageTransferStatsEntry struct {
DestStorageID cortypes.StorageID
DestStorageID jcstypes.StorageID
OutputBytes int64
MaxOutputBytes int64
@@ -36,7 +36,7 @@ type HubStorageTransferStatsEntry struct {
SuccessInput int64
}
func (s *HubStorageTransferStats) RecordUpload(dstStorageID cortypes.StorageID, transferBytes int64, isSuccess bool) {
func (s *HubStorageTransferStats) RecordUpload(dstStorageID jcstypes.StorageID, transferBytes int64, isSuccess bool) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -58,7 +58,7 @@ func (s *HubStorageTransferStats) RecordUpload(dstStorageID cortypes.StorageID,
e.TotalOutput++
}
func (s *HubStorageTransferStats) RecordDownload(dstStorageID cortypes.StorageID, transferBytes int64, isSuccess bool) {
func (s *HubStorageTransferStats) RecordDownload(dstStorageID jcstypes.StorageID, transferBytes int64, isSuccess bool) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -83,7 +83,7 @@ func (s *HubStorageTransferStats) Reset() time.Time {
s.lock.Lock()
defer s.lock.Unlock()
s.data.Entries = make(map[cortypes.StorageID]*HubStorageTransferStatsEntry)
s.data.Entries = make(map[jcstypes.StorageID]*HubStorageTransferStatsEntry)
s.data.StartTime = time.Now()
return s.data.StartTime
}
@@ -93,7 +93,7 @@ func (s *HubStorageTransferStats) DumpData() HubStorageTransferStatsData {
defer s.lock.Unlock()
data := s.data
data.Entries = make(map[cortypes.StorageID]*HubStorageTransferStatsEntry)
data.Entries = make(map[jcstypes.StorageID]*HubStorageTransferStatsEntry)
for k, v := range s.data.Entries {
v2 := *v
data.Entries[k] = &v2

@@ -6,22 +6,22 @@ import (
"time"
"gitlink.org.cn/cloudream/common/utils/math2"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type HubTransferStats struct {
data HubTransferStatsData
fromHubID cortypes.HubID
fromHubID jcstypes.HubID
lock *sync.Mutex
}
type HubTransferStatsData struct {
Entries map[cortypes.HubID]*HubTransferStatsEntry
Entries map[jcstypes.HubID]*HubTransferStatsEntry
StartTime time.Time
}
type HubTransferStatsEntry struct {
DestHubID cortypes.HubID
DestHubID jcstypes.HubID
OutputBytes int64
MaxOutputBytes int64
@@ -36,7 +36,7 @@ type HubTransferStatsEntry struct {
SuccessInput int64
}
func (s *HubTransferStats) RecordOutput(dstHubID cortypes.HubID, transferBytes int64, isSuccess bool) {
func (s *HubTransferStats) RecordOutput(dstHubID jcstypes.HubID, transferBytes int64, isSuccess bool) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -58,7 +58,7 @@ func (s *HubTransferStats) RecordOutput(dstHubID cortypes.HubID, transferBytes i
e.TotalOutput++
}
func (s *HubTransferStats) RecordInput(dstHubID cortypes.HubID, transferBytes int64, isSuccess bool) {
func (s *HubTransferStats) RecordInput(dstHubID jcstypes.HubID, transferBytes int64, isSuccess bool) {
s.lock.Lock()
defer s.lock.Unlock()
@@ -85,7 +85,7 @@ func (s *HubTransferStats) Reset() time.Time {
defer s.lock.Unlock()
s.data.StartTime = time.Now()
s.data.Entries = make(map[cortypes.HubID]*HubTransferStatsEntry)
s.data.Entries = make(map[jcstypes.HubID]*HubTransferStatsEntry)
return s.data.StartTime
}
@@ -94,7 +94,7 @@ func (s *HubTransferStats) DumpData() HubTransferStatsData {
defer s.lock.Unlock()
data := s.data
data.Entries = make(map[cortypes.HubID]*HubTransferStatsEntry)
data.Entries = make(map[jcstypes.HubID]*HubTransferStatsEntry)
for k, v := range s.data.Entries {
v2 := *v
data.Entries[k] = &v2

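The two stats files above share one technique: a per-destination counter map guarded by a mutex, with Reset swapping in a fresh map and DumpData copying entries so callers never see the live map. A reduced, self-contained sketch of that pattern (plain int64 IDs stand in for jcstypes.HubID/StorageID):

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	OutputBytes int64
	TotalOutput int64
}

// transferStats is a cut-down HubTransferStats: counters keyed by destination,
// protected by a mutex.
type transferStats struct {
	lock    sync.Mutex
	entries map[int64]*entry
}

func (s *transferStats) RecordOutput(dst int64, n int64) {
	s.lock.Lock()
	defer s.lock.Unlock()
	e := s.entries[dst]
	if e == nil {
		e = &entry{}
		s.entries[dst] = e
	}
	e.OutputBytes += n
	e.TotalOutput++
}

// DumpData returns a copy of the entries, mirroring HubTransferStats.DumpData.
func (s *transferStats) DumpData() map[int64]entry {
	s.lock.Lock()
	defer s.lock.Unlock()
	out := make(map[int64]entry, len(s.entries))
	for k, v := range s.entries {
		out[k] = *v
	}
	return out
}

func main() {
	s := &transferStats{entries: map[int64]*entry{}}
	s.RecordOutput(7, 1024)
	fmt.Println(s.DumpData())
}
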
@@ -4,7 +4,7 @@ import (
"sync"
"time"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type StatsHost struct {
@@ -14,24 +14,24 @@ type StatsHost struct {
HubStorageTransfer *HubStorageTransferStats
}
func (h *StatsHost) SetupHubTransfer(fromHubID cortypes.HubID) {
func (h *StatsHost) SetupHubTransfer(fromHubID jcstypes.HubID) {
h.HubTransfer = &HubTransferStats{
fromHubID: fromHubID,
lock: &sync.Mutex{},
data: HubTransferStatsData{
StartTime: time.Now(),
Entries: make(map[cortypes.HubID]*HubTransferStatsEntry),
Entries: make(map[jcstypes.HubID]*HubTransferStatsEntry),
},
}
}
func (h *StatsHost) SetupHubStorageTransfer(fromHubID cortypes.HubID) {
func (h *StatsHost) SetupHubStorageTransfer(fromHubID jcstypes.HubID) {
h.HubStorageTransfer = &HubStorageTransferStats{
fromHubID: fromHubID,
lock: &sync.Mutex{},
data: HubStorageTransferStatsData{
StartTime: time.Now(),
Entries: make(map[cortypes.StorageID]*HubStorageTransferStatsEntry),
Entries: make(map[jcstypes.StorageID]*HubStorageTransferStatsEntry),
},
}
}

@@ -10,14 +10,13 @@ import (
"gitlink.org.cn/cloudream/common/utils/os2"
"gitlink.org.cn/cloudream/common/utils/serder"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type ECMultiplier struct {
blder *builder
url string
feat *cortypes.ECMultiplierFeature
feat *jcstypes.ECMultiplierFeature
outputs []string
}
@@ -98,9 +97,9 @@ func (m *ECMultiplier) Multiply(coef [][]byte, inputs []types.HTTPRequest, chunk
for i, data := range r.Data {
ret[i] = types.FileInfo{
// TODO 要确认一下output的格式
Path: jcsypes.PathFromJcsPathString(m.outputs[i]),
Path: jcstypes.PathFromJcsPathString(m.outputs[i]),
Size: data.Size,
Hash: jcsypes.NewFullHashFromString(data.Sha256),
Hash: jcstypes.NewFullHashFromString(data.Sha256),
}
}

@@ -10,12 +10,11 @@ import (
"gitlink.org.cn/cloudream/common/utils/serder"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
reg.RegisterBuilder[*cortypes.EFileType](func(detail *jcsypes.UserSpaceDetail) types.StorageBuilder {
reg.RegisterBuilder[*jcstypes.EFileType](func(detail *jcstypes.UserSpaceDetail) types.StorageBuilder {
return &builder{
detail: detail,
}
@@ -24,15 +23,15 @@ func init() {
type builder struct {
types.EmptyBuilder
detail *jcsypes.UserSpaceDetail
detail *jcstypes.UserSpaceDetail
token string
tokenLock sync.Mutex
getTokenTime time.Time
}
func (b *builder) getToken() (string, error) {
stgType := b.detail.UserSpace.Storage.(*cortypes.EFileType)
cred := b.detail.UserSpace.Credential.(*cortypes.EFileCred)
stgType := b.detail.UserSpace.Storage.(*jcstypes.EFileType)
cred := b.detail.UserSpace.Credential.(*jcstypes.EFileCred)
b.tokenLock.Lock()
defer b.tokenLock.Unlock()
@@ -91,12 +90,12 @@ func (b *builder) getToken() (string, error) {
}
func (b *builder) CreateECMultiplier(typeOnly bool) (types.ECMultiplier, error) {
feat := types.FindFeature[*cortypes.ECMultiplierFeature](b.detail)
feat := types.FindFeature[*jcstypes.ECMultiplierFeature](b.detail)
if feat == nil {
return nil, fmt.Errorf("feature ECMultiplier not found")
}
cred, ok := b.detail.UserSpace.Credential.(*cortypes.EFileCred)
cred, ok := b.detail.UserSpace.Credential.(*jcstypes.EFileCred)
if !ok {
return nil, fmt.Errorf("invalid storage credential type %T for efile storage", b.detail.UserSpace.Credential)
}

@@ -6,12 +6,12 @@ import (
_ "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
// 此函数永远不会返回nil。如果找不到对应的Builder,则会返回EmptyBuilder,
// 此Builder的所有函数都会返回否定值或者封装后的ErrUnsupported错误(需要使用errors.Is检查)
func GetBuilder(detail *jcsypes.UserSpaceDetail) types.StorageBuilder {
func GetBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder {
typ := reflect.TypeOf(detail.UserSpace.Storage)
ctor, ok := reg.StorageBuilders[typ]

@@ -5,23 +5,22 @@ import (
"gitlink.org.cn/cloudream/common/utils/reflect2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type BuilderCtor func(detail *jcsypes.UserSpaceDetail) types.StorageBuilder
type BuilderCtor func(detail *jcstypes.UserSpaceDetail) types.StorageBuilder
var StorageBuilders = make(map[reflect.Type]BuilderCtor)
// 注册针对指定存储服务类型的Builder
func RegisterBuilder[T cortypes.StorageType](ctor BuilderCtor) {
func RegisterBuilder[T jcstypes.StorageType](ctor BuilderCtor) {
StorageBuilders[reflect2.TypeOf[T]()] = ctor
}
// 注:此函数只给storage包内部使用,外部包请使用外层的factory.GetBuilder
// 此函数永远不会返回nil。如果找不到对应的Builder,则会返回EmptyBuilder,
// 此Builder的所有函数都会返回否定值或者封装后的ErrUnsupported错误(需要使用errors.Is检查)
func GetBuilderInternal(detail *jcsypes.UserSpaceDetail) types.StorageBuilder {
func GetBuilderInternal(detail *jcstypes.UserSpaceDetail) types.StorageBuilder {
typ := reflect.TypeOf(detail.UserSpace.Storage)
ctor, ok := StorageBuilders[typ]

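RegisterBuilder/GetBuilderInternal above implement a type-keyed registry: builders are registered under the reflect.Type of a storage-type parameter and looked up by the dynamic type of a storage value. A self-contained sketch of the idea; the real code uses reflect2.TypeOf[T]() from cloudream/common, while this sketch uses a plain zero-value trick, and the stand-in types are invented:

package main

import (
	"fmt"
	"reflect"
)

// Stand-ins for the jcstypes.StorageType union.
type StorageType interface{ isStorageType() }
type LocalType struct{}

func (*LocalType) isStorageType() {}

type BuilderCtor func() string

// builders mirrors reg.StorageBuilders: keyed by the concrete storage type.
var builders = make(map[reflect.Type]BuilderCtor)

// RegisterBuilder registers ctor under the reflect.Type of T.
func RegisterBuilder[T StorageType](ctor BuilderCtor) {
	var zero T
	builders[reflect.TypeOf(zero)] = ctor
}

// GetBuilder looks a builder up by the dynamic type of the given storage value.
func GetBuilder(stg StorageType) (BuilderCtor, bool) {
	ctor, ok := builders[reflect.TypeOf(stg)]
	return ctor, ok
}

func main() {
	RegisterBuilder[*LocalType](func() string { return "local builder" })
	if ctor, ok := GetBuilder(&LocalType{}); ok {
		fmt.Println(ctor())
	}
}
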
@@ -9,22 +9,22 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type BaseStore struct {
root string
detail *jcsypes.UserSpaceDetail
detail *jcstypes.UserSpaceDetail
}
func NewBaseStore(root string, detail *jcsypes.UserSpaceDetail) (*BaseStore, error) {
func NewBaseStore(root string, detail *jcstypes.UserSpaceDetail) (*BaseStore, error) {
return &BaseStore{
root: root,
detail: detail,
}, nil
}
func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) {
func (s *BaseStore) Write(pat jcstypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) {
log := s.getLogger()
absObjPath := filepath.Join(s.root, pat.String())
@@ -58,11 +58,11 @@ func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOp
return types.FileInfo{
Path: pat,
Size: counter.Count(),
Hash: jcsypes.NewFullHash(hasher.Sum()),
Hash: jcstypes.NewFullHash(hasher.Sum()),
}, nil
}
func (s *BaseStore) Read(objPath jcsypes.JPath, opt types.OpenOption) (io.ReadCloser, error) {
func (s *BaseStore) Read(objPath jcstypes.JPath, opt types.OpenOption) (io.ReadCloser, error) {
absObjPath := filepath.Join(s.root, objPath.JoinOSPath())
file, err := os.Open(absObjPath)
if err != nil {
@@ -86,7 +86,7 @@ func (s *BaseStore) Read(objPath jcsypes.JPath, opt types.OpenOption) (io.ReadCl
return ret, nil
}
func (s *BaseStore) Mkdir(path jcsypes.JPath) error {
func (s *BaseStore) Mkdir(path jcstypes.JPath) error {
absObjPath := filepath.Join(s.root, path.JoinOSPath())
err := os.MkdirAll(absObjPath, 0755)
if err != nil {
@@ -96,7 +96,7 @@ func (s *BaseStore) Mkdir(path jcsypes.JPath) error {
return nil
}
func (s *BaseStore) ReadDir(pat jcsypes.JPath) types.DirReader {
func (s *BaseStore) ReadDir(pat jcstypes.JPath) types.DirReader {
return &DirReader{
absRootPath: filepath.Join(s.root, pat.JoinOSPath()),
rootJPath: pat.Clone(),

@@ -6,14 +6,14 @@ import (
"path/filepath"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type DirReader struct {
// 完整的根路径(包括ReadDir的path参数),比如包括了盘符
absRootPath string
// ReadDir函数传递进来的path参数
rootJPath jcsypes.JPath
rootJPath jcstypes.JPath
init bool
curEntries []dirEntry
}
@@ -42,7 +42,7 @@ func (r *DirReader) Next() (types.DirEntry, error) {
for _, e := range es {
r.curEntries = append(r.curEntries, dirEntry{
dir: jcsypes.JPath{},
dir: jcstypes.JPath{},
entry: e,
})
}
@@ -103,7 +103,7 @@ func (r *DirReader) Close() {
}
type dirEntry struct {
dir jcsypes.JPath
dir jcstypes.JPath
entry os.DirEntry
}

@@ -5,12 +5,11 @@ import (
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
func init() {
reg.RegisterBuilder[*cortypes.LocalType](func(detail *jcsypes.UserSpaceDetail) types.StorageBuilder {
reg.RegisterBuilder[*jcstypes.LocalType](func(detail *jcstypes.UserSpaceDetail) types.StorageBuilder {
return &builder{
detail: detail,
}
@@ -19,7 +18,7 @@ func init() {
type builder struct {
types.EmptyBuilder
detail *jcsypes.UserSpaceDetail
detail *jcstypes.UserSpaceDetail
}
func (b *builder) FeatureDesc() types.FeatureDesc {
@@ -27,7 +26,7 @@ func (b *builder) FeatureDesc() types.FeatureDesc {
}
func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) {
cred, ok := b.detail.UserSpace.Credential.(*cortypes.LocalCred)
cred, ok := b.detail.UserSpace.Credential.(*jcstypes.LocalCred)
if !ok {
return nil, fmt.Errorf("invalid storage credential type %T for local storage", b.detail.UserSpace.Credential)
}
@@ -40,7 +39,7 @@ func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) {
}
func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) {
cred, ok := b.detail.UserSpace.Credential.(*cortypes.LocalCred)
cred, ok := b.detail.UserSpace.Credential.(*jcstypes.LocalCred)
if !ok {
return nil, fmt.Errorf("invalid storage credential type %T for local storage", b.detail.UserSpace.Credential)
}
@@ -53,9 +52,9 @@ func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) {
}
func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) {
feat := types.FindFeature[*cortypes.MultipartUploadFeature](b.detail)
feat := types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail)
if feat == nil {
return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{})
return nil, fmt.Errorf("feature %T not found", jcstypes.MultipartUploadFeature{})
}
if typeOnly {
@@ -68,9 +67,9 @@ func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) {
}
func (b *builder) CreateS2STransfer(typeOnly bool) (types.S2STransfer, error) {
feat := types.FindFeature[*cortypes.S2STransferFeature](b.detail)
feat := types.FindFeature[*jcstypes.S2STransferFeature](b.detail)
if feat == nil {
return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{})
return nil, fmt.Errorf("feature %T not found", jcstypes.S2STransferFeature{})
}
if typeOnly {

@@ -13,14 +13,13 @@ import (
"gitlink.org.cn/cloudream/common/utils/os2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
type Multiparter struct {
detail *jcsypes.UserSpaceDetail
localStg *cortypes.LocalCred
feat *cortypes.MultipartUploadFeature
detail *jcstypes.UserSpaceDetail
localStg *jcstypes.LocalCred
feat *jcstypes.MultipartUploadFeature
}
func (*Multiparter) MinPartSize() int64 {
@@ -80,7 +79,7 @@ type MultipartTask struct {
absTempDir string // 应该要是绝对路径
tempFileName string
tempPartsDir string
joinedFileJPath jcsypes.JPath
joinedFileJPath jcstypes.JPath
absJoinedFilePath string
uploadID string
}
@@ -118,7 +117,7 @@ func (i *MultipartTask) JoinParts(ctx context.Context, parts []types.UploadedPar
return types.FileInfo{
Path: i.joinedFileJPath,
Size: size,
Hash: jcsypes.NewFullHash(h),
Hash: jcstypes.NewFullHash(h),
}, nil
}

@@ -7,24 +7,23 @@ import (
     "path/filepath"
 
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type S2STransfer struct {
-    feat *cortypes.S2STransferFeature
-    detail *jcsypes.UserSpaceDetail
-    localStg *cortypes.LocalCred
-    dstPath jcsypes.JPath
+    feat *jcstypes.S2STransferFeature
+    detail *jcstypes.UserSpaceDetail
+    localStg *jcstypes.LocalCred
+    dstPath jcstypes.JPath
 }
 
 // Direct data transfer is only possible between storages on the same machine.
-func (*S2STransfer) CanTransfer(src, dst *jcsypes.UserSpaceDetail) bool {
-    if types.FindFeature[*cortypes.S2STransferFeature](dst) == nil {
+func (*S2STransfer) CanTransfer(src, dst *jcstypes.UserSpaceDetail) bool {
+    if types.FindFeature[*jcstypes.S2STransferFeature](dst) == nil {
         return false
     }
 
-    _, ok := src.UserSpace.Storage.(*cortypes.LocalType)
+    _, ok := src.UserSpace.Storage.(*jcstypes.LocalType)
     if !ok {
         return false
     }
@@ -37,7 +36,7 @@ func (*S2STransfer) CanTransfer(src, dst *jcsypes.UserSpaceDetail) bool {
 }
 
 // Performs the direct data transfer.
-func (s *S2STransfer) Transfer(ctx context.Context, src *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstPath jcsypes.JPath) (types.FileInfo, error) {
+func (s *S2STransfer) Transfer(ctx context.Context, src *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstPath jcstypes.JPath) (types.FileInfo, error) {
     s.dstPath = dstPath
 
     copy, err := os.OpenFile(filepath.Join(s.localStg.RootDir, s.dstPath.JoinOSPath()), os.O_WRONLY|os.O_CREATE, 0644)

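In this local backend, `CanTransfer` gates the direct-transfer path on the destination advertising the S2S feature and the source being a local-disk storage, and `Transfer` then copies the file straight through the filesystem under the local credential's root directory. A minimal sketch of that eligibility check plus copy follows; the `LocalType` stand-in, the root-dir layout, and the helper names are assumptions, not the real jcs-pub types.

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// Storage is a stand-in for the storage-type union held by a user space.
type Storage interface{ isStorage() }

// LocalType marks a storage backed by a local directory.
type LocalType struct{ RootDir string }

func (*LocalType) isStorage() {}

// canDirectCopy mirrors the CanTransfer idea: both sides must be local
// directories on the same machine for a plain filesystem copy to work.
func canDirectCopy(src, dst Storage) bool {
	_, srcLocal := src.(*LocalType)
	_, dstLocal := dst.(*LocalType)
	return srcLocal && dstLocal
}

// directCopy copies srcRel inside src's root to dstRel inside dst's root.
func directCopy(src, dst *LocalType, srcRel, dstRel string) (int64, error) {
	in, err := os.Open(filepath.Join(src.RootDir, srcRel))
	if err != nil {
		return 0, err
	}
	defer in.Close()

	dstPath := filepath.Join(dst.RootDir, dstRel)
	if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil {
		return 0, err
	}
	out, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return 0, err
	}
	defer out.Close()

	return io.Copy(out, in)
}

func main() {
	root, _ := os.MkdirTemp("", "s2s")
	defer os.RemoveAll(root)
	src := &LocalType{RootDir: filepath.Join(root, "a")}
	dst := &LocalType{RootDir: filepath.Join(root, "b")}
	os.MkdirAll(src.RootDir, 0755)
	os.WriteFile(filepath.Join(src.RootDir, "f.txt"), []byte("data"), 0644)

	if canDirectCopy(src, dst) {
		n, err := directCopy(src, dst, "f.txt", "f.txt")
		fmt.Println("copied", n, "bytes, err:", err)
	}
}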
@@ -10,18 +10,18 @@ import (
 
     "gitlink.org.cn/cloudream/common/pkgs/logger"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type ShardStore struct {
-    detail *jcsypes.UserSpaceDetail
+    detail *jcstypes.UserSpaceDetail
     stgRoot string
     storeAbsRoot string
     lock sync.Mutex
     done chan any
 }
 
-func NewShardStore(root string, detail *jcsypes.UserSpaceDetail) (*ShardStore, error) {
+func NewShardStore(root string, detail *jcstypes.UserSpaceDetail) (*ShardStore, error) {
     storeAbsRoot, err := filepath.Abs(filepath.Join(root, detail.UserSpace.WorkingDir.JoinOSPath(), types.ShardStoreWorkingDir))
     if err != nil {
         return nil, fmt.Errorf("get abs root: %w", err)
@@ -43,7 +43,7 @@ func (s *ShardStore) Stop() {
     s.getLogger().Infof("component stop")
 }
 
-func (s *ShardStore) Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64) (types.FileInfo, error) {
+func (s *ShardStore) Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (types.FileInfo, error) {
     fullTempPath := filepath.Join(s.stgRoot, path.JoinOSPath())
 
     s.lock.Lock()
@@ -81,7 +81,7 @@ func (s *ShardStore) Store(path jcsypes.JPath, hash jcsypes.FileHash, size int64
     }, nil
 }
 
-func (s *ShardStore) Info(hash jcsypes.FileHash) (types.FileInfo, error) {
+func (s *ShardStore) Info(hash jcstypes.FileHash) (types.FileInfo, error) {
     s.lock.Lock()
     defer s.lock.Unlock()
 
@@ -118,7 +117,7 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) {
         return err
     }
 
-    fileHash, err := jcsypes.ParseHash(filepath.Base(info.Name()))
+    fileHash, err := jcstypes.ParseHash(filepath.Base(info.Name()))
     if err != nil {
         return nil
     }
@@ -137,11 +137,11 @@ func (s *ShardStore) ListAll() ([]types.FileInfo, error) {
     return infos, nil
 }
 
-func (s *ShardStore) GC(avaiables []jcsypes.FileHash) error {
+func (s *ShardStore) GC(avaiables []jcstypes.FileHash) error {
     s.lock.Lock()
     defer s.lock.Unlock()
 
-    avais := make(map[jcsypes.FileHash]bool)
+    avais := make(map[jcstypes.FileHash]bool)
     for _, hash := range avaiables {
         avais[hash] = true
     }
@@ -162,7 +162,7 @@ func (s *ShardStore) GC(avaiables []jcsypes.FileHash) error {
         return err
     }
 
-    fileHash, err := jcsypes.ParseHash(filepath.Base(info.Name()))
+    fileHash, err := jcstypes.ParseHash(filepath.Base(info.Name()))
     if err != nil {
         return nil
     }
@@ -199,14 +199,14 @@ func (s *ShardStore) getLogger() logger.Logger {
     return logger.WithField("ShardStore", "Local").WithField("Storage", s.detail.UserSpace.Storage.String())
 }
 
-func (s *ShardStore) getFileDirFromHash(hash jcsypes.FileHash) string {
+func (s *ShardStore) getFileDirFromHash(hash jcstypes.FileHash) string {
     return filepath.Join(s.storeAbsRoot, hash.GetHashPrefix(2))
 }
 
-func (s *ShardStore) getFilePathFromHash(hash jcsypes.FileHash) string {
+func (s *ShardStore) getFilePathFromHash(hash jcstypes.FileHash) string {
     return filepath.Join(s.storeAbsRoot, hash.GetHashPrefix(2), string(hash))
 }
 
-func (s *ShardStore) getJPathFromHash(hash jcsypes.FileHash) jcsypes.JPath {
+func (s *ShardStore) getJPathFromHash(hash jcstypes.FileHash) jcstypes.JPath {
     return s.detail.UserSpace.WorkingDir.ConcatCompsNew(types.ShardStoreWorkingDir, hash.GetHashPrefix(2), string(hash))
 }

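The getFileDirFromHash/getFilePathFromHash helpers show the shard store's on-disk layout: a file lives under a fan-out directory named by the first two characters of its hash, with the full hash as the file name. A stand-alone sketch of that layout follows; the hash type and the prefix length mirror what the diff shows, everything else (the example root and hash value) is assumed.

package main

import (
	"fmt"
	"path/filepath"
)

// FileHash is a stand-in for jcstypes.FileHash: a hex-encoded content hash.
type FileHash string

// prefix returns the first n characters, used as the fan-out directory name.
func (h FileHash) prefix(n int) string { return string(h[:n]) }

type shardLayout struct{ storeRoot string }

// dirFromHash: <root>/<first 2 chars of hash>
func (l shardLayout) dirFromHash(h FileHash) string {
	return filepath.Join(l.storeRoot, h.prefix(2))
}

// pathFromHash: <root>/<first 2 chars of hash>/<full hash>
func (l shardLayout) pathFromHash(h FileHash) string {
	return filepath.Join(l.storeRoot, h.prefix(2), string(h))
}

func main() {
	l := shardLayout{storeRoot: "/data/space1/.shards"}
	h := FileHash("ab34ef56") // shortened example hash
	fmt.Println(l.dirFromHash(h))  // /data/space1/.shards/ab
	fmt.Println(l.pathFromHash(h)) // /data/space1/.shards/ab/ab34ef56
}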
@@ -9,20 +9,19 @@ import (
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg"
     s3stg "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/s3"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 func init() {
-    reg.RegisterBuilder[*cortypes.OBSType](newBuilder)
+    reg.RegisterBuilder[*jcstypes.OBSType](newBuilder)
 }
 
 type builder struct {
     types.EmptyBuilder
-    detail *jcsypes.UserSpaceDetail
+    detail *jcstypes.UserSpaceDetail
 }
 
-func newBuilder(detail *jcsypes.UserSpaceDetail) types.StorageBuilder {
+func newBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder {
     return &builder{
         detail: detail,
     }
@@ -33,8 +32,8 @@ func (b *builder) FeatureDesc() types.FeatureDesc {
 }
 
 func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType)
-    cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType)
+    cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential)
     }
@@ -52,8 +51,8 @@ func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) {
 }
 
 func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType)
-    cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType)
+    cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential)
     }
@@ -70,7 +69,7 @@ func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) {
     return s3stg.NewBaseStore(b.detail, cli, bucket, s3stg.BaseStoreOption{UseAWSSha256: false})
 }
 
-func createClient(stgType *cortypes.OBSType, cred *cortypes.OBSCred) (*s3.Client, string, error) {
+func createClient(stgType *jcstypes.OBSType, cred *jcstypes.OBSCred) (*s3.Client, string, error) {
     awsConfig := aws.Config{}
 
     cre := aws.Credentials{
@@ -91,17 +90,17 @@ func createClient(stgType *cortypes.OBSType, cred *cortypes.OBSCred) (*s3.Client
 }
 
 func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType)
-    feat := types.FindFeature[*cortypes.MultipartUploadFeature](b.detail)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType)
+    feat := types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail)
     if feat == nil {
-        return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{})
+        return nil, fmt.Errorf("feature %T not found", jcstypes.MultipartUploadFeature{})
     }
 
     if typeOnly {
         return (*s3stg.Multiparter)(nil), nil
     }
 
-    cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred)
+    cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential)
     }
@@ -120,17 +119,17 @@ func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) {
 }
 
 func (b *builder) CreateS2STransfer(typeOnly bool) (types.S2STransfer, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.OBSType)
-    feat := types.FindFeature[*cortypes.S2STransferFeature](b.detail)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.OBSType)
+    feat := types.FindFeature[*jcstypes.S2STransferFeature](b.detail)
     if feat == nil {
-        return nil, fmt.Errorf("feature %T not found", cortypes.S2STransferFeature{})
+        return nil, fmt.Errorf("feature %T not found", jcstypes.S2STransferFeature{})
     }
 
     if typeOnly {
         return (*S2STransfer)(nil), nil
     }
 
-    cred, ok := b.detail.UserSpace.Credential.(*cortypes.OBSCred)
+    cred, ok := b.detail.UserSpace.Credential.(*jcstypes.OBSCred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for obs storage", b.detail.UserSpace.Credential)
     }

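`reg.RegisterBuilder[*jcstypes.OBSType](newBuilder)` in each backend's init() suggests a registry that maps a storage type to the builder constructor for that backend, so the factory can pick the right builder from a user space's storage field. A minimal sketch of such a type-keyed registry follows; the registry shape, the lookup key, and the constructor signature are assumptions, only the generic registration call itself appears in the diff.

package main

import (
	"fmt"
	"reflect"
)

// StorageType is a stand-in for the storage-type union (OBSType, S3Type, ...).
type StorageType interface{ isStorageType() }

type OBSType struct{ Endpoint, Bucket string }
type S3Type struct{ Endpoint, Bucket string }

func (*OBSType) isStorageType() {}
func (*S3Type) isStorageType()  {}

// Builder is a stand-in for types.StorageBuilder.
type Builder interface{ Name() string }

type obsBuilder struct{}

func (obsBuilder) Name() string { return "obs" }

// BuilderCtor builds a backend-specific Builder for one user space.
type BuilderCtor func(stg StorageType) Builder

var registry = map[reflect.Type]BuilderCtor{}

// RegisterBuilder keys the constructor by the concrete storage type T,
// mirroring the reg.RegisterBuilder[*jcstypes.OBSType](newBuilder) call.
func RegisterBuilder[T StorageType](ctor BuilderCtor) {
	var zero T
	registry[reflect.TypeOf(zero)] = ctor
}

// GetBuilder picks the constructor registered for the dynamic type of stg.
func GetBuilder(stg StorageType) (Builder, bool) {
	ctor, ok := registry[reflect.TypeOf(stg)]
	if !ok {
		return nil, false
	}
	return ctor(stg), true
}

func main() {
	RegisterBuilder[*OBSType](func(StorageType) Builder { return obsBuilder{} })

	if b, ok := GetBuilder(&OBSType{Endpoint: "obs.example.com"}); ok {
		fmt.Println("builder:", b.Name())
	}
	if _, ok := GetBuilder(&S3Type{}); !ok {
		fmt.Println("no builder registered for *S3Type")
	}
}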
@@ -5,40 +5,39 @@ import (
     "testing"
 
     . "github.com/smartystreets/goconvey/convey"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 func Test_S2S(t *testing.T) {
     Convey("OBS", t, func() {
         s2s := S2STransfer{
-            stgType: &cortypes.OBSType{
+            stgType: &jcstypes.OBSType{
                 Region: "cn-north-4",
                 Endpoint: "obs.cn-north-4.myhuaweicloud.com",
                 Bucket: "pcm3-bucket3",
                 ProjectID: "",
             },
-            cred: &cortypes.OBSCred{
+            cred: &jcstypes.OBSCred{
                 AK: "",
                 SK: "",
             },
-            feat: &cortypes.S2STransferFeature{},
+            feat: &jcstypes.S2STransferFeature{},
         }
 
-        _, err := s2s.Transfer(context.TODO(), &jcsypes.UserSpaceDetail{
-            UserSpace: jcsypes.UserSpace{
-                Storage: &cortypes.OBSType{
+        _, err := s2s.Transfer(context.TODO(), &jcstypes.UserSpaceDetail{
+            UserSpace: jcstypes.UserSpace{
+                Storage: &jcstypes.OBSType{
                     Region: "cn-north-4",
                     Endpoint: "obs.cn-north-4.myhuaweicloud.com",
                     Bucket: "pcm2-bucket2",
                     ProjectID: "",
                 },
-                Credential: &cortypes.OBSCred{
+                Credential: &jcstypes.OBSCred{
                     AK: "",
                     SK: "",
                 },
             },
-        }, jcsypes.PathFromComps("test_data/test03.txt"), jcsypes.PathFromComps("atest.txt"))
+        }, jcstypes.PathFromComps("test_data/test03.txt"), jcstypes.PathFromComps("atest.txt"))
         defer s2s.Close()
 
         So(err, ShouldEqual, nil)

@@ -15,20 +15,19 @@ import (
     "gitlink.org.cn/cloudream/common/utils/os2"
     stgs3 "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/s3"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type S2STransfer struct {
-    detail *jcsypes.UserSpaceDetail
-    stgType *cortypes.OBSType
-    cred *cortypes.OBSCred
-    feat *cortypes.S2STransferFeature
+    detail *jcstypes.UserSpaceDetail
+    stgType *jcstypes.OBSType
+    cred *jcstypes.OBSCred
+    feat *jcstypes.S2STransferFeature
     taskID *int64
     omsCli *oms.OmsClient
 }
 
-func NewS2STransfer(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, cred *cortypes.OBSCred, feat *cortypes.S2STransferFeature) *S2STransfer {
+func NewS2STransfer(detail *jcstypes.UserSpaceDetail, stgType *jcstypes.OBSType, cred *jcstypes.OBSCred, feat *jcstypes.S2STransferFeature) *S2STransfer {
     return &S2STransfer{
         detail: detail,
         stgType: stgType,
@@ -38,13 +37,13 @@ func NewS2STransfer(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType,
 }
 
 // Reports whether data can be transferred directly from the given source storage to the destination path in the current storage.
-func (*S2STransfer) CanTransfer(src, dst *jcsypes.UserSpaceDetail) bool {
-    req := makeRequest(src, jcsypes.JPath{})
+func (*S2STransfer) CanTransfer(src, dst *jcstypes.UserSpaceDetail) bool {
+    req := makeRequest(src, jcstypes.JPath{})
     return req != nil
 }
 
 // Performs the direct data transfer and returns the path of the transferred file.
-func (s *S2STransfer) Transfer(ctx context.Context, src *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath, dstPath jcsypes.JPath) (types.FileInfo, error) {
+func (s *S2STransfer) Transfer(ctx context.Context, src *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath, dstPath jcstypes.JPath) (types.FileInfo, error) {
     req := makeRequest(src, srcPath)
     if req == nil {
         return types.FileInfo{}, fmt.Errorf("unsupported source storage type: %T", src.UserSpace.Storage)
@@ -178,12 +177,12 @@ func (s *S2STransfer) Close() {
     }
 }
 
-func makeRequest(srcStg *jcsypes.UserSpaceDetail, srcPath jcsypes.JPath) *model.SrcNodeReq {
+func makeRequest(srcStg *jcstypes.UserSpaceDetail, srcPath jcstypes.JPath) *model.SrcNodeReq {
     switch srcType := srcStg.UserSpace.Storage.(type) {
-    case *cortypes.OBSType:
+    case *jcstypes.OBSType:
         cloudType := "HuaweiCloud"
 
-        cred, ok := srcStg.UserSpace.Credential.(*cortypes.OBSCred)
+        cred, ok := srcStg.UserSpace.Credential.(*jcstypes.OBSCred)
         if !ok {
             return nil
         }

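Here `makeRequest` doubles as the capability probe: it type-switches on the source storage and returns nil when that backend cannot act as a direct-transfer source, which is exactly what `CanTransfer` checks. A small stand-alone sketch of that nil-means-unsupported pattern follows; the request struct and its field names are illustrative stand-ins, not Huawei OMS's `model.SrcNodeReq`.

package main

import "fmt"

type Storage interface{ isStorage() }

type OBSType struct{ Region, Bucket string }
type LocalType struct{ RootDir string }

func (*OBSType) isStorage()   {}
func (*LocalType) isStorage() {}

// srcNodeReq is a simplified stand-in for the migration request built
// from the source side of a server-to-server transfer.
type srcNodeReq struct {
	CloudType string
	Region    string
	Bucket    string
	ObjectKey string
}

// makeRequest returns nil when the source storage cannot be used as a
// direct-transfer source; callers treat nil as "not supported".
func makeRequest(src Storage, objectKey string) *srcNodeReq {
	switch s := src.(type) {
	case *OBSType:
		return &srcNodeReq{
			CloudType: "HuaweiCloud",
			Region:    s.Region,
			Bucket:    s.Bucket,
			ObjectKey: objectKey,
		}
	default:
		return nil
	}
}

func canTransfer(src Storage) bool { return makeRequest(src, "") != nil }

func main() {
	fmt.Println(canTransfer(&OBSType{Region: "cn-north-4", Bucket: "b"})) // true
	fmt.Println(canTransfer(&LocalType{RootDir: "/data"}))                // false
}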
@@ -5,17 +5,16 @@ import (
     "github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/s3"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type ShardStore struct {
     *s3.ShardStore
-    stgType *cortypes.OBSType
-    cred *cortypes.OBSCred
+    stgType *jcstypes.OBSType
+    cred *jcstypes.OBSCred
 }
 
-func NewShardStore(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, cred *cortypes.OBSCred, s3Cli *awss3.Client, bkt string) (*ShardStore, error) {
+func NewShardStore(detail *jcstypes.UserSpaceDetail, stgType *jcstypes.OBSType, cred *jcstypes.OBSCred, s3Cli *awss3.Client, bkt string) (*ShardStore, error) {
     sd := ShardStore{
         stgType: stgType,
         cred: cred,
@@ -32,7 +31,7 @@ func NewShardStore(detail *jcsypes.UserSpaceDetail, stgType *cortypes.OBSType, c
     return &sd, nil
 }
 
-func (s *ShardStore) MakeHTTPReadRequest(fileHash jcsypes.FileHash) (types.HTTPRequest, error) {
+func (s *ShardStore) MakeHTTPReadRequest(fileHash jcstypes.FileHash) (types.HTTPRequest, error) {
     cli, err := obs.New(s.cred.AK, s.cred.SK, s.stgType.Endpoint)
     if err != nil {
         return types.HTTPRequest{}, err

@@ -6,12 +6,11 @@ import (
     "gitlink.org.cn/cloudream/common/pkgs/async"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory"
    "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type userSpace struct {
-    detail *jcsypes.UserSpaceDetail
+    detail *jcstypes.UserSpaceDetail
     store types.ShardStore
 }
 
@@ -23,8 +22,8 @@ func (u *userSpace) Drop() {
 }
 
 type userSpaceKey struct {
-    UserID cortypes.UserID
-    UserSpaceID jcsypes.UserSpaceID
+    UserID jcstypes.UserID
+    UserSpaceID jcstypes.UserSpaceID
 }
 
 type Pool struct {
@@ -40,7 +39,7 @@ func NewPool() *Pool {
     }
 }
 
-func (p *Pool) Drop(userID cortypes.UserID, spaceID jcsypes.UserSpaceID) {
+func (p *Pool) Drop(userID jcstypes.UserID, spaceID jcstypes.UserSpaceID) {
     p.lock.Lock()
     defer p.lock.Unlock()
 
@@ -56,7 +55,7 @@ func (p *Pool) Drop(userID cortypes.UserID, spaceID jcsypes.UserSpaceID) {
     delete(p.spaces, key)
 }
 
-func (p *Pool) GetShardStore(spaceDetail *jcsypes.UserSpaceDetail) (types.ShardStore, error) {
+func (p *Pool) GetShardStore(spaceDetail *jcstypes.UserSpaceDetail) (types.ShardStore, error) {
     p.lock.Lock()
     defer p.lock.Unlock()
 
@@ -91,18 +90,18 @@ func (p *Pool) GetShardStore(spaceDetail *jcsypes.UserSpaceDetail) (types.ShardS
     return space.store, nil
 }
 
-func (p *Pool) GetBaseStore(spaceDetail *jcsypes.UserSpaceDetail) (types.BaseStore, error) {
+func (p *Pool) GetBaseStore(spaceDetail *jcstypes.UserSpaceDetail) (types.BaseStore, error) {
     return factory.GetBuilder(spaceDetail).CreateBaseStore(false)
 }
 
-func (p *Pool) GetMultiparter(spaceDetail *jcsypes.UserSpaceDetail) (types.Multiparter, error) {
+func (p *Pool) GetMultiparter(spaceDetail *jcstypes.UserSpaceDetail) (types.Multiparter, error) {
     return factory.GetBuilder(spaceDetail).CreateMultiparter(false)
 }
 
-func (p *Pool) GetS2STransfer(spaceDetail *jcsypes.UserSpaceDetail) (types.S2STransfer, error) {
+func (p *Pool) GetS2STransfer(spaceDetail *jcstypes.UserSpaceDetail) (types.S2STransfer, error) {
     return factory.GetBuilder(spaceDetail).CreateS2STransfer(false)
 }
 
-func (p *Pool) GetECMultiplier(spaceDetail *jcsypes.UserSpaceDetail) (types.ECMultiplier, error) {
+func (p *Pool) GetECMultiplier(spaceDetail *jcstypes.UserSpaceDetail) (types.ECMultiplier, error) {
     return factory.GetBuilder(spaceDetail).CreateECMultiplier(false)
 }

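The pool caches shard stores per (UserID, UserSpaceID) under a mutex, while the stateless accessors (GetBaseStore, GetMultiparter, and so on) just delegate to the factory on every call. A minimal sketch of that caching pattern with a composite struct key follows; the store constructor and the Drop semantics here are simplified assumptions.

package main

import (
	"fmt"
	"sync"
)

type UserID int64
type UserSpaceID int64

// spaceKey is comparable, so it can be used directly as a map key.
type spaceKey struct {
	UserID      UserID
	UserSpaceID UserSpaceID
}

// store is a stand-in for types.ShardStore.
type store struct{ key spaceKey }

type Pool struct {
	lock   sync.Mutex
	spaces map[spaceKey]*store
}

func NewPool() *Pool { return &Pool{spaces: make(map[spaceKey]*store)} }

// GetShardStore returns the cached store for the key, creating it on first use.
func (p *Pool) GetShardStore(uid UserID, sid UserSpaceID) *store {
	p.lock.Lock()
	defer p.lock.Unlock()

	key := spaceKey{UserID: uid, UserSpaceID: sid}
	if s, ok := p.spaces[key]; ok {
		return s
	}
	s := &store{key: key} // the real code would build it via the storage factory
	p.spaces[key] = s
	return s
}

// Drop evicts the cached store so the next Get rebuilds it.
func (p *Pool) Drop(uid UserID, sid UserSpaceID) {
	p.lock.Lock()
	defer p.lock.Unlock()
	delete(p.spaces, spaceKey{UserID: uid, UserSpaceID: sid})
}

func main() {
	p := NewPool()
	a := p.GetShardStore(1, 10)
	b := p.GetShardStore(1, 10)
	fmt.Println("cached:", a == b) // true
	p.Drop(1, 10)
	fmt.Println("rebuilt:", p.GetShardStore(1, 10) != a) // true
}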
@@ -16,7 +16,7 @@ import (
     "gitlink.org.cn/cloudream/common/utils/io2"
     "gitlink.org.cn/cloudream/common/utils/math2"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 const (
@@ -24,7 +24,7 @@ const (
 )
 
 type BaseStore struct {
-    Detail *jcsypes.UserSpaceDetail
+    Detail *jcstypes.UserSpaceDetail
     Bucket string
     cli *s3.Client
     opt BaseStoreOption
@@ -34,7 +34,7 @@ type BaseStoreOption struct {
     UseAWSSha256 bool // Whether the SHA256 checksum provided by AWS can be used directly; if not, it is computed locally. Defaults to local computation.
 }
 
-func NewBaseStore(detail *jcsypes.UserSpaceDetail, cli *s3.Client, bkt string, opt BaseStoreOption) (*BaseStore, error) {
+func NewBaseStore(detail *jcstypes.UserSpaceDetail, cli *s3.Client, bkt string, opt BaseStoreOption) (*BaseStore, error) {
     return &BaseStore{
         Detail: detail,
         Bucket: bkt,
@@ -43,7 +43,7 @@ func NewBaseStore(detail *jcsypes.UserSpaceDetail, cli *s3.Client, bkt string, o
     }, nil
 }
 
-func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) {
+func (s *BaseStore) Write(pat jcstypes.JPath, stream io.Reader, opt types.WriteOption) (types.FileInfo, error) {
     key := pat
     meta := make(map[string]string)
     if opt.ModTime.IsZero() {
@@ -81,7 +81,7 @@ func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOp
 
     return types.FileInfo{
         Path: key,
-        Hash: jcsypes.NewFullHash(hash),
+        Hash: jcstypes.NewFullHash(hash),
         Size: counter.Count(),
     }, nil
 }
@@ -99,12 +99,12 @@ func (s *BaseStore) Write(pat jcsypes.JPath, stream io.Reader, opt types.WriteOp
 
     return types.FileInfo{
         Path: key,
-        Hash: jcsypes.NewFullHash(hashStr.Sum()),
+        Hash: jcstypes.NewFullHash(hashStr.Sum()),
         Size: counter.Count(),
     }, nil
 }
 
-func (s *BaseStore) Read(pat jcsypes.JPath, opt types.OpenOption) (io.ReadCloser, error) {
+func (s *BaseStore) Read(pat jcstypes.JPath, opt types.OpenOption) (io.ReadCloser, error) {
     key := pat
 
     input := &s3.GetObjectInput{
@@ -128,7 +128,7 @@ func (s *BaseStore) Read(pat jcsypes.JPath, opt types.OpenOption) (io.ReadCloser
     return resp.Body, nil
 }
 
-func (s *BaseStore) Mkdir(path jcsypes.JPath) error {
+func (s *BaseStore) Mkdir(path jcstypes.JPath) error {
     _, err := s.cli.PutObject(context.TODO(), &s3.PutObjectInput{
         Bucket: aws.String(s.Bucket),
         Key: aws.String(path.String() + "/"),
@@ -137,7 +137,7 @@ func (s *BaseStore) Mkdir(path jcsypes.JPath) error {
     return err
 }
 
-func (s *BaseStore) ReadDir(path jcsypes.JPath) types.DirReader {
+func (s *BaseStore) ReadDir(path jcstypes.JPath) types.DirReader {
     return &DirReader{
         cli: s.cli,
         bucket: s.Bucket,

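The two Write return paths show that the base store computes the file hash locally while streaming the body, with the AWS-provided SHA-256 only trusted when `UseAWSSha256` is set. A small sketch of the hash-and-count-while-streaming idea using only the standard library follows; the counter and the in-memory destination are stand-ins, the real code feeds the reader to the S3 upload call.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// countingWriter tracks how many bytes passed through, like the byte
// counter in the Write path.
type countingWriter struct{ n int64 }

func (c *countingWriter) Write(p []byte) (int, error) {
	c.n += int64(len(p))
	return len(p), nil
}

func main() {
	body := strings.NewReader("object body to upload")

	h := sha256.New()
	counter := &countingWriter{}
	var dst bytes.Buffer // stand-in for the S3 upload stream

	// TeeReader-style plumbing: everything read from `body` also goes
	// through the hash and the counter before reaching the destination.
	reader := io.TeeReader(body, io.MultiWriter(h, counter))
	if _, err := io.Copy(&dst, reader); err != nil {
		panic(err)
	}

	fmt.Printf("size=%d sha256=%x\n", counter.n, h.Sum(nil))
}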
@@ -7,13 +7,13 @@ import (
     "github.com/aws/aws-sdk-go-v2/aws"
     "github.com/aws/aws-sdk-go-v2/service/s3"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type DirReader struct {
     cli *s3.Client
     bucket string
-    rootPath jcsypes.JPath
+    rootPath jcstypes.JPath
     marker *string
     curInfos []types.DirEntry
     eof bool
@@ -39,7 +39,7 @@ func (r *DirReader) Next() (types.DirEntry, error) {
     }
 
     for _, obj := range resp.Contents {
-        key := jcsypes.PathFromJcsPathString(*obj.Key)
+        key := jcstypes.PathFromJcsPathString(*obj.Key)
 
         r.curInfos = append(r.curInfos, types.DirEntry{
             Path: key,

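`DirReader` is a pull-style iterator over a paginated listing: it buffers one page into `curInfos`, remembers a continuation `marker`, and flags `eof` once the backend reports no more pages. A self-contained sketch of that shape against a fake page-listing function follows; the listing callback stands in for the S3 ListObjects call and is an assumption for illustration.

package main

import (
	"fmt"
	"io"
)

// page is one batch of entries plus the marker for the next batch.
type page struct {
	entries    []string
	nextMarker *string
}

// listFunc stands in for one ListObjects-style call: given a marker it
// returns the next page.
type listFunc func(marker *string) page

type dirReader struct {
	list   listFunc
	marker *string
	cur    []string
	eof    bool
}

// Next returns the next entry, fetching a new page when the buffer is empty,
// and io.EOF once the listing is exhausted.
func (r *dirReader) Next() (string, error) {
	for len(r.cur) == 0 {
		if r.eof {
			return "", io.EOF
		}
		p := r.list(r.marker)
		r.cur = p.entries
		r.marker = p.nextMarker
		if p.nextMarker == nil {
			r.eof = true
		}
	}
	e := r.cur[0]
	r.cur = r.cur[1:]
	return e, nil
}

func main() {
	m1 := "m1"
	pages := map[string]page{
		"": {entries: []string{"a.txt", "b.txt"}, nextMarker: &m1},
		m1: {entries: []string{"c.txt"}},
	}

	r := &dirReader{list: func(marker *string) page {
		if marker == nil {
			return pages[""]
		}
		return pages[*marker]
	}}

	for {
		e, err := r.Next()
		if err == io.EOF {
			break
		}
		fmt.Println(e)
	}
}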
@@ -12,18 +12,17 @@ import (
     "gitlink.org.cn/cloudream/common/utils/os2"
     "gitlink.org.cn/cloudream/common/utils/sort2"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 type Multiparter struct {
-    detail *jcsypes.UserSpaceDetail
-    feat *cortypes.MultipartUploadFeature
+    detail *jcstypes.UserSpaceDetail
+    feat *jcstypes.MultipartUploadFeature
     bucket string
     cli *s3.Client
 }
 
-func NewMultiparter(detail *jcsypes.UserSpaceDetail, feat *cortypes.MultipartUploadFeature, bkt string, cli *s3.Client) *Multiparter {
+func NewMultiparter(detail *jcstypes.UserSpaceDetail, feat *jcstypes.MultipartUploadFeature, bkt string, cli *s3.Client) *Multiparter {
     return &Multiparter{
         detail: detail,
         feat: feat,
@@ -89,9 +88,9 @@ func (m *Multiparter) UploadPart(ctx context.Context, init types.MultipartInitSt
 type MultipartTask struct {
     cli *s3.Client
     bucket string
-    tempDir jcsypes.JPath
+    tempDir jcstypes.JPath
     tempFileName string
-    tempFilePath jcsypes.JPath
+    tempFilePath jcstypes.JPath
     uploadID string
 }
 
@@ -140,7 +139,7 @@ func (i *MultipartTask) JoinParts(ctx context.Context, parts []types.UploadedPar
         return types.FileInfo{}, err
     }
 
-    hash := jcsypes.CalculateCompositeHash(partHashes)
+    hash := jcstypes.CalculateCompositeHash(partHashes)
 
     return types.FileInfo{
         Path: i.tempFilePath,

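For the S3-style backend the completed object is never re-read locally, so `JoinParts` derives the file hash from the already-known part hashes via `CalculateCompositeHash`. One plausible way such a composite hash can be built is hashing the concatenation of the part hashes in part order; the sketch below shows that idea only and is not the actual jcstypes algorithm.

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

type uploadedPart struct {
	PartNumber int
	Hash       []byte // hash of this part's content
}

// compositeHash hashes the part hashes, ordered by part number, into one
// digest. This mirrors the shape of CalculateCompositeHash, not its exact
// definition.
func compositeHash(parts []uploadedPart) []byte {
	sort.Slice(parts, func(i, j int) bool { return parts[i].PartNumber < parts[j].PartNumber })
	h := sha256.New()
	for _, p := range parts {
		h.Write(p.Hash)
	}
	return h.Sum(nil)
}

func main() {
	p1 := sha256.Sum256([]byte("part one"))
	p2 := sha256.Sum256([]byte("part two"))
	sum := compositeHash([]uploadedPart{
		{PartNumber: 2, Hash: p2[:]},
		{PartNumber: 1, Hash: p1[:]},
	})
	fmt.Printf("composite=%x\n", sum)
}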
@@ -8,20 +8,19 @@ import (
     "github.com/aws/aws-sdk-go-v2/service/s3"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg"
     "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
-    jcsypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
-    cortypes "gitlink.org.cn/cloudream/jcs-pub/coordinator/types"
+    jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
 )
 
 func init() {
-    reg.RegisterBuilder[*cortypes.S3Type](newBuilder)
+    reg.RegisterBuilder[*jcstypes.S3Type](newBuilder)
 }
 
 type builder struct {
     types.EmptyBuilder
-    detail *jcsypes.UserSpaceDetail
+    detail *jcstypes.UserSpaceDetail
 }
 
-func newBuilder(detail *jcsypes.UserSpaceDetail) types.StorageBuilder {
+func newBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder {
     return &builder{
         detail: detail,
     }
@@ -32,8 +31,8 @@ func (b *builder) FeatureDesc() types.FeatureDesc {
 }
 
 func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type)
-    s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.S3Type)
+    s3Cred, ok := b.detail.UserSpace.Credential.(*jcstypes.S3Cred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential)
     }
@@ -51,8 +50,8 @@ func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) {
 }
 
 func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type)
-    s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.S3Type)
+    s3Cred, ok := b.detail.UserSpace.Credential.(*jcstypes.S3Cred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for s3 storage", b.detail.UserSpace.Credential)
     }
@@ -69,7 +68,7 @@ func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) {
     return NewBaseStore(b.detail, cli, bkt, BaseStoreOption{UseAWSSha256: false})
 }
 
-func createClient(stgType *cortypes.S3Type, cred *cortypes.S3Cred) (*s3.Client, string, error) {
+func createClient(stgType *jcstypes.S3Type, cred *jcstypes.S3Cred) (*s3.Client, string, error) {
     awsConfig := aws.Config{}
 
     if cred.AK != "" && cred.SK != "" {
@@ -92,13 +91,13 @@ func createClient(stgType *cortypes.S3Type, cred *cortypes.S3Cred) (*s3.Client,
 }
 
 func (b *builder) CreateMultiparter(typeOnly bool) (types.Multiparter, error) {
-    stgType := b.detail.UserSpace.Storage.(*cortypes.S3Type)
-    feat := types.FindFeature[*cortypes.MultipartUploadFeature](b.detail)
+    stgType := b.detail.UserSpace.Storage.(*jcstypes.S3Type)
+    feat := types.FindFeature[*jcstypes.MultipartUploadFeature](b.detail)
     if feat == nil {
-        return nil, fmt.Errorf("feature %T not found", cortypes.MultipartUploadFeature{})
+        return nil, fmt.Errorf("feature %T not found", jcstypes.MultipartUploadFeature{})
     }
 
-    s3Cred, ok := b.detail.UserSpace.Credential.(*cortypes.S3Cred)
+    s3Cred, ok := b.detail.UserSpace.Credential.(*jcstypes.S3Cred)
     if !ok {
         return nil, fmt.Errorf("invalid storage credential type %T for s3 base store", b.detail.UserSpace.Credential)
     }

Some files were not shown because too many files have changed in this diff.