JCS-pub/client/internal/uploader/create_load.go

package uploader

import (
	"context"
	"fmt"
	"io"
	"sync"
	"time"

	"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch/exec"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock"
	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)
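
// CreateUploader uploads objects into a package that has just been created.
// Each successful upload is buffered in successes until Commit writes them to
// the database, or Abort discards the package.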
type CreateUploader struct {
	pkg          jcstypes.Package
	targetSpaces []jcstypes.UserSpaceDetail
	copyRoots    []jcstypes.JPath
	uploader     *Uploader
	pubLock      *publock.Mutex
	successes    []db.AddObjectEntry
	lock         sync.Mutex
	commited     bool
}
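
// CreateUploadResult is returned by Commit and holds the created package
// together with the added objects, keyed by object path.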
type CreateUploadResult struct {
	Package jcstypes.Package
	Objects map[string]jcstypes.Object
}
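
// Upload streams the given content to every target user space: one copy goes
// to the space's shard store and one copy to its base store under the
// configured copy root. The result is recorded for the final Commit.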
func (u *CreateUploader) Upload(pa jcstypes.JPath, stream io.Reader, opts ...UploadOption) error {
	opt := UploadOption{}
	if len(opts) > 0 {
		opt = opts[0]
	}
	if opt.CreateTime.IsZero() {
		opt.CreateTime = time.Now()
	}

	spaceIDs := make([]jcstypes.UserSpaceID, 0, len(u.targetSpaces))

	// Build the transfer plan: one source (the caller's stream) fanned out to
	// every target space, as both a shard-store copy and a base-store copy.
	ft := ioswitch2.FromTo{}
	fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
	ft.AddFrom(fromExec)
	for i, space := range u.targetSpaces {
		space2 := space
		space2.RecommendHub = nil
		ft.AddTo(ioswitch2.NewToShardStore(space2, ioswitch2.RawStream(), "shardInfo"))
		ft.AddTo(ioswitch2.NewToBaseStore(space2, u.copyRoots[i].ConcatNew(pa)))
		spaceIDs = append(spaceIDs, space2.UserSpace.UserSpaceID)
	}

	plans := exec.NewPlanBuilder()
	err := parser.Parse(ft, plans)
	if err != nil {
		return fmt.Errorf("parsing plan: %w", err)
	}

	exeCtx := exec.NewExecContext()
	exec.SetValueByType(exeCtx, u.uploader.stgPool)

	// Execute the plan, feeding the caller's stream into the driver handle.
	drv := plans.Execute(exeCtx)
	drv.BeginWrite(io.NopCloser(stream), hd)
	ret, err := drv.Wait(context.TODO())
	if err != nil {
		return fmt.Errorf("executing plan: %w", err)
	}

	u.lock.Lock()
	defer u.lock.Unlock()

	// Record the upload result under the key registered on the shard-store target.
	shardInfo := ret.Get("shardInfo").(*ops2.FileInfoValue)
	u.successes = append(u.successes, db.AddObjectEntry{
		Path:         pa.String(),
		Size:         shardInfo.Size,
		FileHash:     shardInfo.Hash,
		CreateTime:   opt.CreateTime,
		UserSpaceIDs: spaceIDs,
	})
	return nil
}
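
// Commit writes all buffered successes to the database in one transaction,
// releases the package lock, and returns the created package together with
// the added objects. Calling Commit more than once is an error.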
func (u *CreateUploader) Commit() (CreateUploadResult, error) {
	u.lock.Lock()
	defer u.lock.Unlock()

	if u.commited {
		return CreateUploadResult{}, fmt.Errorf("package already committed")
	}
	u.commited = true
	defer u.pubLock.Unlock()

	// Persist all recorded uploads in a single transaction.
	var addedObjs []jcstypes.Object
	err := u.uploader.db.DoTx(func(tx db.SQLContext) error {
		var err error
		addedObjs, err = u.uploader.db.Object().BatchAdd(tx, u.pkg.PackageID, u.successes)
		return err
	})
	if err != nil {
		return CreateUploadResult{}, fmt.Errorf("adding objects: %w", err)
	}

	ret := CreateUploadResult{
		Package: u.pkg,
		Objects: make(map[string]jcstypes.Object),
	}
	for _, entry := range addedObjs {
		ret.Objects[entry.Path] = entry
	}
	return ret, nil
}
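
// Abort releases the package lock and deletes the partially created package.
// It does nothing if the uploader has already been committed or aborted.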
func (u *CreateUploader) Abort() {
	u.lock.Lock()
	defer u.lock.Unlock()

	if u.commited {
		return
	}
	u.commited = true
	u.pubLock.Unlock()

	// Best-effort cleanup: delete the package created for this upload. The
	// returned error is intentionally discarded.
	db2 := u.uploader.db
	db.DoTx10(db2, db2.Package().DeleteComplete, u.pkg.PackageID)
}
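
// Typical usage (a sketch; how a *CreateUploader is obtained from the parent
// Uploader is defined elsewhere in this package):
//
//	var up *CreateUploader // acquired from the Uploader
//	if err := up.Upload(path, file); err != nil {
//		up.Abort()
//		return err
//	}
//	result, err := up.Commit()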