JCS-pub/client/internal/uploader/create_load.go

134 lines
3.1 KiB
Go

package uploader
import (
"context"
"fmt"
"io"
"path"
"sync"
"time"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/jcs-pub/client/internal/db"
"gitlink.org.cn/cloudream/jcs-pub/client/types"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/distlock"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser"
)
// CreateLoadUploader uploads a stream of objects into a package and, at the
// same time, "loads" each object into the public store of every target space,
// mirroring the uploaded paths under the per-space load roots.
type CreateLoadUploader struct {
	// pkg is the package the uploaded objects are added to on Commit.
	pkg types.Package
	// targetSpaces are the user spaces that receive both the shard-store copy
	// and the public-store copy of every uploaded object.
	targetSpaces []types.UserSpaceDetail
	// loadRoots[i] is the public-store root path for targetSpaces[i]; the
	// object path is joined under it. Assumed parallel to targetSpaces —
	// TODO(review): confirm at the construction site (not visible here).
	loadRoots []string
	// uploader supplies shared services (database, storage pool).
	uploader *Uploader
	// pubLock is a distributed lock held for the lifetime of the upload and
	// released exactly once, by either Commit or Abort.
	pubLock *distlock.Mutex
	// successes accumulates the entries persisted on Commit; guarded by lock.
	successes []db.AddObjectEntry
	// lock guards successes and commited.
	lock sync.Mutex
	// commited marks that Commit or Abort has already run. (Spelling kept
	// as-is: renaming the field would require edits in the methods below.)
	commited bool
}
// CreateLoadResult describes a successfully committed create-and-load upload.
type CreateLoadResult struct {
	// Package is the package the objects were added to.
	Package types.Package
	// Objects maps each uploaded object's path to its stored database record.
	Objects map[string]types.Object
}
// Upload streams the contents of stream as an object stored at path pa.
// The data is fanned out in a single plan to the shard store AND the public
// store of every target space. On success the object is recorded for the
// final Commit. Safe for concurrent use; the successes list is mutex-guarded.
//
// opts may carry an UploadOption; a zero CreateTime defaults to time.Now().
func (u *CreateLoadUploader) Upload(pa string, stream io.Reader, opts ...UploadOption) error {
	opt := UploadOption{}
	if len(opts) > 0 {
		opt = opts[0]
	}
	if opt.CreateTime.IsZero() {
		opt.CreateTime = time.Now()
	}

	spaceIDs := make([]types.UserSpaceID, 0, len(u.targetSpaces))

	// Build one from-to plan: a single driver input, two outputs per space
	// (shard store + public store under the space's load root).
	ft := ioswitch2.FromTo{}
	fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
	ft.AddFrom(fromExec)
	for i, space := range u.targetSpaces {
		ft.AddTo(ioswitch2.NewToShardStore(*space.MasterHub, space, ioswitch2.RawStream(), "shardInfo"))
		ft.AddTo(ioswitch2.NewToPublicStore(*space.MasterHub, space, path.Join(u.loadRoots[i], pa)))
		spaceIDs = append(spaceIDs, space.UserSpace.UserSpaceID)
	}

	plans := exec.NewPlanBuilder()
	if err := parser.Parse(ft, plans); err != nil {
		return fmt.Errorf("parsing plan: %w", err)
	}

	exeCtx := exec.NewExecContext()
	exec.SetValueByType(exeCtx, u.uploader.stgPool)
	// Named "exe", not "exec", to avoid shadowing the imported exec package.
	exe := plans.Execute(exeCtx)
	exe.BeginWrite(io.NopCloser(stream), hd)
	ret, err := exe.Wait(context.TODO())
	if err != nil {
		return fmt.Errorf("executing plan: %w", err)
	}

	u.lock.Lock()
	defer u.lock.Unlock()

	// 记录上传结果。
	// The key must match the one registered with NewToShardStore above
	// ("shardInfo"); the previous lookup used "fileHash", which this plan
	// never produces, so the bare type assertion would have panicked.
	shardInfo, ok := ret["shardInfo"].(*ops2.ShardInfoValue)
	if !ok {
		return fmt.Errorf("plan result for %q missing shard info", pa)
	}
	u.successes = append(u.successes, db.AddObjectEntry{
		Path:         pa,
		Size:         shardInfo.Size,
		FileHash:     shardInfo.Hash,
		CreateTime:   opt.CreateTime,
		UserSpaceIDs: spaceIDs,
	})
	return nil
}
// Commit finalizes the upload: it persists every successfully uploaded object
// into the package inside one database transaction, releases the distributed
// lock, and returns the package together with a path-indexed map of the
// stored objects. Calling Commit (or Abort) more than once is an error.
func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
	u.lock.Lock()
	defer u.lock.Unlock()

	if u.commited {
		return CreateLoadResult{}, fmt.Errorf("package already commited")
	}
	u.commited = true
	// The distributed lock is released regardless of transaction outcome.
	defer u.pubLock.Unlock()

	var added []types.Object
	txErr := u.uploader.db.DoTx(func(tx db.SQLContext) error {
		objs, err := u.uploader.db.Object().BatchAdd(tx, u.pkg.PackageID, u.successes)
		added = objs
		return err
	})
	if txErr != nil {
		return CreateLoadResult{}, fmt.Errorf("adding objects: %w", txErr)
	}

	result := CreateLoadResult{
		Package: u.pkg,
		Objects: make(map[string]types.Object, len(added)),
	}
	for _, obj := range added {
		result.Objects[obj.Path] = obj
	}
	return result, nil
}
// Abort cancels the upload. It releases the distributed lock and attempts to
// delete the partially-created package. A no-op if Commit or Abort already
// ran.
func (u *CreateLoadUploader) Abort() {
	u.lock.Lock()
	defer u.lock.Unlock()

	if u.commited {
		return
	}
	u.commited = true
	u.pubLock.Unlock()

	// Best-effort cleanup of the package; Abort has no error return, so the
	// result of the delete transaction is intentionally not checked.
	database := u.uploader.db
	db.DoTx10(database, database.Package().DeleteComplete, u.pkg.PackageID)
}