From 8e62dabfb70c4224be97d7cdffa833a9b23527a6 Mon Sep 17 00:00:00 2001
From: Sydonian <794346190@qq.com>
Date: Mon, 21 Jul 2025 10:29:27 +0800
Subject: [PATCH] Add a mechanism for shared shard storage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 client/internal/cmdline/serve.go | 46 +-
 client/internal/cmdline/test.go | 44 +-
 client/internal/cmdline/vfstest.go | 45 +-
 client/internal/db/bucket.go | 32 +-
 client/internal/db/object.go | 234 ++++----
 client/internal/db/object_access_stat.go | 32 +-
 client/internal/db/object_block.go | 44 +-
 client/internal/db/package.go | 98 +--
 client/internal/db/package_access_stat.go | 28 +-
 client/internal/db/pinned_object.go | 48 +-
 client/internal/db/space_sync_task.go | 16 +-
 client/internal/db/user_space.go | 34 +-
 client/internal/downloader/iterator.go | 6 +-
 .../internal/downloader/strategy/selector.go | 71 ++-
 client/internal/downloader/strip_iterator.go | 12 +-
 client/internal/http/types/config.go | 10 +-
 client/internal/http/v1/pub_shards.go | 112 ++++
 client/internal/http/v1/server.go | 3 +
 client/internal/metacache/user_space_meta.go | 21 +-
 client/internal/services/bucket.go | 14 +-
 client/internal/services/object.go | 150 ++---
 client/internal/services/package.go | 30 +-
 client/internal/services/service.go | 4 +
 client/internal/spacesyncer/execute.go | 6 +-
 client/internal/spacesyncer/execute_full.go | 4 +-
 .../ticktock/redundancy_shrink_test.go | 24 +-
 client/internal/ticktock/user_space_gc.go | 10 +-
 client/internal/uploader/create_load.go | 20 +-
 client/internal/uploader/update.go | 22 +-
 client/sdk/api/v1/object.go | 108 ++--
 client/sdk/api/v1/pub_shards.go | 72 +++
 common/assets/confs/hub.config.json | 3 +
 common/globals/globals.go | 6 +-
 common/pkgs/rpc/coordinator/coordinator.pb.go | 75 ++-
 common/pkgs/rpc/coordinator/coordinator.proto | 4 +
 .../rpc/coordinator/coordinator_grpc.pb.go | 111 ++++
 common/pkgs/rpc/coordinator/hub.go | 14 +
 common/pkgs/rpc/coordinator/pub_shards.go | 89 +++
 common/pkgs/rpc/coordinator/server.go | 1 +
 common/pkgs/rpc/hub/hub.pb.go | 80 ++-
 common/pkgs/rpc/hub/hub.proto | 6 +
 common/pkgs/rpc/hub/hub_grpc.pb.go | 185 ++++++
 common/pkgs/rpc/hub/pool.go | 44 +-
 common/pkgs/rpc/hub/pub_shards.go | 127 ++++
 common/pkgs/rpc/hub/server.go | 1 +
 common/pkgs/rpc/hub/user_space.go | 55 --
 common/pkgs/rpc/utils.go | 7 +-
 common/pkgs/storage/all.go | 1 +
 common/pkgs/storage/pubshards/pubshards.go | 53 ++
 common/pkgs/storage/pubshards/shard_store.go | 100 ++++
 common/types/client.go | 2 +-
 common/types/coordinator.go | 36 +-
 common/types/filehash.go | 2 +-
 common/types/location.go | 2 +-
 common/types/path.go | 2 +-
 common/types/redundancy.go | 2 +-
 common/types/space_syncer.go | 2 +-
 common/types/status.go | 2 +-
 common/types/storage.go | 59 +-
 common/types/storage_credential.go | 2 +-
 common/types/storage_feature.go | 2 +-
 common/types/utils.go | 2 +-
 coordinator/internal/cmd/migrate.go | 1 +
 coordinator/internal/db/db.go | 10 +
 coordinator/internal/db/pub_shards.go | 24 +
 coordinator/internal/db/string_serializer.go | 99 ++++
 coordinator/internal/rpc/pub_shards.go | 100 ++++
 coordinator/internal/rpc/storage.go | 22 +-
 go.mod | 12 +-
 go.sum | 23 +
 hub/internal/cmd/serve.go | 8 +-
 hub/internal/config/config.go | 2 +
 hub/internal/pubshards/config.go | 6 +
 hub/internal/pubshards/pool.go | 193 ++++
 hub/internal/pubshards/pub_shards.go | 98 +++
 hub/internal/pubshards/string_serializer.go | 99
++++ hub/internal/rpc/pub_shards.go | 104 ++++ hub/internal/rpc/rpc.go | 5 +- hub/internal/rpc/user_space.go | 49 -- jcsctl/cmd/all/all.go | 1 + jcsctl/cmd/pubshards/create.go | 105 ++++ jcsctl/cmd/pubshards/join.go | 56 ++ jcsctl/cmd/pubshards/pubshards.go | 15 + jcsctl/cmd/userspace/create.go | 557 +++--------------- jcsctl/cmd/utils.go | 145 ++++- 85 files changed, 2953 insertions(+), 1258 deletions(-) create mode 100644 client/internal/http/v1/pub_shards.go create mode 100644 client/sdk/api/v1/pub_shards.go create mode 100644 common/pkgs/rpc/coordinator/pub_shards.go create mode 100644 common/pkgs/rpc/hub/pub_shards.go delete mode 100644 common/pkgs/rpc/hub/user_space.go create mode 100644 common/pkgs/storage/pubshards/pubshards.go create mode 100644 common/pkgs/storage/pubshards/shard_store.go create mode 100644 coordinator/internal/db/pub_shards.go create mode 100644 coordinator/internal/db/string_serializer.go create mode 100644 coordinator/internal/rpc/pub_shards.go create mode 100644 hub/internal/pubshards/config.go create mode 100644 hub/internal/pubshards/pool.go create mode 100644 hub/internal/pubshards/pub_shards.go create mode 100644 hub/internal/pubshards/string_serializer.go create mode 100644 hub/internal/rpc/pub_shards.go delete mode 100644 hub/internal/rpc/user_space.go create mode 100644 jcsctl/cmd/pubshards/create.go create mode 100644 jcsctl/cmd/pubshards/join.go create mode 100644 jcsctl/cmd/pubshards/pubshards.go diff --git a/client/internal/cmdline/serve.go b/client/internal/cmdline/serve.go index b1f06e7..f35ebe0 100644 --- a/client/internal/cmdline/serve.go +++ b/client/internal/cmdline/serve.go @@ -26,6 +26,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" ) @@ -72,8 +73,22 @@ func serveHTTP(configPath string, opts serveHTTPOptions) { stgglb.InitLocal(config.Cfg().Local) + // 数据库 + db, err := db.NewDB(&config.Cfg().DB) + if err != nil { + logger.Fatalf("new db failed, err: %s", err.Error()) + } + + // 元数据缓存 + metaCacheHost := metacache.NewHost(db) + go metaCacheHost.Serve() + spaceMeta := metaCacheHost.AddStorageMeta() + hubMeta := metaCacheHost.AddHubMeta() + conMeta := metaCacheHost.AddConnectivity() + stgglb.StandaloneMode = opts.Standalone || config.Cfg().AccessToken == nil + // 访问令牌 var accToken *accesstoken.Keeper if !stgglb.StandaloneMode { tempCli, err := config.Cfg().CoordinatorRPC.BuildTempClient() @@ -101,6 +116,22 @@ func serveHTTP(configPath string, opts serveHTTPOptions) { os.Exit(1) } + // 通过HubID获取Hub的地址 + hubRPCCfg.AddrProvider = func(id jcstypes.HubID) string { + hub := hubMeta.Get(id) + if hub == nil { + return "" + } + + addr, ok := hub.Address.(*jcstypes.GRPCAddressInfo) + if !ok { + return "" + } + + ip, port := stgglb.SelectGRPCAddress(hub, addr) + return fmt.Sprintf("%v:%v", ip, port) + } + stgglb.UserID = accToken.GetToken().UserID stgglb.InitPools(hubRPCCfg, corRPCCfg) } else { @@ -110,12 +141,6 @@ func serveHTTP(configPath string, opts serveHTTPOptions) { accTokenChan := accToken.Start() defer accToken.Stop() - // 数据库 - db, err := db.NewDB(&config.Cfg().DB) - if err != nil { - logger.Fatalf("new db failed, err: %s", err.Error()) - } - // 初始化系统事件发布器 evtPub, err := sysevent.NewPublisher(config.Cfg().SysEvent, &datamap.SourceClient{ UserID: stgglb.UserID, @@ -138,13 +163,6 @@ func 
serveHTTP(configPath string, opts serveHTTPOptions) { defer conCol.Stop() conCol.CollectInPlace() - // 元数据缓存 - metaCacheHost := metacache.NewHost(db) - go metaCacheHost.Serve() - spaceMeta := metaCacheHost.AddStorageMeta() - hubMeta := metaCacheHost.AddHubMeta() - conMeta := metaCacheHost.AddConnectivity() - // 公共锁 publock := publock.NewService() @@ -198,7 +216,7 @@ func serveHTTP(configPath string, opts serveHTTPOptions) { mntChan := mnt.Start() defer mnt.Stop() - svc := services.NewService(publock, dlder, acStat, uploader, strgSel, spaceMeta, db, evtPub, mnt, stgPool, spaceSync, tktk, spdStats) + svc := services.NewService(publock, dlder, acStat, uploader, strgSel, spaceMeta, db, evtPub, mnt, stgPool, spaceSync, tktk, spdStats, accToken) // HTTP接口 httpCfgJSON := config.Cfg().HTTP diff --git a/client/internal/cmdline/test.go b/client/internal/cmdline/test.go index f3bc714..ca6724c 100644 --- a/client/internal/cmdline/test.go +++ b/client/internal/cmdline/test.go @@ -87,6 +87,19 @@ func test(configPath string) { stgglb.StandaloneMode = config.Cfg().AccessToken == nil + // 数据库 + db, err := db.NewDB(&config.Cfg().DB) + if err != nil { + logger.Fatalf("new db failed, err: %s", err.Error()) + } + + // 元数据缓存 + metaCacheHost := metacache.NewHost(db) + go metaCacheHost.Serve() + stgMeta := metaCacheHost.AddStorageMeta() + hubMeta := metaCacheHost.AddHubMeta() + conMeta := metaCacheHost.AddConnectivity() + var accToken *accesstoken.Keeper if !stgglb.StandaloneMode { tempCli, err := config.Cfg().CoordinatorRPC.BuildTempClient() @@ -114,6 +127,22 @@ func test(configPath string) { os.Exit(1) } + // 通过HubID获取Hub的地址 + hubRPCCfg.AddrProvider = func(id jcstypes.HubID) string { + hub := hubMeta.Get(id) + if hub == nil { + return "" + } + + addr, ok := hub.Address.(*jcstypes.GRPCAddressInfo) + if !ok { + return "" + } + + ip, port := stgglb.SelectGRPCAddress(hub, addr) + return fmt.Sprintf("%v:%v", ip, port) + } + stgglb.UserID = accToken.GetToken().UserID stgglb.InitPools(hubRPCCfg, corRPCCfg) } else { @@ -123,12 +152,6 @@ func test(configPath string) { accTokenChan := accToken.Start() defer accToken.Stop() - // 数据库 - db, err := db.NewDB(&config.Cfg().DB) - if err != nil { - logger.Fatalf("new db failed, err: %s", err.Error()) - } - // 初始化系统事件发布器 evtPub, err := sysevent.NewPublisher(config.Cfg().SysEvent, &datamap.SourceClient{ UserID: stgglb.UserID, @@ -151,13 +174,6 @@ func test(configPath string) { defer conCol.Stop() conCol.CollectInPlace() - // 元数据缓存 - metaCacheHost := metacache.NewHost(db) - go metaCacheHost.Serve() - stgMeta := metaCacheHost.AddStorageMeta() - hubMeta := metaCacheHost.AddHubMeta() - conMeta := metaCacheHost.AddConnectivity() - // 公共锁 publock := publock.NewService() @@ -189,7 +205,7 @@ func test(configPath string) { spaceSyncChan := spaceSync.Start() defer spaceSync.Stop() - svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, nil, stgPool, spaceSync, nil, spdStats) + svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, nil, stgPool, spaceSync, nil, spdStats, accToken) go func() { doTest(svc) diff --git a/client/internal/cmdline/vfstest.go b/client/internal/cmdline/vfstest.go index dfd1055..90a5386 100644 --- a/client/internal/cmdline/vfstest.go +++ b/client/internal/cmdline/vfstest.go @@ -27,6 +27,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent" + jcstypes 
"gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" ) @@ -66,6 +67,19 @@ func vfsTest(configPath string, opts serveHTTPOptions) { stgglb.StandaloneMode = opts.Standalone || config.Cfg().AccessToken == nil + // 数据库 + db, err := db.NewDB(&config.Cfg().DB) + if err != nil { + logger.Fatalf("new db failed, err: %s", err.Error()) + } + + // 元数据缓存 + metaCacheHost := metacache.NewHost(db) + go metaCacheHost.Serve() + stgMeta := metaCacheHost.AddStorageMeta() + hubMeta := metaCacheHost.AddHubMeta() + conMeta := metaCacheHost.AddConnectivity() + var accToken *accesstoken.Keeper if !opts.Standalone { tempCli, err := config.Cfg().CoordinatorRPC.BuildTempClient() @@ -93,6 +107,22 @@ func vfsTest(configPath string, opts serveHTTPOptions) { os.Exit(1) } + // 通过HubID获取Hub的地址 + hubRPCCfg.AddrProvider = func(id jcstypes.HubID) string { + hub := hubMeta.Get(id) + if hub == nil { + return "" + } + + addr, ok := hub.Address.(*jcstypes.GRPCAddressInfo) + if !ok { + return "" + } + + ip, port := stgglb.SelectGRPCAddress(hub, addr) + return fmt.Sprintf("%v:%v", ip, port) + } + stgglb.UserID = accToken.GetToken().UserID stgglb.InitPools(hubRPCCfg, corRPCCfg) } else { @@ -102,12 +132,6 @@ func vfsTest(configPath string, opts serveHTTPOptions) { accTokenChan := accToken.Start() defer accToken.Stop() - // 数据库 - db, err := db.NewDB(&config.Cfg().DB) - if err != nil { - logger.Fatalf("new db failed, err: %s", err.Error()) - } - // 初始化系统事件发布器 evtPub, err := sysevent.NewPublisher(config.Cfg().SysEvent, &datamap.SourceClient{ UserID: stgglb.UserID, @@ -130,13 +154,6 @@ func vfsTest(configPath string, opts serveHTTPOptions) { defer conCol.Stop() conCol.CollectInPlace() - // 元数据缓存 - metaCacheHost := metacache.NewHost(db) - go metaCacheHost.Serve() - stgMeta := metaCacheHost.AddStorageMeta() - hubMeta := metaCacheHost.AddHubMeta() - conMeta := metaCacheHost.AddConnectivity() - // 公共锁 publock := publock.NewService() @@ -181,7 +198,7 @@ func vfsTest(configPath string, opts serveHTTPOptions) { mntChan := mnt.Start() defer mnt.Stop() - svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt, stgPool, spaceSync, nil, spdStats) + svc := services.NewService(publock, dlder, acStat, uploader, strgSel, stgMeta, db, evtPub, mnt, stgPool, spaceSync, nil, spdStats, accToken) // HTTP接口 httpCfgJSON := config.Cfg().HTTP diff --git a/client/internal/db/bucket.go b/client/internal/db/bucket.go index 0695a03..1dc9c8b 100644 --- a/client/internal/db/bucket.go +++ b/client/internal/db/bucket.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -16,25 +16,25 @@ func (db *DB) Bucket() *BucketDB { return &BucketDB{DB: db} } -func (db *BucketDB) GetByID(ctx SQLContext, bucketID types.BucketID) (types.Bucket, error) { - var ret types.Bucket +func (db *BucketDB) GetByID(ctx SQLContext, bucketID jcstypes.BucketID) (jcstypes.Bucket, error) { + var ret jcstypes.Bucket err := ctx.Table("Bucket").Where("BucketID = ?", bucketID).First(&ret).Error return ret, err } -func (db *BucketDB) GetByName(ctx SQLContext, bucketName string) (types.Bucket, error) { - var ret types.Bucket +func (db *BucketDB) GetByName(ctx SQLContext, bucketName string) (jcstypes.Bucket, error) { + var ret jcstypes.Bucket err := ctx.Table("Bucket").Where("Name = ?", bucketName).First(&ret).Error return ret, err } -func (*BucketDB) GetAll(ctx SQLContext) ([]types.Bucket, error) { - 
var ret []types.Bucket +func (*BucketDB) GetAll(ctx SQLContext) ([]jcstypes.Bucket, error) { + var ret []jcstypes.Bucket err := ctx.Table("Bucket").Find(&ret).Error return ret, err } -func (db *BucketDB) Create(ctx SQLContext, bucketName string, createTime time.Time) (types.Bucket, error) { +func (db *BucketDB) Create(ctx SQLContext, bucketName string, createTime time.Time) (jcstypes.Bucket, error) { var bucketID int64 err := ctx.Table("Bucket"). Select("Bucket.BucketID"). @@ -42,30 +42,30 @@ func (db *BucketDB) Create(ctx SQLContext, bucketName string, createTime time.Ti Scan(&bucketID).Error if err != nil { - return types.Bucket{}, err + return jcstypes.Bucket{}, err } if bucketID > 0 { - return types.Bucket{}, gorm.ErrDuplicatedKey + return jcstypes.Bucket{}, gorm.ErrDuplicatedKey } - newBucket := types.Bucket{Name: bucketName, CreateTime: createTime} + newBucket := jcstypes.Bucket{Name: bucketName, CreateTime: createTime} if err := ctx.Table("Bucket").Create(&newBucket).Error; err != nil { - return types.Bucket{}, fmt.Errorf("insert bucket failed, err: %w", err) + return jcstypes.Bucket{}, fmt.Errorf("insert bucket failed, err: %w", err) } return newBucket, nil } -func (db *BucketDB) Rename(ctx SQLContext, bucketID types.BucketID, bucketName string) error { +func (db *BucketDB) Rename(ctx SQLContext, bucketID jcstypes.BucketID, bucketName string) error { return ctx.Table("Bucket").Where("BucketID = ?", bucketID).Update("Name", bucketName).Error } -func (db *BucketDB) Delete(ctx SQLContext, bucketID types.BucketID) error { - return ctx.Delete(&types.Bucket{}, "BucketID = ?", bucketID).Error +func (db *BucketDB) Delete(ctx SQLContext, bucketID jcstypes.BucketID) error { + return ctx.Delete(&jcstypes.Bucket{}, "BucketID = ?", bucketID).Error } -func (db *BucketDB) DeleteComplete(tx SQLContext, bucketID types.BucketID) error { +func (db *BucketDB) DeleteComplete(tx SQLContext, bucketID jcstypes.BucketID) error { pkgs, err := db.Package().GetBucketPackages(tx, bucketID) if err != nil { return err diff --git a/client/internal/db/object.go b/client/internal/db/object.go index 0738c33..2c7462c 100644 --- a/client/internal/db/object.go +++ b/client/internal/db/object.go @@ -10,7 +10,7 @@ import ( "gorm.io/gorm/clause" "gitlink.org.cn/cloudream/common/utils/sort2" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ObjectDB struct { @@ -21,20 +21,20 @@ func (db *DB) Object() *ObjectDB { return &ObjectDB{DB: db} } -func (db *ObjectDB) GetByID(ctx SQLContext, objectID types.ObjectID) (types.Object, error) { - var ret types.Object +func (db *ObjectDB) GetByID(ctx SQLContext, objectID jcstypes.ObjectID) (jcstypes.Object, error) { + var ret jcstypes.Object err := ctx.Table("Object").Where("ObjectID = ?", objectID).First(&ret).Error return ret, err } -func (db *ObjectDB) GetByPath(ctx SQLContext, packageID types.PackageID, path string) (types.Object, error) { - var ret types.Object +func (db *ObjectDB) GetByPath(ctx SQLContext, packageID jcstypes.PackageID, path string) (jcstypes.Object, error) { + var ret jcstypes.Object err := ctx.Table("Object").Where("PackageID = ? 
AND Path = ?", packageID, path).First(&ret).Error return ret, err } -func (db *ObjectDB) GetByFullPath(ctx SQLContext, bktName string, pkgName string, path string) (types.Object, error) { - var ret types.Object +func (db *ObjectDB) GetByFullPath(ctx SQLContext, bktName string, pkgName string, path string) (jcstypes.Object, error) { + var ret jcstypes.Object err := ctx.Table("Object"). Joins("join Package on Package.PackageID = Object.PackageID and Package.Name = ?", pkgName). Joins("join Bucket on Bucket.BucketID = Package.BucketID and Bucket.Name = ?", bktName). @@ -42,29 +42,29 @@ func (db *ObjectDB) GetByFullPath(ctx SQLContext, bktName string, pkgName string return ret, err } -func (db *ObjectDB) GetWithPathPrefix(ctx SQLContext, packageID types.PackageID, pathPrefix string) ([]types.Object, error) { - var ret []types.Object +func (db *ObjectDB) GetWithPathPrefix(ctx SQLContext, packageID jcstypes.PackageID, pathPrefix string) ([]jcstypes.Object, error) { + var ret []jcstypes.Object err := ctx.Table("Object").Where("PackageID = ? AND Path LIKE ?", packageID, escapeLike("", "%", pathPrefix)).Order("ObjectID ASC").Find(&ret).Error return ret, err } // 查询结果将按照Path升序,而不是ObjectID升序 -func (db *ObjectDB) GetWithPathPrefixPaged(ctx SQLContext, packageID types.PackageID, pathPrefix string, startPath string, limit int) ([]types.Object, error) { - var ret []types.Object +func (db *ObjectDB) GetWithPathPrefixPaged(ctx SQLContext, packageID jcstypes.PackageID, pathPrefix string, startPath string, limit int) ([]jcstypes.Object, error) { + var ret []jcstypes.Object err := ctx.Table("Object").Where("PackageID = ? AND Path > ? AND Path LIKE ?", packageID, startPath, pathPrefix+"%").Order("Path ASC").Limit(limit).Find(&ret).Error return ret, err } -func (db *ObjectDB) GetByPrefixGrouped(ctx SQLContext, packageID types.PackageID, pathPrefix string) (objs []types.Object, commonPrefixes []string, err error) { +func (db *ObjectDB) GetByPrefixGrouped(ctx SQLContext, packageID jcstypes.PackageID, pathPrefix string) (objs []jcstypes.Object, commonPrefixes []string, err error) { type ObjectOrDir struct { - types.Object + jcstypes.Object IsObject bool `gorm:"IsObject"` Prefix string `gorm:"Prefix"` } - sepCnt := strings.Count(pathPrefix, types.ObjectPathSeparator) + 1 + sepCnt := strings.Count(pathPrefix, jcstypes.ObjectPathSeparator) + 1 - prefixStatm := fmt.Sprintf("Substring_Index(Path, '%s', %d)", types.ObjectPathSeparator, sepCnt) + prefixStatm := fmt.Sprintf("Substring_Index(Path, '%s', %d)", jcstypes.ObjectPathSeparator, sepCnt) grouping := ctx.Table("Object"). Select(fmt.Sprintf("%s as Prefix, Max(ObjectID) as ObjectID, %s = Path as IsObject", prefixStatm, prefixStatm)). Where("PackageID = ?", packageID). 
@@ -85,23 +85,23 @@ func (db *ObjectDB) GetByPrefixGrouped(ctx SQLContext, packageID types.PackageID if o.IsObject { objs = append(objs, o.Object) } else { - commonPrefixes = append(commonPrefixes, o.Prefix+types.ObjectPathSeparator) + commonPrefixes = append(commonPrefixes, o.Prefix+jcstypes.ObjectPathSeparator) } } return } -func (db *ObjectDB) GetByPrefixGroupedPaged(ctx SQLContext, packageID types.PackageID, pathPrefix string, startPath string, limit int) (objs []types.Object, commonPrefixes []string, nextStartPath string, err error) { +func (db *ObjectDB) GetByPrefixGroupedPaged(ctx SQLContext, packageID jcstypes.PackageID, pathPrefix string, startPath string, limit int) (objs []jcstypes.Object, commonPrefixes []string, nextStartPath string, err error) { type ObjectOrDir struct { - types.Object + jcstypes.Object IsObject bool `gorm:"IsObject"` Prefix string `gorm:"Prefix"` } - sepCnt := strings.Count(pathPrefix, types.ObjectPathSeparator) + 1 + sepCnt := strings.Count(pathPrefix, jcstypes.ObjectPathSeparator) + 1 - prefixStatm := fmt.Sprintf("Substring_Index(Path, '%s', %d)", types.ObjectPathSeparator, sepCnt) + prefixStatm := fmt.Sprintf("Substring_Index(Path, '%s', %d)", jcstypes.ObjectPathSeparator, sepCnt) grouping := ctx.Table("Object"). Select(fmt.Sprintf("%s as Prefix, Max(ObjectID) as ObjectID, %s = Path as IsObject", prefixStatm, prefixStatm)). Where("PackageID = ?", packageID). @@ -124,7 +124,7 @@ func (db *ObjectDB) GetByPrefixGroupedPaged(ctx SQLContext, packageID types.Pack if o.IsObject { objs = append(objs, o.Object) } else { - commonPrefixes = append(commonPrefixes, o.Prefix+types.ObjectPathSeparator) + commonPrefixes = append(commonPrefixes, o.Prefix+jcstypes.ObjectPathSeparator) } nextStartPath = o.Prefix } @@ -133,23 +133,23 @@ func (db *ObjectDB) GetByPrefixGroupedPaged(ctx SQLContext, packageID types.Pack } // 返回gorm.ErrRecordNotFound表示没有对象,nil表示有对象 -func (db *ObjectDB) HasObjectWithPrefix(ctx SQLContext, packageID types.PackageID, pathPrefix string) error { - var obj types.Object +func (db *ObjectDB) HasObjectWithPrefix(ctx SQLContext, packageID jcstypes.PackageID, pathPrefix string) error { + var obj jcstypes.Object return ctx.Table("Object").Where("PackageID = ? 
AND Path LIKE ?", packageID, escapeLike("", "%", pathPrefix)).First(&obj).Error } -func (db *ObjectDB) BatchTestObjectID(ctx SQLContext, objectIDs []types.ObjectID) (map[types.ObjectID]bool, error) { +func (db *ObjectDB) BatchTestObjectID(ctx SQLContext, objectIDs []jcstypes.ObjectID) (map[jcstypes.ObjectID]bool, error) { if len(objectIDs) == 0 { - return make(map[types.ObjectID]bool), nil + return make(map[jcstypes.ObjectID]bool), nil } - var avaiIDs []types.ObjectID + var avaiIDs []jcstypes.ObjectID err := ctx.Table("Object").Where("ObjectID IN ?", objectIDs).Pluck("ObjectID", &avaiIDs).Error if err != nil { return nil, err } - avaiIDMap := make(map[types.ObjectID]bool) + avaiIDMap := make(map[jcstypes.ObjectID]bool) for _, pkgID := range avaiIDs { avaiIDMap[pkgID] = true } @@ -157,12 +157,12 @@ func (db *ObjectDB) BatchTestObjectID(ctx SQLContext, objectIDs []types.ObjectID return avaiIDMap, nil } -func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []types.ObjectID) ([]types.Object, error) { +func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []jcstypes.ObjectID) ([]jcstypes.Object, error) { if len(objectIDs) == 0 { return nil, nil } - var objs []types.Object + var objs []jcstypes.Object err := ctx.Table("Object").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&objs).Error if err != nil { return nil, err @@ -171,12 +171,12 @@ func (db *ObjectDB) BatchGet(ctx SQLContext, objectIDs []types.ObjectID) ([]type return objs, nil } -func (db *ObjectDB) BatchGetByPackagePath(ctx SQLContext, pkgID types.PackageID, pathes []string) ([]types.Object, error) { +func (db *ObjectDB) BatchGetByPackagePath(ctx SQLContext, pkgID jcstypes.PackageID, pathes []string) ([]jcstypes.Object, error) { if len(pathes) == 0 { return nil, nil } - var objs []types.Object + var objs []jcstypes.Object err := ctx.Table("Object").Where("PackageID = ? 
AND Path IN ?", pkgID, pathes).Find(&objs).Error if err != nil { return nil, err @@ -185,33 +185,33 @@ func (db *ObjectDB) BatchGetByPackagePath(ctx SQLContext, pkgID types.PackageID, return objs, nil } -func (db *ObjectDB) GetDetail(ctx SQLContext, objectID types.ObjectID) (types.ObjectDetail, error) { - var obj types.Object +func (db *ObjectDB) GetDetail(ctx SQLContext, objectID jcstypes.ObjectID) (jcstypes.ObjectDetail, error) { + var obj jcstypes.Object err := ctx.Table("Object").Where("ObjectID = ?", objectID).First(&obj).Error if err != nil { - return types.ObjectDetail{}, fmt.Errorf("getting object: %w", err) + return jcstypes.ObjectDetail{}, fmt.Errorf("getting object: %w", err) } // 获取所有的 ObjectBlock - var allBlocks []types.ObjectBlock + var allBlocks []jcstypes.ObjectBlock err = ctx.Table("ObjectBlock").Where("ObjectID = ?", objectID).Order("`Index` ASC").Find(&allBlocks).Error if err != nil { - return types.ObjectDetail{}, fmt.Errorf("getting all object blocks: %w", err) + return jcstypes.ObjectDetail{}, fmt.Errorf("getting all object blocks: %w", err) } // 获取所有的 PinnedObject - var allPinnedObjs []types.PinnedObject + var allPinnedObjs []jcstypes.PinnedObject err = ctx.Table("PinnedObject").Where("ObjectID = ?", objectID).Order("ObjectID ASC").Find(&allPinnedObjs).Error if err != nil { - return types.ObjectDetail{}, fmt.Errorf("getting all pinned objects: %w", err) + return jcstypes.ObjectDetail{}, fmt.Errorf("getting all pinned objects: %w", err) } - pinnedAt := make([]types.UserSpaceID, len(allPinnedObjs)) + pinnedAt := make([]jcstypes.UserSpaceID, len(allPinnedObjs)) for i, po := range allPinnedObjs { pinnedAt[i] = po.UserSpaceID } - return types.ObjectDetail{ + return jcstypes.ObjectDetail{ Object: obj, Blocks: allBlocks, PinnedAt: pinnedAt, @@ -219,8 +219,8 @@ func (db *ObjectDB) GetDetail(ctx SQLContext, objectID types.ObjectID) (types.Ob } // 仅返回查询到的对象 -func (db *ObjectDB) BatchGetDetails(ctx SQLContext, objectIDs []types.ObjectID) ([]types.ObjectDetail, error) { - var objs []types.Object +func (db *ObjectDB) BatchGetDetails(ctx SQLContext, objectIDs []jcstypes.ObjectID) ([]jcstypes.ObjectDetail, error) { + var objs []jcstypes.Object err := ctx.Table("Object").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&objs).Error if err != nil { @@ -228,71 +228,71 @@ func (db *ObjectDB) BatchGetDetails(ctx SQLContext, objectIDs []types.ObjectID) } // 获取所有的 ObjectBlock - var allBlocks []types.ObjectBlock + var allBlocks []jcstypes.ObjectBlock err = ctx.Table("ObjectBlock").Where("ObjectID IN ?", objectIDs).Order("ObjectID, `Index` ASC").Find(&allBlocks).Error if err != nil { return nil, err } // 获取所有的 PinnedObject - var allPinnedObjs []types.PinnedObject + var allPinnedObjs []jcstypes.PinnedObject err = ctx.Table("PinnedObject").Where("ObjectID IN ?", objectIDs).Order("ObjectID ASC").Find(&allPinnedObjs).Error if err != nil { return nil, err } - details := make([]types.ObjectDetail, len(objs)) + details := make([]jcstypes.ObjectDetail, len(objs)) for i, obj := range objs { - details[i] = types.ObjectDetail{ + details[i] = jcstypes.ObjectDetail{ Object: obj, } } - types.DetailsFillObjectBlocks(details, allBlocks) - types.DetailsFillPinnedAt(details, allPinnedObjs) + jcstypes.DetailsFillObjectBlocks(details, allBlocks) + jcstypes.DetailsFillPinnedAt(details, allPinnedObjs) return details, nil } -func (db *ObjectDB) BatchGetDetailsPaged(ctx SQLContext, pkgID types.PackageID, lastObjID types.ObjectID, maxCnt int) ([]types.ObjectDetail, error) { - var objs []types.Object 
+func (db *ObjectDB) BatchGetDetailsPaged(ctx SQLContext, pkgID jcstypes.PackageID, lastObjID jcstypes.ObjectID, maxCnt int) ([]jcstypes.ObjectDetail, error) { + var objs []jcstypes.Object err := ctx.Table("Object").Where("ObjectID > ? ORDER BY ObjectID ASC LIMIT ?", lastObjID, maxCnt).Find(&objs).Error if err != nil { return nil, err } - objIDs := make([]types.ObjectID, len(objs)) + objIDs := make([]jcstypes.ObjectID, len(objs)) for i, obj := range objs { objIDs[i] = obj.ObjectID } // 获取所有的 ObjectBlock - var allBlocks []types.ObjectBlock + var allBlocks []jcstypes.ObjectBlock err = ctx.Table("ObjectBlock").Where("ObjectID IN ?", objIDs).Order("ObjectID, `Index` ASC").Find(&allBlocks).Error if err != nil { return nil, err } // 获取所有的 PinnedObject - var allPinnedObjs []types.PinnedObject + var allPinnedObjs []jcstypes.PinnedObject err = ctx.Table("PinnedObject").Where("ObjectID IN ?", objIDs).Order("ObjectID ASC").Find(&allPinnedObjs).Error if err != nil { return nil, err } - details := make([]types.ObjectDetail, len(objs)) + details := make([]jcstypes.ObjectDetail, len(objs)) for i, obj := range objs { - details[i] = types.ObjectDetail{ + details[i] = jcstypes.ObjectDetail{ Object: obj, } } - types.DetailsFillObjectBlocks(details, allBlocks) - types.DetailsFillPinnedAt(details, allPinnedObjs) + jcstypes.DetailsFillObjectBlocks(details, allBlocks) + jcstypes.DetailsFillPinnedAt(details, allPinnedObjs) return details, nil } -func (db *ObjectDB) Create(ctx SQLContext, obj types.Object) (types.ObjectID, error) { +func (db *ObjectDB) Create(ctx SQLContext, obj jcstypes.Object) (jcstypes.ObjectID, error) { err := ctx.Table("Object").Create(&obj).Error if err != nil { return 0, fmt.Errorf("insert object failed, err: %w", err) @@ -301,7 +301,7 @@ func (db *ObjectDB) Create(ctx SQLContext, obj types.Object) (types.ObjectID, er } // 批量创建对象,创建完成后会填充ObjectID。 -func (db *ObjectDB) BatchCreate(ctx SQLContext, objs *[]types.Object) error { +func (db *ObjectDB) BatchCreate(ctx SQLContext, objs *[]jcstypes.Object) error { if len(*objs) == 0 { return nil } @@ -310,7 +310,7 @@ func (db *ObjectDB) BatchCreate(ctx SQLContext, objs *[]types.Object) error { } // 批量更新对象所有属性,objs中的对象必须包含ObjectID -func (db *ObjectDB) BatchUpdate(ctx SQLContext, objs []types.Object) error { +func (db *ObjectDB) BatchUpdate(ctx SQLContext, objs []jcstypes.Object) error { if len(objs) == 0 { return nil } @@ -324,7 +324,7 @@ func (db *ObjectDB) BatchUpdate(ctx SQLContext, objs []types.Object) error { // 批量更新对象指定属性,objs中的对象只需设置需要更新的属性即可,但: // 1. 必须包含ObjectID // 2. 
日期类型属性不能设置为0值 -func (db *ObjectDB) BatchUpdateColumns(ctx SQLContext, objs []types.Object, columns []string) error { +func (db *ObjectDB) BatchUpdateColumns(ctx SQLContext, objs []jcstypes.Object, columns []string) error { if len(objs) == 0 { return nil } @@ -335,21 +335,21 @@ func (db *ObjectDB) BatchUpdateColumns(ctx SQLContext, objs []types.Object, colu }).Create(objs).Error } -func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID types.PackageID) ([]types.Object, error) { - var ret []types.Object +func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID jcstypes.PackageID) ([]jcstypes.Object, error) { + var ret []jcstypes.Object err := ctx.Table("Object").Where("PackageID = ?", packageID).Order("ObjectID ASC").Find(&ret).Error return ret, err } -func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID types.PackageID) ([]types.ObjectDetail, error) { - var objs []types.Object +func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID jcstypes.PackageID) ([]jcstypes.ObjectDetail, error) { + var objs []jcstypes.Object err := ctx.Table("Object").Where("PackageID = ?", packageID).Order("ObjectID ASC").Find(&objs).Error if err != nil { return nil, fmt.Errorf("getting objects: %w", err) } // 获取所有的 ObjectBlock - var allBlocks []types.ObjectBlock + var allBlocks []jcstypes.ObjectBlock err = ctx.Table("ObjectBlock"). Select("ObjectBlock.*"). Joins("JOIN Object ON ObjectBlock.ObjectID = Object.ObjectID"). @@ -361,7 +361,7 @@ func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID types.Pack } // 获取所有的 PinnedObject - var allPinnedObjs []types.PinnedObject + var allPinnedObjs []jcstypes.PinnedObject err = ctx.Table("PinnedObject"). Select("PinnedObject.*"). Joins("JOIN Object ON PinnedObject.ObjectID = Object.ObjectID"). @@ -372,20 +372,20 @@ func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID types.Pack return nil, fmt.Errorf("getting all pinned objects: %w", err) } - details := make([]types.ObjectDetail, len(objs)) + details := make([]jcstypes.ObjectDetail, len(objs)) for i, obj := range objs { - details[i] = types.ObjectDetail{ + details[i] = jcstypes.ObjectDetail{ Object: obj, } } - types.DetailsFillObjectBlocks(details, allBlocks) - types.DetailsFillPinnedAt(details, allPinnedObjs) + jcstypes.DetailsFillObjectBlocks(details, allBlocks) + jcstypes.DetailsFillPinnedAt(details, allPinnedObjs) return details, nil } -func (db *ObjectDB) GetObjectsIfAnyBlockOnStorage(ctx SQLContext, spaceID types.UserSpaceID) ([]types.Object, error) { - var objs []types.Object +func (db *ObjectDB) GetObjectsIfAnyBlockOnStorage(ctx SQLContext, spaceID jcstypes.UserSpaceID) ([]jcstypes.Object, error) { + var objs []jcstypes.Object err := ctx.Table("Object").Where("ObjectID IN (SELECT ObjectID FROM ObjectBlock WHERE UserSpaceID = ?)", spaceID).Order("ObjectID ASC").Find(&objs).Error if err != nil { return nil, fmt.Errorf("getting objects: %w", err) @@ -395,14 +395,14 @@ func (db *ObjectDB) GetObjectsIfAnyBlockOnStorage(ctx SQLContext, spaceID types. 
} type AddObjectEntry struct { - Path string `json:"path"` - Size int64 `json:"size,string"` - FileHash types.FileHash `json:"fileHash"` - CreateTime time.Time `json:"createTime"` // 开始上传文件的时间 - UserSpaceIDs []types.UserSpaceID `json:"userSpaceIDs"` + Path string `json:"path"` + Size int64 `json:"size,string"` + FileHash jcstypes.FileHash `json:"fileHash"` + CreateTime time.Time `json:"createTime"` // 开始上传文件的时间 + UserSpaceIDs []jcstypes.UserSpaceID `json:"userSpaceIDs"` } -func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID types.PackageID, adds []AddObjectEntry) ([]types.Object, error) { +func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID jcstypes.PackageID, adds []AddObjectEntry) ([]jcstypes.Object, error) { if len(adds) == 0 { return nil, nil } @@ -419,20 +419,20 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID types.PackageID, adds []A return nil, fmt.Errorf("batch get object by path: %w", err) } - existsObjsMap := make(map[string]types.Object) + existsObjsMap := make(map[string]jcstypes.Object) for _, obj := range existsObjs { existsObjsMap[obj.Path] = obj } - var updatingObjs []types.Object - var addingObjs []types.Object + var updatingObjs []jcstypes.Object + var addingObjs []jcstypes.Object for i := range adds { - o := types.Object{ + o := jcstypes.Object{ PackageID: packageID, Path: adds[i].Path, Size: adds[i].Size, FileHash: adds[i].FileHash, - Redundancy: types.NewNoneRedundancy(), // 首次上传默认使用不分块的none模式 + Redundancy: jcstypes.NewNoneRedundancy(), // 首次上传默认使用不分块的none模式 CreateTime: adds[i].CreateTime, UpdateTime: adds[i].CreateTime, } @@ -461,15 +461,15 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID types.PackageID, adds []A } // 按照add参数的顺序返回结果 - affectedObjsMp := make(map[string]types.Object) + affectedObjsMp := make(map[string]jcstypes.Object) for _, o := range updatingObjs { affectedObjsMp[o.Path] = o } for _, o := range addingObjs { affectedObjsMp[o.Path] = o } - affectedObjs := make([]types.Object, 0, len(affectedObjsMp)) - affectedObjIDs := make([]types.ObjectID, 0, len(affectedObjsMp)) + affectedObjs := make([]jcstypes.Object, 0, len(affectedObjsMp)) + affectedObjIDs := make([]jcstypes.ObjectID, 0, len(affectedObjsMp)) for i := range adds { obj := affectedObjsMp[adds[i].Path] affectedObjs = append(affectedObjs, obj) @@ -489,10 +489,10 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID types.PackageID, adds []A } // 创建 ObjectBlock - objBlocks := make([]types.ObjectBlock, 0, len(adds)) + objBlocks := make([]jcstypes.ObjectBlock, 0, len(adds)) for i, add := range adds { for _, spaceID := range add.UserSpaceIDs { - objBlocks = append(objBlocks, types.ObjectBlock{ + objBlocks = append(objBlocks, jcstypes.ObjectBlock{ ObjectID: affectedObjIDs[i], Index: 0, UserSpaceID: spaceID, @@ -508,32 +508,32 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID types.PackageID, adds []A return affectedObjs, nil } -func (db *ObjectDB) BatchDelete(ctx SQLContext, ids []types.ObjectID) error { +func (db *ObjectDB) BatchDelete(ctx SQLContext, ids []jcstypes.ObjectID) error { if len(ids) == 0 { return nil } - return ctx.Table("Object").Where("ObjectID IN ?", ids).Delete(&types.Object{}).Error + return ctx.Table("Object").Where("ObjectID IN ?", ids).Delete(&jcstypes.Object{}).Error } -func (db *ObjectDB) DeleteInPackage(ctx SQLContext, packageID types.PackageID) error { - return ctx.Table("Object").Where("PackageID = ?", packageID).Delete(&types.Object{}).Error +func (db *ObjectDB) DeleteInPackage(ctx SQLContext, packageID jcstypes.PackageID) error { + return 
ctx.Table("Object").Where("PackageID = ?", packageID).Delete(&jcstypes.Object{}).Error } type UpdatingObjectRedundancy struct { - ObjectID types.ObjectID `json:"objectID"` - FileHash types.FileHash `json:"fileHash"` - Size int64 `json:"size"` - Redundancy types.Redundancy `json:"redundancy"` - PinnedAt []types.UserSpaceID `json:"pinnedAt"` - Blocks []types.ObjectBlock `json:"blocks"` + ObjectID jcstypes.ObjectID `json:"objectID"` + FileHash jcstypes.FileHash `json:"fileHash"` + Size int64 `json:"size"` + Redundancy jcstypes.Redundancy `json:"redundancy"` + PinnedAt []jcstypes.UserSpaceID `json:"pinnedAt"` + Blocks []jcstypes.ObjectBlock `json:"blocks"` } func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, updates []UpdatingObjectRedundancy) error { objs := updates nowTime := time.Now() - objIDs := make([]types.ObjectID, 0, len(objs)) + objIDs := make([]jcstypes.ObjectID, 0, len(objs)) for _, obj := range objs { objIDs = append(objIDs, obj.ObjectID) } @@ -549,9 +549,9 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, updates []UpdatingObje return avaiIDs[obj.ObjectID] }) - dummyObjs := make([]types.Object, 0, len(objs)) + dummyObjs := make([]jcstypes.Object, 0, len(objs)) for _, obj := range objs { - dummyObjs = append(dummyObjs, types.Object{ + dummyObjs = append(dummyObjs, jcstypes.Object{ ObjectID: obj.ObjectID, FileHash: obj.FileHash, Size: obj.Size, @@ -578,7 +578,7 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, updates []UpdatingObje return fmt.Errorf("batch delete pinned object: %w", err) } - blocks := make([]types.ObjectBlock, 0, len(objs)) + blocks := make([]jcstypes.ObjectBlock, 0, len(objs)) for _, obj := range objs { blocks = append(blocks, obj.Blocks...) } @@ -587,10 +587,10 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, updates []UpdatingObje return fmt.Errorf("batch create object blocks: %w", err) } - pinneds := make([]types.PinnedObject, 0, len(objs)) + pinneds := make([]jcstypes.PinnedObject, 0, len(objs)) for _, obj := range objs { for _, p := range obj.PinnedAt { - pinneds = append(pinneds, types.PinnedObject{ + pinneds = append(pinneds, jcstypes.PinnedObject{ ObjectID: obj.ObjectID, UserSpaceID: p, CreateTime: nowTime, @@ -605,7 +605,7 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, updates []UpdatingObje return nil } -func (*ObjectDB) BatchUpdateUpdateTimeByPath(ctx SQLContext, packageID types.PackageID, pathes []string, updateTimes []time.Time) error { +func (*ObjectDB) BatchUpdateUpdateTimeByPath(ctx SQLContext, packageID jcstypes.PackageID, pathes []string, updateTimes []time.Time) error { if len(pathes) != len(updateTimes) { return fmt.Errorf("pathes and updateTimes must have the same length") } @@ -628,7 +628,7 @@ func (*ObjectDB) BatchUpdateUpdateTimeByPath(ctx SQLContext, packageID types.Pac return ctx.Exec(sb.String(), args...).Error } -func (db *ObjectDB) MoveByPrefix(ctx SQLContext, oldPkgID types.PackageID, oldPrefix string, newPkgID types.PackageID, newPrefix string) error { +func (db *ObjectDB) MoveByPrefix(ctx SQLContext, oldPkgID jcstypes.PackageID, oldPrefix string, newPkgID jcstypes.PackageID, newPrefix string) error { return ctx.Table("Object").Where("PackageID = ? AND Path LIKE ?", oldPkgID, escapeLike("", "%", oldPrefix)). 
Updates(map[string]any{ "PackageID": newPkgID, @@ -636,26 +636,26 @@ func (db *ObjectDB) MoveByPrefix(ctx SQLContext, oldPkgID types.PackageID, oldPr }).Error } -func (db *ObjectDB) AppendPart(tx SQLContext, block types.ObjectBlock) error { +func (db *ObjectDB) AppendPart(tx SQLContext, block jcstypes.ObjectBlock) error { obj, err := db.Object().GetByID(tx, block.ObjectID) if err != nil { return fmt.Errorf("getting object by id: %w", err) } - _, ok := obj.Redundancy.(*types.MultipartUploadRedundancy) + _, ok := obj.Redundancy.(*jcstypes.MultipartUploadRedundancy) if !ok { return fmt.Errorf("object is not a multipart upload object") } - blks, err := db.ObjectBlock().BatchGetByObjectID(tx, []types.ObjectID{obj.ObjectID}) + blks, err := db.ObjectBlock().BatchGetByObjectID(tx, []jcstypes.ObjectID{obj.ObjectID}) if err != nil { return fmt.Errorf("batch getting object blocks: %w", err) } - blks = lo.Reject(blks, func(blk types.ObjectBlock, idx int) bool { return blk.Index == block.Index }) + blks = lo.Reject(blks, func(blk jcstypes.ObjectBlock, idx int) bool { return blk.Index == block.Index }) blks = append(blks, block) - blks = sort2.Sort(blks, func(a, b types.ObjectBlock) int { return a.Index - b.Index }) + blks = sort2.Sort(blks, func(a, b jcstypes.ObjectBlock) int { return a.Index - b.Index }) totalSize := int64(0) var hashes [][]byte @@ -664,7 +664,7 @@ func (db *ObjectDB) AppendPart(tx SQLContext, block types.ObjectBlock) error { hashes = append(hashes, blk.FileHash.GetHashBytes()) } - newObjHash := types.CalculateCompositeHash(hashes) + newObjHash := jcstypes.CalculateCompositeHash(hashes) obj.Size = totalSize obj.FileHash = newObjHash obj.UpdateTime = time.Now() @@ -679,7 +679,7 @@ func (db *ObjectDB) AppendPart(tx SQLContext, block types.ObjectBlock) error { return fmt.Errorf("create object block: %w", err) } - err = db.Object().BatchUpdate(tx, []types.Object{obj}) + err = db.Object().BatchUpdate(tx, []jcstypes.Object{obj}) if err != nil { return fmt.Errorf("update object: %w", err) } @@ -687,7 +687,7 @@ func (db *ObjectDB) AppendPart(tx SQLContext, block types.ObjectBlock) error { return nil } -func (db *ObjectDB) BatchDeleteComplete(ctx SQLContext, objectIDs []types.ObjectID) error { +func (db *ObjectDB) BatchDeleteComplete(ctx SQLContext, objectIDs []jcstypes.ObjectID) error { err := db.Object().BatchDelete(ctx, objectIDs) if err != nil { return fmt.Errorf("batch deleting objects: %w", err) @@ -711,11 +711,11 @@ func (db *ObjectDB) BatchDeleteComplete(ctx SQLContext, objectIDs []types.Object return nil } -func (db *ObjectDB) DeleteCompleteByPath(ctx SQLContext, packageID types.PackageID, path string) error { +func (db *ObjectDB) DeleteCompleteByPath(ctx SQLContext, packageID jcstypes.PackageID, path string) error { obj, err := db.Object().GetByPath(ctx, packageID, path) if err != nil { return err } - return db.BatchDeleteComplete(ctx, []types.ObjectID{obj.ObjectID}) + return db.BatchDeleteComplete(ctx, []jcstypes.ObjectID{obj.ObjectID}) } diff --git a/client/internal/db/object_access_stat.go b/client/internal/db/object_access_stat.go index 901f215..a8c02ba 100644 --- a/client/internal/db/object_access_stat.go +++ b/client/internal/db/object_access_stat.go @@ -1,7 +1,7 @@ package db import ( - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -14,40 +14,40 @@ func (db *DB) ObjectAccessStat() *ObjectAccessStatDB { return &ObjectAccessStatDB{db} } -func (*ObjectAccessStatDB) Get(ctx 
SQLContext, objID types.ObjectID, spaceID types.UserSpaceID) (types.ObjectAccessStat, error) { - var ret types.ObjectAccessStat +func (*ObjectAccessStatDB) Get(ctx SQLContext, objID jcstypes.ObjectID, spaceID jcstypes.UserSpaceID) (jcstypes.ObjectAccessStat, error) { + var ret jcstypes.ObjectAccessStat err := ctx.Table("ObjectAccessStat"). Where("ObjectID = ? AND UserSpaceID = ?", objID, spaceID). First(&ret).Error return ret, err } -func (*ObjectAccessStatDB) GetByObjectID(ctx SQLContext, objID types.ObjectID) ([]types.ObjectAccessStat, error) { - var ret []types.ObjectAccessStat +func (*ObjectAccessStatDB) GetByObjectID(ctx SQLContext, objID jcstypes.ObjectID) ([]jcstypes.ObjectAccessStat, error) { + var ret []jcstypes.ObjectAccessStat err := ctx.Table("ObjectAccessStat"). Where("ObjectID = ?", objID). Find(&ret).Error return ret, err } -func (*ObjectAccessStatDB) BatchGetByObjectID(ctx SQLContext, objIDs []types.ObjectID) ([]types.ObjectAccessStat, error) { +func (*ObjectAccessStatDB) BatchGetByObjectID(ctx SQLContext, objIDs []jcstypes.ObjectID) ([]jcstypes.ObjectAccessStat, error) { if len(objIDs) == 0 { return nil, nil } - var ret []types.ObjectAccessStat + var ret []jcstypes.ObjectAccessStat err := ctx.Table("ObjectAccessStat"). Where("ObjectID IN ?", objIDs). Find(&ret).Error return ret, err } -func (*ObjectAccessStatDB) BatchGetByObjectIDOnStorage(ctx SQLContext, objIDs []types.ObjectID, spaceID types.UserSpaceID) ([]types.ObjectAccessStat, error) { +func (*ObjectAccessStatDB) BatchGetByObjectIDOnStorage(ctx SQLContext, objIDs []jcstypes.ObjectID, spaceID jcstypes.UserSpaceID) ([]jcstypes.ObjectAccessStat, error) { if len(objIDs) == 0 { return nil, nil } - var ret []types.ObjectAccessStat + var ret []jcstypes.ObjectAccessStat err := ctx.Table("ObjectAccessStat"). Where("ObjectID IN ? AND UserSpaceID = ?", objIDs, spaceID). Find(&ret).Error @@ -60,7 +60,7 @@ func (*ObjectAccessStatDB) BatchAddCounter(ctx SQLContext, entries []AddAccessSt } for _, entry := range entries { - acc := types.ObjectAccessStat{ + acc := jcstypes.ObjectAccessStat{ ObjectID: entry.ObjectID, UserSpaceID: entry.UserSpaceID, Counter: entry.Counter, @@ -80,7 +80,7 @@ func (*ObjectAccessStatDB) BatchAddCounter(ctx SQLContext, entries []AddAccessSt return nil } -func (*ObjectAccessStatDB) BatchUpdateAmountInPackage(ctx SQLContext, pkgIDs []types.PackageID, historyWeight float64) error { +func (*ObjectAccessStatDB) BatchUpdateAmountInPackage(ctx SQLContext, pkgIDs []jcstypes.PackageID, historyWeight float64) error { if len(pkgIDs) == 0 { return nil } @@ -94,12 +94,12 @@ func (*ObjectAccessStatDB) UpdateAllAmount(ctx SQLContext, historyWeight float64 return err } -func (*ObjectAccessStatDB) DeleteByObjectID(ctx SQLContext, objID types.ObjectID) error { +func (*ObjectAccessStatDB) DeleteByObjectID(ctx SQLContext, objID jcstypes.ObjectID) error { err := ctx.Table("ObjectAccessStat").Where("ObjectID = ?", objID).Delete(nil).Error return err } -func (*ObjectAccessStatDB) BatchDeleteByObjectID(ctx SQLContext, objIDs []types.ObjectID) error { +func (*ObjectAccessStatDB) BatchDeleteByObjectID(ctx SQLContext, objIDs []jcstypes.ObjectID) error { if len(objIDs) == 0 { return nil } @@ -108,12 +108,12 @@ func (*ObjectAccessStatDB) BatchDeleteByObjectID(ctx SQLContext, objIDs []types. 
return err } -func (*ObjectAccessStatDB) DeleteInPackage(ctx SQLContext, packageID types.PackageID) error { +func (*ObjectAccessStatDB) DeleteInPackage(ctx SQLContext, packageID jcstypes.PackageID) error { err := ctx.Exec("DELETE o FROM ObjectAccessStat o INNER JOIN Object obj ON o.ObjectID = obj.ObjectID WHERE obj.PackageID = ?", packageID).Error return err } -func (*ObjectAccessStatDB) DeleteByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) error { - err := ctx.Table("ObjectAccessStat").Where("UserSpaceID = ?", spaceID).Delete(&types.ObjectAccessStat{}).Error +func (*ObjectAccessStatDB) DeleteByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) error { + err := ctx.Table("ObjectAccessStat").Where("UserSpaceID = ?", spaceID).Delete(&jcstypes.ObjectAccessStat{}).Error return err } diff --git a/client/internal/db/object_block.go b/client/internal/db/object_block.go index 96ccbb4..08d357d 100644 --- a/client/internal/db/object_block.go +++ b/client/internal/db/object_block.go @@ -1,7 +1,7 @@ package db import ( - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm/clause" ) @@ -13,24 +13,24 @@ func (db *DB) ObjectBlock() *ObjectBlockDB { return &ObjectBlockDB{DB: db} } -func (db *ObjectBlockDB) GetByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) ([]types.ObjectBlock, error) { - var rets []types.ObjectBlock +func (db *ObjectBlockDB) GetByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) ([]jcstypes.ObjectBlock, error) { + var rets []jcstypes.ObjectBlock err := ctx.Table("ObjectBlock").Where("UserSpaceID = ?", spaceID).Find(&rets).Error return rets, err } -func (db *ObjectBlockDB) BatchGetByObjectID(ctx SQLContext, objectIDs []types.ObjectID) ([]types.ObjectBlock, error) { +func (db *ObjectBlockDB) BatchGetByObjectID(ctx SQLContext, objectIDs []jcstypes.ObjectID) ([]jcstypes.ObjectBlock, error) { if len(objectIDs) == 0 { return nil, nil } - var blocks []types.ObjectBlock + var blocks []jcstypes.ObjectBlock err := ctx.Table("ObjectBlock").Where("ObjectID IN (?)", objectIDs).Order("ObjectID, `Index` ASC").Find(&blocks).Error return blocks, err } -func (*ObjectBlockDB) GetInPackageID(ctx SQLContext, packageID types.PackageID) ([]types.ObjectBlock, error) { - var rets []types.ObjectBlock +func (*ObjectBlockDB) GetInPackageID(ctx SQLContext, packageID jcstypes.PackageID) ([]jcstypes.ObjectBlock, error) { + var rets []jcstypes.ObjectBlock err := ctx.Table("ObjectBlock"). Joins("INNER JOIN Object ON ObjectBlock.ObjectID = Object.ObjectID"). Where("Object.PackageID = ?", packageID). 
@@ -39,12 +39,12 @@ func (*ObjectBlockDB) GetInPackageID(ctx SQLContext, packageID types.PackageID) return rets, err } -func (db *ObjectBlockDB) Create(ctx SQLContext, objectID types.ObjectID, index int, spaceID types.UserSpaceID, fileHash types.FileHash, size int64) error { - block := types.ObjectBlock{ObjectID: objectID, Index: index, UserSpaceID: spaceID, FileHash: fileHash, Size: size} +func (db *ObjectBlockDB) Create(ctx SQLContext, objectID jcstypes.ObjectID, index int, spaceID jcstypes.UserSpaceID, fileHash jcstypes.FileHash, size int64) error { + block := jcstypes.ObjectBlock{ObjectID: objectID, Index: index, UserSpaceID: spaceID, FileHash: fileHash, Size: size} return ctx.Table("ObjectBlock").Create(&block).Error } -func (db *ObjectBlockDB) BatchCreate(ctx SQLContext, blocks []types.ObjectBlock) error { +func (db *ObjectBlockDB) BatchCreate(ctx SQLContext, blocks []jcstypes.ObjectBlock) error { if len(blocks) == 0 { return nil } @@ -52,36 +52,36 @@ func (db *ObjectBlockDB) BatchCreate(ctx SQLContext, blocks []types.ObjectBlock) return ctx.Clauses(clause.Insert{Modifier: "ignore"}).Create(&blocks).Error } -func (db *ObjectBlockDB) DeleteByObjectID(ctx SQLContext, objectID types.ObjectID) error { - return ctx.Table("ObjectBlock").Where("ObjectID = ?", objectID).Delete(&types.ObjectBlock{}).Error +func (db *ObjectBlockDB) DeleteByObjectID(ctx SQLContext, objectID jcstypes.ObjectID) error { + return ctx.Table("ObjectBlock").Where("ObjectID = ?", objectID).Delete(&jcstypes.ObjectBlock{}).Error } -func (db *ObjectBlockDB) DeleteByObjectIDIndex(ctx SQLContext, objectID types.ObjectID, index int) error { - return ctx.Table("ObjectBlock").Where("ObjectID = ? AND `Index` = ?", objectID, index).Delete(&types.ObjectBlock{}).Error +func (db *ObjectBlockDB) DeleteByObjectIDIndex(ctx SQLContext, objectID jcstypes.ObjectID, index int) error { + return ctx.Table("ObjectBlock").Where("ObjectID = ? AND `Index` = ?", objectID, index).Delete(&jcstypes.ObjectBlock{}).Error } -func (db *ObjectBlockDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []types.ObjectID) error { +func (db *ObjectBlockDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []jcstypes.ObjectID) error { if len(objectIDs) == 0 { return nil } - return ctx.Table("ObjectBlock").Where("ObjectID IN (?)", objectIDs).Delete(&types.ObjectBlock{}).Error + return ctx.Table("ObjectBlock").Where("ObjectID IN (?)", objectIDs).Delete(&jcstypes.ObjectBlock{}).Error } -func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID types.PackageID) error { - return ctx.Table("ObjectBlock").Where("ObjectID IN (SELECT ObjectID FROM Object WHERE PackageID = ?)", packageID).Delete(&types.ObjectBlock{}).Error +func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID jcstypes.PackageID) error { + return ctx.Table("ObjectBlock").Where("ObjectID IN (SELECT ObjectID FROM Object WHERE PackageID = ?)", packageID).Delete(&jcstypes.ObjectBlock{}).Error } -func (db *ObjectBlockDB) BatchDeleteByFileHash(ctx SQLContext, spaceID types.UserSpaceID, fileHashes []types.FileHash) error { +func (db *ObjectBlockDB) BatchDeleteByFileHash(ctx SQLContext, spaceID jcstypes.UserSpaceID, fileHashes []jcstypes.FileHash) error { if len(fileHashes) == 0 { return nil } - return ctx.Table("ObjectBlock").Where("UserSpaceID = ? AND FileHash IN (?)", spaceID, fileHashes).Delete(&types.ObjectBlock{}).Error + return ctx.Table("ObjectBlock").Where("UserSpaceID = ? 
AND FileHash IN (?)", spaceID, fileHashes).Delete(&jcstypes.ObjectBlock{}).Error } -func (*ObjectBlockDB) DeleteByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) error { - return ctx.Table("ObjectBlock").Where("UserSpaceID = ?", spaceID).Delete(&types.ObjectBlock{}).Error +func (*ObjectBlockDB) DeleteByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) error { + return ctx.Table("ObjectBlock").Where("UserSpaceID = ?", spaceID).Delete(&jcstypes.ObjectBlock{}).Error } func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (int, error) { diff --git a/client/internal/db/package.go b/client/internal/db/package.go index 574ee7f..7d21dca 100644 --- a/client/internal/db/package.go +++ b/client/internal/db/package.go @@ -5,7 +5,7 @@ import ( "time" "github.com/samber/lo" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" ) @@ -17,23 +17,23 @@ func (db *DB) Package() *PackageDB { return &PackageDB{DB: db} } -func (db *PackageDB) GetByID(ctx SQLContext, packageID types.PackageID) (types.Package, error) { - var ret types.Package +func (db *PackageDB) GetByID(ctx SQLContext, packageID jcstypes.PackageID) (jcstypes.Package, error) { + var ret jcstypes.Package err := ctx.Table("Package").Where("PackageID = ?", packageID).First(&ret).Error return ret, err } -func (db *PackageDB) GetByName(ctx SQLContext, bucketID types.BucketID, name string) (types.Package, error) { - var ret types.Package +func (db *PackageDB) GetByName(ctx SQLContext, bucketID jcstypes.BucketID, name string) (jcstypes.Package, error) { + var ret jcstypes.Package err := ctx.Table("Package").Where("BucketID = ? AND Name = ?", bucketID, name).First(&ret).Error return ret, err } -func (db *PackageDB) GetDetail(ctx SQLContext, packageID types.PackageID) (types.PackageDetail, error) { - var pkg types.Package +func (db *PackageDB) GetDetail(ctx SQLContext, packageID jcstypes.PackageID) (jcstypes.PackageDetail, error) { + var pkg jcstypes.Package err := ctx.Table("Package").Where("PackageID = ?", packageID).First(&pkg).Error if err != nil { - return types.PackageDetail{}, err + return jcstypes.PackageDetail{}, err } var ret struct { @@ -47,18 +47,18 @@ func (db *PackageDB) GetDetail(ctx SQLContext, packageID types.PackageID) (types First(&ret). Error if err != nil { - return types.PackageDetail{}, err + return jcstypes.PackageDetail{}, err } - return types.PackageDetail{ + return jcstypes.PackageDetail{ Package: pkg, ObjectCount: ret.ObjectCount, TotalSize: ret.TotalSize, }, nil } -func (db *PackageDB) BatchGetDetailPaged(ctx SQLContext, lastPkgID types.PackageID, count int) ([]types.PackageDetail, error) { - var pkgs []types.Package +func (db *PackageDB) BatchGetDetailPaged(ctx SQLContext, lastPkgID jcstypes.PackageID, count int) ([]jcstypes.PackageDetail, error) { + var pkgs []jcstypes.Package err := ctx.Table("Package"). Where("PackageID > ?", lastPkgID). Order("PackageID ASC"). 
@@ -93,9 +93,9 @@ func (db *PackageDB) BatchGetDetailPaged(ctx SQLContext, lastPkgID types.Package return nil, err } - ret := make([]types.PackageDetail, len(pkgs)) + ret := make([]jcstypes.PackageDetail, len(pkgs)) for i := range pkgs { - ret[i] = types.PackageDetail{ + ret[i] = jcstypes.PackageDetail{ Package: pkgs[i], ObjectCount: details[i].ObjectCount, TotalSize: details[i].TotalSize, @@ -105,12 +105,12 @@ func (db *PackageDB) BatchGetDetailPaged(ctx SQLContext, lastPkgID types.Package return ret, nil } -func (db *PackageDB) BatchTestPackageID(ctx SQLContext, pkgIDs []types.PackageID) (map[types.PackageID]bool, error) { +func (db *PackageDB) BatchTestPackageID(ctx SQLContext, pkgIDs []jcstypes.PackageID) (map[jcstypes.PackageID]bool, error) { if len(pkgIDs) == 0 { - return make(map[types.PackageID]bool), nil + return make(map[jcstypes.PackageID]bool), nil } - var avaiIDs []types.PackageID + var avaiIDs []jcstypes.PackageID err := ctx.Table("Package"). Select("PackageID"). Where("PackageID IN ?", pkgIDs). @@ -119,7 +119,7 @@ func (db *PackageDB) BatchTestPackageID(ctx SQLContext, pkgIDs []types.PackageID return nil, err } - avaiIDMap := make(map[types.PackageID]bool) + avaiIDMap := make(map[jcstypes.PackageID]bool) for _, pkgID := range avaiIDs { avaiIDMap[pkgID] = true } @@ -127,14 +127,14 @@ func (db *PackageDB) BatchTestPackageID(ctx SQLContext, pkgIDs []types.PackageID return avaiIDMap, nil } -func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([]types.PackageID, error) { - var ret []types.PackageID +func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([]jcstypes.PackageID, error) { + var ret []jcstypes.PackageID err := ctx.Table("Package").Select("PackageID").Limit(count).Offset(start).Find(&ret).Error return ret, err } -func (db *PackageDB) GetBucketPackages(ctx SQLContext, bucketID types.BucketID) ([]types.Package, error) { - var ret []types.Package +func (db *PackageDB) GetBucketPackages(ctx SQLContext, bucketID jcstypes.BucketID) ([]jcstypes.Package, error) { + var ret []jcstypes.Package err := ctx.Table("Package"). Select("Package.*"). Where("BucketID = ?", bucketID). @@ -142,8 +142,8 @@ func (db *PackageDB) GetBucketPackages(ctx SQLContext, bucketID types.BucketID) return ret, err } -func (db *PackageDB) GetBucketPackagesByName(ctx SQLContext, bucketName string) ([]types.Package, error) { - var ret []types.Package +func (db *PackageDB) GetBucketPackagesByName(ctx SQLContext, bucketName string) ([]jcstypes.Package, error) { + var ret []jcstypes.Package err := ctx.Table("Package"). Select("Package.*"). Joins("JOIN Bucket ON Package.BucketID = Bucket.BucketID"). @@ -153,8 +153,8 @@ func (db *PackageDB) GetBucketPackagesByName(ctx SQLContext, bucketName string) } // 在指定名称的Bucket中查找指定名称的Package -func (*PackageDB) GetByFullName(ctx SQLContext, bucketName string, packageName string) (types.Package, error) { - var ret types.Package +func (*PackageDB) GetByFullName(ctx SQLContext, bucketName string, packageName string) (jcstypes.Package, error) { + var ret jcstypes.Package err := ctx.Table("Package"). Select("Package.*"). Joins("JOIN Bucket ON Package.BucketID = Bucket.BucketID"). 
@@ -163,7 +163,7 @@ func (*PackageDB) GetByFullName(ctx SQLContext, bucketName string, packageName s return ret, err } -func (db *PackageDB) Create(ctx SQLContext, bucketID types.BucketID, name string, createTime time.Time) (types.Package, error) { +func (db *PackageDB) Create(ctx SQLContext, bucketID jcstypes.BucketID, name string, createTime time.Time) (jcstypes.Package, error) { var packageID int64 err := ctx.Table("Package"). Select("PackageID"). @@ -171,27 +171,27 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID types.BucketID, name string Scan(&packageID).Error if err != nil { - return types.Package{}, err + return jcstypes.Package{}, err } if packageID != 0 { - return types.Package{}, gorm.ErrDuplicatedKey + return jcstypes.Package{}, gorm.ErrDuplicatedKey } - newPackage := types.Package{Name: name, BucketID: bucketID, CreateTime: createTime} + newPackage := jcstypes.Package{Name: name, BucketID: bucketID, CreateTime: createTime} if err := ctx.Create(&newPackage).Error; err != nil { - return types.Package{}, fmt.Errorf("insert package failed, err: %w", err) + return jcstypes.Package{}, fmt.Errorf("insert package failed, err: %w", err) } return newPackage, nil } -func (*PackageDB) Delete(ctx SQLContext, packageID types.PackageID) error { - err := ctx.Delete(&types.Package{}, "PackageID = ?", packageID).Error +func (*PackageDB) Delete(ctx SQLContext, packageID jcstypes.PackageID) error { + err := ctx.Delete(&jcstypes.Package{}, "PackageID = ?", packageID).Error return err } // 删除与Package相关的所有数据 -func (db *PackageDB) DeleteComplete(ctx SQLContext, packageID types.PackageID) error { +func (db *PackageDB) DeleteComplete(ctx SQLContext, packageID jcstypes.PackageID) error { if err := db.Package().Delete(ctx, packageID); err != nil { return fmt.Errorf("delete package state: %w", err) } @@ -219,32 +219,32 @@ func (db *PackageDB) DeleteComplete(ctx SQLContext, packageID types.PackageID) e return nil } -func (*PackageDB) ChangeState(ctx SQLContext, packageID types.PackageID, state string) error { +func (*PackageDB) ChangeState(ctx SQLContext, packageID jcstypes.PackageID, state string) error { err := ctx.Exec("UPDATE Package SET State = ? 
WHERE PackageID = ?", state, packageID).Error return err } // 返回ErrRecordNotFound表示没有找到指定名称的Bucket,nil表示找到了 -func (*PackageDB) HasPackageIn(ctx SQLContext, bucketID types.BucketID) error { - var pkg types.Package +func (*PackageDB) HasPackageIn(ctx SQLContext, bucketID jcstypes.BucketID) error { + var pkg jcstypes.Package return ctx.Table("Package").Where("BucketID = ?", bucketID).First(&pkg).Error } -func (*PackageDB) Move(ctx SQLContext, packageID types.PackageID, newBktID types.BucketID, newName string) error { +func (*PackageDB) Move(ctx SQLContext, packageID jcstypes.PackageID, newBktID jcstypes.BucketID, newName string) error { err := ctx.Table("Package").Where("PackageID = ?", packageID).Update("BucketID", newBktID).Update("Name", newName).Error return err } type AddAccessStatEntry struct { - ObjectID types.ObjectID `json:"objectID"` - PackageID types.PackageID `json:"packageID"` - UserSpaceID types.UserSpaceID `json:"userSpaceID"` - Counter float64 `json:"counter"` + ObjectID jcstypes.ObjectID `json:"objectID"` + PackageID jcstypes.PackageID `json:"packageID"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID"` + Counter float64 `json:"counter"` } func (db *PackageDB) BatchAddPackageAccessStat(ctx SQLContext, entries []AddAccessStatEntry) error { - pkgIDs := make(map[types.PackageID]bool) - objIDs := make(map[types.ObjectID]bool) + pkgIDs := make(map[jcstypes.PackageID]bool) + objIDs := make(map[jcstypes.ObjectID]bool) for _, e := range entries { pkgIDs[e.PackageID] = true objIDs[e.ObjectID] = true @@ -283,15 +283,15 @@ func (db *PackageDB) BatchAddPackageAccessStat(ctx SQLContext, entries []AddAcce } // 尝试创建指定名称的Bucket和Package,如果Bucket不存在,则创建Bucket,如果Package已存在,则直接返回已有的Package -func (db *PackageDB) TryCreateAll(ctx SQLContext, bktName string, pkgName string) (types.Package, error) { +func (db *PackageDB) TryCreateAll(ctx SQLContext, bktName string, pkgName string) (jcstypes.Package, error) { bkt, err := db.Bucket().GetByName(ctx, bktName) if err == gorm.ErrRecordNotFound { bkt, err = db.Bucket().Create(ctx, bktName, time.Now()) if err != nil { - return types.Package{}, fmt.Errorf("create bucket: %w", err) + return jcstypes.Package{}, fmt.Errorf("create bucket: %w", err) } } else if err != nil { - return types.Package{}, fmt.Errorf("get bucket by name: %w", err) + return jcstypes.Package{}, fmt.Errorf("get bucket by name: %w", err) } pkg, err := db.GetByName(ctx, bkt.BucketID, pkgName) @@ -299,12 +299,12 @@ func (db *PackageDB) TryCreateAll(ctx SQLContext, bktName string, pkgName string return pkg, nil } if err != gorm.ErrRecordNotFound { - return types.Package{}, fmt.Errorf("get package by name: %w", err) + return jcstypes.Package{}, fmt.Errorf("get package by name: %w", err) } pkg, err = db.Create(ctx, bkt.BucketID, pkgName, time.Now()) if err != nil { - return types.Package{}, fmt.Errorf("create package: %w", err) + return jcstypes.Package{}, fmt.Errorf("create package: %w", err) } return pkg, nil diff --git a/client/internal/db/package_access_stat.go b/client/internal/db/package_access_stat.go index fa001a6..e1c276b 100644 --- a/client/internal/db/package_access_stat.go +++ b/client/internal/db/package_access_stat.go @@ -1,7 +1,7 @@ package db import ( - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -14,24 +14,24 @@ func (db *DB) PackageAccessStat() *PackageAccessStatDB { return &PackageAccessStatDB{db} } -func (*PackageAccessStatDB) Get(ctx SQLContext, pkgID 
types.PackageID, spaceID types.UserSpaceID) (types.PackageAccessStat, error) { - var ret types.PackageAccessStat +func (*PackageAccessStatDB) Get(ctx SQLContext, pkgID jcstypes.PackageID, spaceID jcstypes.UserSpaceID) (jcstypes.PackageAccessStat, error) { + var ret jcstypes.PackageAccessStat err := ctx.Table("PackageAccessStat").Where("PackageID = ? AND UserSpaceID = ?", pkgID, spaceID).First(&ret).Error return ret, err } -func (*PackageAccessStatDB) GetByPackageID(ctx SQLContext, pkgID types.PackageID) ([]types.PackageAccessStat, error) { - var ret []types.PackageAccessStat +func (*PackageAccessStatDB) GetByPackageID(ctx SQLContext, pkgID jcstypes.PackageID) ([]jcstypes.PackageAccessStat, error) { + var ret []jcstypes.PackageAccessStat err := ctx.Table("PackageAccessStat").Where("PackageID = ?", pkgID).Find(&ret).Error return ret, err } -func (*PackageAccessStatDB) BatchGetByPackageID(ctx SQLContext, pkgIDs []types.PackageID) ([]types.PackageAccessStat, error) { +func (*PackageAccessStatDB) BatchGetByPackageID(ctx SQLContext, pkgIDs []jcstypes.PackageID) ([]jcstypes.PackageAccessStat, error) { if len(pkgIDs) == 0 { return nil, nil } - var ret []types.PackageAccessStat + var ret []jcstypes.PackageAccessStat err := ctx.Table("PackageAccessStat").Where("PackageID IN (?)", pkgIDs).Find(&ret).Error return ret, err } @@ -41,9 +41,9 @@ func (*PackageAccessStatDB) BatchAddCounter(ctx SQLContext, entries []AddAccessS return nil } - accs := make([]types.PackageAccessStat, len(entries)) + accs := make([]jcstypes.PackageAccessStat, len(entries)) for i, e := range entries { - accs[i] = types.PackageAccessStat{ + accs[i] = jcstypes.PackageAccessStat{ PackageID: e.PackageID, UserSpaceID: e.UserSpaceID, Counter: e.Counter, @@ -58,7 +58,7 @@ func (*PackageAccessStatDB) BatchAddCounter(ctx SQLContext, entries []AddAccessS }).Table("PackageAccessStat").Create(&accs).Error } -func (*PackageAccessStatDB) BatchUpdateAmount(ctx SQLContext, pkgIDs []types.PackageID, historyWeight float64) error { +func (*PackageAccessStatDB) BatchUpdateAmount(ctx SQLContext, pkgIDs []jcstypes.PackageID, historyWeight float64) error { if len(pkgIDs) == 0 { return nil } @@ -72,10 +72,10 @@ func (*PackageAccessStatDB) UpdateAllAmount(ctx SQLContext, historyWeight float6 return ctx.Exec(sql, historyWeight, historyWeight).Error } -func (*PackageAccessStatDB) DeleteByPackageID(ctx SQLContext, pkgID types.PackageID) error { - return ctx.Table("PackageAccessStat").Where("PackageID = ?", pkgID).Delete(&types.PackageAccessStat{}).Error +func (*PackageAccessStatDB) DeleteByPackageID(ctx SQLContext, pkgID jcstypes.PackageID) error { + return ctx.Table("PackageAccessStat").Where("PackageID = ?", pkgID).Delete(&jcstypes.PackageAccessStat{}).Error } -func (*PackageAccessStatDB) DeleteByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) error { - return ctx.Table("PackageAccessStat").Where("UserSpaceID = ?", spaceID).Delete(&types.PackageAccessStat{}).Error +func (*PackageAccessStatDB) DeleteByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) error { + return ctx.Table("PackageAccessStat").Where("UserSpaceID = ?", spaceID).Delete(&jcstypes.PackageAccessStat{}).Error } diff --git a/client/internal/db/pinned_object.go b/client/internal/db/pinned_object.go index f18e188..6e157c5 100644 --- a/client/internal/db/pinned_object.go +++ b/client/internal/db/pinned_object.go @@ -3,7 +3,7 @@ package db import ( "time" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" 
"gorm.io/gorm/clause" ) @@ -15,40 +15,40 @@ func (db *DB) PinnedObject() *PinnedObjectDB { return &PinnedObjectDB{DB: db} } -func (*PinnedObjectDB) GetByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) ([]types.PinnedObject, error) { - var ret []types.PinnedObject +func (*PinnedObjectDB) GetByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) ([]jcstypes.PinnedObject, error) { + var ret []jcstypes.PinnedObject err := ctx.Table("PinnedObject").Find(&ret, "UserSpaceID = ?", spaceID).Error return ret, err } -func (*PinnedObjectDB) GetObjectsByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) ([]types.Object, error) { - var ret []types.Object +func (*PinnedObjectDB) GetObjectsByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) ([]jcstypes.Object, error) { + var ret []jcstypes.Object err := ctx.Table("Object").Joins("inner join PinnedObject on Object.ObjectID = PinnedObject.ObjectID").Where("UserSpaceID = ?", spaceID).Find(&ret).Error return ret, err } -func (*PinnedObjectDB) Create(ctx SQLContext, spaceID types.UserSpaceID, objectID types.ObjectID, createTime time.Time) error { - return ctx.Table("PinnedObject").Create(&types.PinnedObject{UserSpaceID: spaceID, ObjectID: objectID, CreateTime: createTime}).Error +func (*PinnedObjectDB) Create(ctx SQLContext, spaceID jcstypes.UserSpaceID, objectID jcstypes.ObjectID, createTime time.Time) error { + return ctx.Table("PinnedObject").Create(&jcstypes.PinnedObject{UserSpaceID: spaceID, ObjectID: objectID, CreateTime: createTime}).Error } -func (*PinnedObjectDB) BatchGetByObjectID(ctx SQLContext, objectIDs []types.ObjectID) ([]types.PinnedObject, error) { +func (*PinnedObjectDB) BatchGetByObjectID(ctx SQLContext, objectIDs []jcstypes.ObjectID) ([]jcstypes.PinnedObject, error) { if len(objectIDs) == 0 { return nil, nil } - var pinneds []types.PinnedObject + var pinneds []jcstypes.PinnedObject err := ctx.Table("PinnedObject").Where("ObjectID in (?)", objectIDs).Order("ObjectID asc").Find(&pinneds).Error return pinneds, err } -func (*PinnedObjectDB) TryCreate(ctx SQLContext, spaceID types.UserSpaceID, objectID types.ObjectID, createTime time.Time) error { +func (*PinnedObjectDB) TryCreate(ctx SQLContext, spaceID jcstypes.UserSpaceID, objectID jcstypes.ObjectID, createTime time.Time) error { return ctx.Clauses(clause.OnConflict{ Columns: []clause.Column{{Name: "ObjectID"}, {Name: "UserSpaceID"}}, DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}), - }).Create(&types.PinnedObject{UserSpaceID: spaceID, ObjectID: objectID, CreateTime: createTime}).Error + }).Create(&jcstypes.PinnedObject{UserSpaceID: spaceID, ObjectID: objectID, CreateTime: createTime}).Error } -func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []types.PinnedObject) error { +func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []jcstypes.PinnedObject) error { if len(pinneds) == 0 { return nil } @@ -59,7 +59,7 @@ func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []types.PinnedObje }).Create(&pinneds).Error } -func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID types.PackageID, spaceID types.UserSpaceID) error { +func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID jcstypes.PackageID, spaceID jcstypes.UserSpaceID) error { err := ctx.Exec( "insert ignore into PinnedObject(UserSpaceID, ObjectID, CreateTime) select ? as UserSpaceID, ObjectID, ? 
as CreateTime from Object where PackageID = ?", spaceID, @@ -69,7 +69,7 @@ func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID types.Package return err } -func (db *PinnedObjectDB) ObjectBatchCreate(ctx SQLContext, objectID types.ObjectID, spaceIDs []types.UserSpaceID) error { +func (db *PinnedObjectDB) ObjectBatchCreate(ctx SQLContext, objectID jcstypes.ObjectID, spaceIDs []jcstypes.UserSpaceID) error { if len(spaceIDs) == 0 { return nil } @@ -83,45 +83,45 @@ func (db *PinnedObjectDB) ObjectBatchCreate(ctx SQLContext, objectID types.Objec return nil } -func (*PinnedObjectDB) Delete(ctx SQLContext, spaceID types.UserSpaceID, objectID types.ObjectID) error { +func (*PinnedObjectDB) Delete(ctx SQLContext, spaceID jcstypes.UserSpaceID, objectID jcstypes.ObjectID) error { err := ctx.Exec("delete from PinnedObject where UserSpaceID = ? and ObjectID = ?", spaceID, objectID).Error return err } -func (*PinnedObjectDB) DeleteByObjectID(ctx SQLContext, objectID types.ObjectID) error { +func (*PinnedObjectDB) DeleteByObjectID(ctx SQLContext, objectID jcstypes.ObjectID) error { err := ctx.Exec("delete from PinnedObject where ObjectID = ?", objectID).Error return err } -func (*PinnedObjectDB) DeleteByUserSpaceID(ctx SQLContext, spaceID types.UserSpaceID) error { +func (*PinnedObjectDB) DeleteByUserSpaceID(ctx SQLContext, spaceID jcstypes.UserSpaceID) error { err := ctx.Exec("delete from PinnedObject where UserSpaceID = ?", spaceID).Error return err } -func (*PinnedObjectDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []types.ObjectID) error { +func (*PinnedObjectDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []jcstypes.ObjectID) error { if len(objectIDs) == 0 { return nil } - err := ctx.Table("PinnedObject").Where("ObjectID in (?)", objectIDs).Delete(&types.PinnedObject{}).Error + err := ctx.Table("PinnedObject").Where("ObjectID in (?)", objectIDs).Delete(&jcstypes.PinnedObject{}).Error return err } -func (*PinnedObjectDB) DeleteInPackage(ctx SQLContext, packageID types.PackageID) error { - err := ctx.Table("PinnedObject").Where("ObjectID in (select ObjectID from Object where PackageID = ?)", packageID).Delete(&types.PinnedObject{}).Error +func (*PinnedObjectDB) DeleteInPackage(ctx SQLContext, packageID jcstypes.PackageID) error { + err := ctx.Table("PinnedObject").Where("ObjectID in (select ObjectID from Object where PackageID = ?)", packageID).Delete(&jcstypes.PinnedObject{}).Error return err } -func (*PinnedObjectDB) DeleteInPackageAtStorage(ctx SQLContext, packageID types.PackageID, spaceID types.UserSpaceID) error { +func (*PinnedObjectDB) DeleteInPackageAtStorage(ctx SQLContext, packageID jcstypes.PackageID, spaceID jcstypes.UserSpaceID) error { err := ctx.Exec("delete PinnedObject from PinnedObject inner join Object on PinnedObject.ObjectID = Object.ObjectID where PackageID = ? and UserSpaceID = ?", packageID, spaceID).Error return err } -func (*PinnedObjectDB) BatchDelete(ctx SQLContext, spaceID types.UserSpaceID, objectIDs []types.ObjectID) error { +func (*PinnedObjectDB) BatchDelete(ctx SQLContext, spaceID jcstypes.UserSpaceID, objectIDs []jcstypes.ObjectID) error { if len(objectIDs) == 0 { return nil } - err := ctx.Table("PinnedObject").Where("UserSpaceID = ? and ObjectID in (?)", spaceID, objectIDs).Delete(&types.PinnedObject{}).Error + err := ctx.Table("PinnedObject").Where("UserSpaceID = ? 
and ObjectID in (?)", spaceID, objectIDs).Delete(&jcstypes.PinnedObject{}).Error return err } diff --git a/client/internal/db/space_sync_task.go b/client/internal/db/space_sync_task.go index f378938..9acf88e 100644 --- a/client/internal/db/space_sync_task.go +++ b/client/internal/db/space_sync_task.go @@ -1,6 +1,6 @@ package db -import "gitlink.org.cn/cloudream/jcs-pub/common/types" +import jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" type SpaceSyncTaskDB struct { *DB @@ -10,24 +10,24 @@ func (db *DB) SpaceSyncTask() *SpaceSyncTaskDB { return &SpaceSyncTaskDB{db} } -func (db *SpaceSyncTaskDB) Create(ctx SQLContext, task *types.SpaceSyncTask) error { +func (db *SpaceSyncTaskDB) Create(ctx SQLContext, task *jcstypes.SpaceSyncTask) error { return ctx.Create(task).Error } -func (db *SpaceSyncTaskDB) GetAll(ctx SQLContext) ([]types.SpaceSyncTask, error) { - var tasks []types.SpaceSyncTask +func (db *SpaceSyncTaskDB) GetAll(ctx SQLContext) ([]jcstypes.SpaceSyncTask, error) { + var tasks []jcstypes.SpaceSyncTask err := ctx.Find(&tasks).Order("TaskID ASC").Error return tasks, err } -func (*SpaceSyncTaskDB) Delete(ctx SQLContext, taskID types.SpaceSyncTaskID) error { - return ctx.Delete(&types.SpaceSyncTask{}, taskID).Error +func (*SpaceSyncTaskDB) Delete(ctx SQLContext, taskID jcstypes.SpaceSyncTaskID) error { + return ctx.Delete(&jcstypes.SpaceSyncTask{}, taskID).Error } -func (*SpaceSyncTaskDB) BatchDelete(ctx SQLContext, taskIDs []types.SpaceSyncTaskID) error { +func (*SpaceSyncTaskDB) BatchDelete(ctx SQLContext, taskIDs []jcstypes.SpaceSyncTaskID) error { if len(taskIDs) == 0 { return nil } - return ctx.Where("TaskID IN (?)", taskIDs).Delete(&types.SpaceSyncTask{}).Error + return ctx.Where("TaskID IN (?)", taskIDs).Delete(&jcstypes.SpaceSyncTask{}).Error } diff --git a/client/internal/db/user_space.go b/client/internal/db/user_space.go index ba06631..fbde451 100644 --- a/client/internal/db/user_space.go +++ b/client/internal/db/user_space.go @@ -1,7 +1,7 @@ package db import ( - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gorm.io/gorm/clause" ) @@ -13,53 +13,53 @@ func (db *DB) UserSpace() *UserSpaceDB { return &UserSpaceDB{DB: db} } -func (db *UserSpaceDB) GetByID(ctx SQLContext, spaceID types.UserSpaceID) (types.UserSpace, error) { - var stg types.UserSpace +func (db *UserSpaceDB) GetByID(ctx SQLContext, spaceID jcstypes.UserSpaceID) (jcstypes.UserSpace, error) { + var stg jcstypes.UserSpace err := ctx.Table("UserSpace").First(&stg, spaceID).Error return stg, err } -func (UserSpaceDB) GetAllIDs(ctx SQLContext) ([]types.UserSpaceID, error) { - var stgs []types.UserSpaceID +func (UserSpaceDB) GetAllIDs(ctx SQLContext) ([]jcstypes.UserSpaceID, error) { + var stgs []jcstypes.UserSpaceID err := ctx.Table("UserSpace").Select("UserSpaceID").Find(&stgs).Error return stgs, err } -func (db *UserSpaceDB) BatchGetByID(ctx SQLContext, spaceIDs []types.UserSpaceID) ([]types.UserSpace, error) { - var stgs []types.UserSpace +func (db *UserSpaceDB) BatchGetByID(ctx SQLContext, spaceIDs []jcstypes.UserSpaceID) ([]jcstypes.UserSpace, error) { + var stgs []jcstypes.UserSpace err := ctx.Table("UserSpace").Find(&stgs, "UserSpaceID IN (?)", spaceIDs).Error return stgs, err } -func (db *UserSpaceDB) GetAll(ctx SQLContext) ([]types.UserSpace, error) { - var stgs []types.UserSpace +func (db *UserSpaceDB) GetAll(ctx SQLContext) ([]jcstypes.UserSpace, error) { + var stgs []jcstypes.UserSpace err := ctx.Table("UserSpace").Find(&stgs).Error 
return stgs, err } -func (db *UserSpaceDB) BatchGetAllSpaceIDs(ctx SQLContext, start int, count int) ([]types.UserSpaceID, error) { - var ret []types.UserSpaceID +func (db *UserSpaceDB) BatchGetAllSpaceIDs(ctx SQLContext, start int, count int) ([]jcstypes.UserSpaceID, error) { + var ret []jcstypes.UserSpaceID err := ctx.Table("UserSpace").Select("UserSpaceID").Find(&ret).Limit(count).Offset(start).Error return ret, err } -func (db *UserSpaceDB) GetByName(ctx SQLContext, name string) (types.UserSpace, error) { - var stg types.UserSpace +func (db *UserSpaceDB) GetByName(ctx SQLContext, name string) (jcstypes.UserSpace, error) { + var stg jcstypes.UserSpace err := ctx.Table("UserSpace").Where("Name = ?", name).First(&stg).Error return stg, err } -func (*UserSpaceDB) Create(ctx SQLContext, space *types.UserSpace) error { +func (*UserSpaceDB) Create(ctx SQLContext, space *jcstypes.UserSpace) error { return ctx.Table("UserSpace").Create(space).Error } -func (*UserSpaceDB) UpdateColumns(ctx SQLContext, space types.UserSpace, columns ...string) error { +func (*UserSpaceDB) UpdateColumns(ctx SQLContext, space jcstypes.UserSpace, columns ...string) error { return ctx.Clauses(clause.OnConflict{ Columns: []clause.Column{{Name: "UserSpaceID"}}, DoUpdates: clause.AssignmentColumns(columns), }).Create(space).Error } -func (*UserSpaceDB) DeleteByID(ctx SQLContext, spaceID types.UserSpaceID) error { - return ctx.Table("UserSpace").Delete(types.UserSpace{}, "UserSpaceID = ?", spaceID).Error +func (*UserSpaceDB) DeleteByID(ctx SQLContext, spaceID jcstypes.UserSpaceID) error { + return ctx.Table("UserSpace").Delete(jcstypes.UserSpace{}, "UserSpaceID = ?", spaceID).Error } diff --git a/client/internal/downloader/iterator.go b/client/internal/downloader/iterator.go index 177ac4d..fa0e5f6 100644 --- a/client/internal/downloader/iterator.go +++ b/client/internal/downloader/iterator.go @@ -18,13 +18,13 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/iterator" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type downloadSpaceInfo struct { - Space types.UserSpaceDetail + Space jcstypes.UserSpaceDetail ObjectPinned bool - Blocks []types.ObjectBlock + Blocks []jcstypes.ObjectBlock Distance float64 } diff --git a/client/internal/downloader/strategy/selector.go b/client/internal/downloader/strategy/selector.go index bdeb2f7..98f81c5 100644 --- a/client/internal/downloader/strategy/selector.go +++ b/client/internal/downloader/strategy/selector.go @@ -11,50 +11,49 @@ import ( "gitlink.org.cn/cloudream/common/utils/sort2" "gitlink.org.cn/cloudream/jcs-pub/client/internal/metacache" "gitlink.org.cn/cloudream/jcs-pub/common/consts" - "gitlink.org.cn/cloudream/jcs-pub/common/types" jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type Request struct { - Detail types.ObjectDetail + Detail jcstypes.ObjectDetail Range math2.Range DestLocation jcstypes.Location } type Strategy interface { - GetDetail() types.ObjectDetail + GetDetail() jcstypes.ObjectDetail } // 直接下载完整对象 type DirectStrategy struct { - Detail types.ObjectDetail - UserSpace types.UserSpaceDetail + Detail jcstypes.ObjectDetail + UserSpace jcstypes.UserSpaceDetail } -func (s *DirectStrategy) GetDetail() types.ObjectDetail { +func (s *DirectStrategy) GetDetail() jcstypes.ObjectDetail { return s.Detail } // 从指定对象重建对象 type ECReconstructStrategy struct { - Detail 
types.ObjectDetail - Redundancy types.ECRedundancy - Blocks []types.ObjectBlock - UserSpaces []types.UserSpaceDetail + Detail jcstypes.ObjectDetail + Redundancy jcstypes.ECRedundancy + Blocks []jcstypes.ObjectBlock + UserSpaces []jcstypes.UserSpaceDetail } -func (s *ECReconstructStrategy) GetDetail() types.ObjectDetail { +func (s *ECReconstructStrategy) GetDetail() jcstypes.ObjectDetail { return s.Detail } type LRCReconstructStrategy struct { - Detail types.ObjectDetail - Redundancy types.LRCRedundancy - Blocks []types.ObjectBlock - Spaces []types.UserSpaceDetail + Detail jcstypes.ObjectDetail + Redundancy jcstypes.LRCRedundancy + Blocks []jcstypes.ObjectBlock + Spaces []jcstypes.UserSpaceDetail } -func (s *LRCReconstructStrategy) GetDetail() types.ObjectDetail { +func (s *LRCReconstructStrategy) GetDetail() jcstypes.ObjectDetail { return s.Detail } @@ -82,16 +81,16 @@ func (s *Selector) Select(req Request) (Strategy, error) { } switch red := req.Detail.Object.Redundancy.(type) { - case *types.NoneRedundancy: + case *jcstypes.NoneRedundancy: return s.selectForNoneOrRep(req2) - case *types.RepRedundancy: + case *jcstypes.RepRedundancy: return s.selectForNoneOrRep(req2) - case *types.ECRedundancy: + case *jcstypes.ECRedundancy: return s.selectForEC(req2, *red) - case *types.LRCRedundancy: + case *jcstypes.LRCRedundancy: return s.selectForLRC(req2, *red) } @@ -99,19 +98,19 @@ func (s *Selector) Select(req Request) (Strategy, error) { } type downloadSpaceInfo struct { - Space types.UserSpaceDetail + Space jcstypes.UserSpaceDetail ObjectPinned bool - Blocks []types.ObjectBlock + Blocks []jcstypes.ObjectBlock Distance float64 } type downloadBlock struct { - Space types.UserSpaceDetail - Block types.ObjectBlock + Space jcstypes.UserSpaceDetail + Block jcstypes.ObjectBlock } type request2 struct { - Detail types.ObjectDetail + Detail jcstypes.ObjectDetail Range math2.Range DestLocation jcstypes.Location } @@ -133,7 +132,7 @@ func (s *Selector) selectForNoneOrRep(req request2) (Strategy, error) { }, nil } -func (s *Selector) selectForEC(req request2, red types.ECRedundancy) (Strategy, error) { +func (s *Selector) selectForEC(req request2, red jcstypes.ECRedundancy) (Strategy, error) { sortedStgs := s.sortDownloadStorages(req) if len(sortedStgs) == 0 { return nil, fmt.Errorf("no storage available for download") @@ -143,8 +142,8 @@ func (s *Selector) selectForEC(req request2, red types.ECRedundancy) (Strategy, osc, stg := s.getMinReadingObjectSolution(sortedStgs, red.K) if bsc < osc { - bs := make([]types.ObjectBlock, len(blocks)) - ss := make([]types.UserSpaceDetail, len(blocks)) + bs := make([]jcstypes.ObjectBlock, len(blocks)) + ss := make([]jcstypes.UserSpaceDetail, len(blocks)) for i, b := range blocks { bs[i] = b.Block ss[i] = b.Space @@ -169,7 +168,7 @@ func (s *Selector) selectForEC(req request2, red types.ECRedundancy) (Strategy, }, nil } -func (s *Selector) selectForLRC(req request2, red types.LRCRedundancy) (Strategy, error) { +func (s *Selector) selectForLRC(req request2, red jcstypes.LRCRedundancy) (Strategy, error) { sortedStgs := s.sortDownloadStorages(req) if len(sortedStgs) == 0 { return nil, fmt.Errorf("no storage available for download") @@ -193,8 +192,8 @@ func (s *Selector) selectForLRC(req request2, red types.LRCRedundancy) (Strategy return nil, fmt.Errorf("not enough blocks to download lrc object") } - bs := make([]types.ObjectBlock, len(blocks)) - ss := make([]types.UserSpaceDetail, len(blocks)) + bs := make([]jcstypes.ObjectBlock, len(blocks)) + ss := 
make([]jcstypes.UserSpaceDetail, len(blocks)) for i, b := range blocks { bs[i] = b.Block ss[i] = b.Space @@ -209,7 +208,7 @@ func (s *Selector) selectForLRC(req request2, red types.LRCRedundancy) (Strategy } func (s *Selector) sortDownloadStorages(req request2) []*downloadSpaceInfo { - var spaceIDs []types.UserSpaceID + var spaceIDs []jcstypes.UserSpaceID for _, id := range req.Detail.PinnedAt { if !lo.Contains(spaceIDs, id) { spaceIDs = append(spaceIDs, id) @@ -221,7 +220,7 @@ func (s *Selector) sortDownloadStorages(req request2) []*downloadSpaceInfo { } } - downloadSpaceMap := make(map[types.UserSpaceID]*downloadSpaceInfo) + downloadSpaceMap := make(map[jcstypes.UserSpaceID]*downloadSpaceInfo) for _, id := range req.Detail.PinnedAt { storage, ok := downloadSpaceMap[id] if !ok { @@ -264,7 +263,7 @@ func (s *Selector) sortDownloadStorages(req request2) []*downloadSpaceInfo { }) } -func (s *Selector) getStorageDistance(req request2, src types.UserSpaceDetail) float64 { +func (s *Selector) getStorageDistance(req request2, src jcstypes.UserSpaceDetail) float64 { // TODO 重新设计计算方式 // if req.DestHub != nil { @@ -315,9 +314,9 @@ func (s *Selector) getMinReadingBlockSolution(sortedStgs []*downloadSpaceInfo, k return math.MaxFloat64, gotBlocks } -func (s *Selector) getMinReadingObjectSolution(sortedStgs []*downloadSpaceInfo, k int) (float64, types.UserSpaceDetail) { +func (s *Selector) getMinReadingObjectSolution(sortedStgs []*downloadSpaceInfo, k int) (float64, jcstypes.UserSpaceDetail) { dist := math.MaxFloat64 - var downloadSpace types.UserSpaceDetail + var downloadSpace jcstypes.UserSpaceDetail for _, n := range sortedStgs { if n.ObjectPinned && float64(k)*n.Distance < dist { dist = float64(k) * n.Distance diff --git a/client/internal/downloader/strip_iterator.go b/client/internal/downloader/strip_iterator.go index c8fc24a..68b7b67 100644 --- a/client/internal/downloader/strip_iterator.go +++ b/client/internal/downloader/strip_iterator.go @@ -12,12 +12,12 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type downloadBlock struct { - Space types.UserSpaceDetail - Block types.ObjectBlock + Space jcstypes.UserSpaceDetail + Block jcstypes.ObjectBlock } type Strip struct { @@ -27,9 +27,9 @@ type Strip struct { type StripIterator struct { downloader *Downloader - object types.Object + object jcstypes.Object blocks []downloadBlock - red types.ECRedundancy + red jcstypes.ECRedundancy curStripIndex int64 cache *StripCache dataChan chan dataChanEntry @@ -47,7 +47,7 @@ type dataChanEntry struct { Error error } -func NewStripIterator(downloader *Downloader, object types.Object, blocks []downloadBlock, red types.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator { +func NewStripIterator(downloader *Downloader, object jcstypes.Object, blocks []downloadBlock, red jcstypes.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator { if maxPrefetch <= 0 { maxPrefetch = 1 } diff --git a/client/internal/http/types/config.go b/client/internal/http/types/config.go index e6c1697..d28c81a 100644 --- a/client/internal/http/types/config.go +++ b/client/internal/http/types/config.go @@ -9,7 +9,7 @@ import ( "os" "gitlink.org.cn/cloudream/jcs-pub/client/sdk/signer" - 
"gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ConfigJSON struct { @@ -19,9 +19,9 @@ type ConfigJSON struct { ServerCert string `json:"serverCert"` ServerKey string `json:"serverKey"` // 可信的客户端证书列表,在进行预签名接口中会用到。 - ClientCerts []string `json:"clientCerts"` - MaxBodySize int64 `json:"maxBodySize"` - UserSpaceID types.UserSpaceID `json:"userSpaceID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。 + ClientCerts []string `json:"clientCerts"` + MaxBodySize int64 `json:"maxBodySize"` + UserSpaceID jcstypes.UserSpaceID `json:"userSpaceID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。 } func (c *ConfigJSON) Build() (Config, error) { @@ -97,7 +97,7 @@ type Config struct { ServerCert tls.Certificate ClientCerts map[string]*ClientCert MaxBodySize int64 - UserSpaceID types.UserSpaceID + UserSpaceID jcstypes.UserSpaceID } type ClientCert struct { diff --git a/client/internal/http/v1/pub_shards.go b/client/internal/http/v1/pub_shards.go new file mode 100644 index 0000000..0d6f342 --- /dev/null +++ b/client/internal/http/v1/pub_shards.go @@ -0,0 +1,112 @@ +package http + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin" + "gitlink.org.cn/cloudream/common/pkgs/logger" + "gitlink.org.cn/cloudream/jcs-pub/client/internal/http/types" + cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" + "gitlink.org.cn/cloudream/jcs-pub/common/ecode" + stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" + corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" +) + +type PubShardsService struct { + *Server +} + +func (s *Server) PubShards() *PubShardsService { + return &PubShardsService{s} +} + +func (s *PubShardsService) Create(ctx *gin.Context) { + log := logger.WithField("HTTP", "PubShards.Create") + + req, err := types.ShouldBindJSONEx[cliapi.PubShardsCreate](ctx) + if err != nil { + log.Warnf("binding body: %s", err.Error()) + ctx.JSON(http.StatusBadRequest, types.Failed(ecode.BadArgument, "%v", err)) + return + } + + if stgglb.StandaloneMode { + ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, "client is not online")) + return + } + + corCli := stgglb.CoordinatorRPCPool.Get() + defer corCli.Release() + + resp, cerr := corCli.CreatePubShards(ctx.Request.Context(), &corrpc.CreatePubShards{ + Password: req.Password, + MasterHub: req.MasterHub, + Name: req.Name, + Storage: req.Storage, + Credential: req.Credential, + ShardStore: req.ShardStore, + Features: req.Features, + WorkingDir: jcstypes.PathFromJcsPathString(req.WorkingDir), + }) + if cerr != nil { + ctx.JSON(http.StatusOK, types.Failed(ecode.ErrorCode(cerr.Code), cerr.Message)) + return + } + ctx.JSON(http.StatusOK, types.OK(cliapi.PubShardsCreateResp{ + PubShards: resp.PubShardStore, + })) +} + +func (s *PubShardsService) Join(ctx *gin.Context) { + log := logger.WithField("HTTP", "PubShards.Join") + + var req cliapi.PubShardsJoin + if err := ctx.ShouldBindJSON(&req); err != nil { + log.Warnf("binding body: %s", err.Error()) + ctx.JSON(http.StatusBadRequest, types.Failed(ecode.BadArgument, "%v", err)) + return + } + + if stgglb.StandaloneMode { + ctx.JSON(http.StatusOK, types.Failed(ecode.OperationFailed, "client is not online")) + return + } + + corCli := stgglb.CoordinatorRPCPool.Get() + defer corCli.Release() + + resp, cerr := corCli.UserGetPubShards(ctx.Request.Context(), &corrpc.UserGetPubShards{ + PubShardsID: req.PubShardsID, + Password: req.Password, + }) + if cerr != nil { + ctx.JSON(http.StatusOK, 
types.Failed(ecode.ErrorCode(cerr.Code), cerr.Message)) + return + } + + resp2, cerr2 := s.svc.UserSpaceSvc().Create(cliapi.UserSpaceCreate{ + Name: req.Name, + Storage: &jcstypes.PubShardsType{ + Type: "PubShards", + Base: resp.PubShards.Storage, + PubShardsID: req.PubShardsID, + Password: req.Password, + MasterHub: resp.MasterHub.HubID, + }, + Credential: resp.PubShards.Credential, + ShardStore: &resp.PubShards.ShardStore, + Features: resp.PubShards.Features, + WorkingDir: resp.PubShards.WorkingDir.ConcatCompsNew("parts", fmt.Sprintf("%v", s.svc.AccToken.GetToken().UserID)).String(), + }) + if cerr2 != nil { + ctx.JSON(http.StatusOK, types.Failed(ecode.ErrorCode(cerr2.Code), cerr2.Message)) + return + } + + ctx.JSON(http.StatusOK, types.OK(cliapi.PubShardsJoinResp{ + PubShards: resp.PubShards, + UserSpace: resp2.UserSpace, + })) +} diff --git a/client/internal/http/v1/server.go b/client/internal/http/v1/server.go index 7b9b8db..2fdc349 100644 --- a/client/internal/http/v1/server.go +++ b/client/internal/http/v1/server.go @@ -86,4 +86,7 @@ func (s *Server) InitRouters(rt gin.IRoutes, ah *auth.Auth) { rt.POST(cliapi.TickTockRunJobPath, certAuth, s.TickTock().RunJob) rt.GET(cliapi.SystemStatusPath, certAuth, s.System().Status) + + rt.POST(cliapi.PubShardsCreatePath, certAuth, s.PubShards().Create) + rt.POST(cliapi.PubShardsJoinPath, certAuth, s.PubShards().Join) } diff --git a/client/internal/metacache/user_space_meta.go b/client/internal/metacache/user_space_meta.go index eb1a9c7..10c2ce5 100644 --- a/client/internal/metacache/user_space_meta.go +++ b/client/internal/metacache/user_space_meta.go @@ -7,7 +7,6 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" - "gitlink.org.cn/cloudream/jcs-pub/common/types" jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) @@ -15,7 +14,7 @@ func (m *MetaCacheHost) AddStorageMeta() *UserSpaceMeta { meta := &UserSpaceMeta{ host: m, } - meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[types.UserSpaceID, types.UserSpaceDetail]{ + meta.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[jcstypes.UserSpaceID, jcstypes.UserSpaceDetail]{ Getter: meta.load, Expire: time.Minute * 5, }) @@ -26,10 +25,10 @@ func (m *MetaCacheHost) AddStorageMeta() *UserSpaceMeta { type UserSpaceMeta struct { host *MetaCacheHost - cache *SimpleMetaCache[types.UserSpaceID, types.UserSpaceDetail] + cache *SimpleMetaCache[jcstypes.UserSpaceID, jcstypes.UserSpaceDetail] } -func (s *UserSpaceMeta) Get(spaceID types.UserSpaceID) *types.UserSpaceDetail { +func (s *UserSpaceMeta) Get(spaceID jcstypes.UserSpaceID) *jcstypes.UserSpaceDetail { v, ok := s.cache.Get(spaceID) if ok { return &v @@ -37,9 +36,9 @@ func (s *UserSpaceMeta) Get(spaceID types.UserSpaceID) *types.UserSpaceDetail { return nil } -func (s *UserSpaceMeta) GetMany(spaceIDs []types.UserSpaceID) []*types.UserSpaceDetail { +func (s *UserSpaceMeta) GetMany(spaceIDs []jcstypes.UserSpaceID) []*jcstypes.UserSpaceDetail { vs, oks := s.cache.GetMany(spaceIDs) - ret := make([]*types.UserSpaceDetail, len(vs)) + ret := make([]*jcstypes.UserSpaceDetail, len(vs)) for i := range vs { if oks[i] { ret[i] = &vs[i] @@ -52,12 +51,12 @@ func (s *UserSpaceMeta) ClearOutdated() { s.cache.ClearOutdated() } -func (s *UserSpaceMeta) Drop(keys []types.UserSpaceID) { +func (s *UserSpaceMeta) Drop(keys []jcstypes.UserSpaceID) { s.cache.Drop(keys) } -func (s *UserSpaceMeta) load(keys []types.UserSpaceID) 
([]types.UserSpaceDetail, []bool) { - vs := make([]types.UserSpaceDetail, len(keys)) +func (s *UserSpaceMeta) load(keys []jcstypes.UserSpaceID) ([]jcstypes.UserSpaceDetail, []bool) { + vs := make([]jcstypes.UserSpaceDetail, len(keys)) oks := make([]bool, len(keys)) spaces, err := s.host.db.UserSpace().BatchGetByID(s.host.db.DefCtx(), keys) @@ -66,9 +65,9 @@ func (s *UserSpaceMeta) load(keys []types.UserSpaceID) ([]types.UserSpaceDetail, return vs, oks } - detailMap := make(map[types.UserSpaceID]*types.UserSpaceDetail) + detailMap := make(map[jcstypes.UserSpaceID]*jcstypes.UserSpaceDetail) for i := range spaces { - detailMap[spaces[i].UserSpaceID] = &types.UserSpaceDetail{ + detailMap[spaces[i].UserSpaceID] = &jcstypes.UserSpaceDetail{ UserID: stgglb.UserID, UserSpace: spaces[i], } diff --git a/client/internal/services/bucket.go b/client/internal/services/bucket.go index fbfbbbd..cd2db0e 100644 --- a/client/internal/services/bucket.go +++ b/client/internal/services/bucket.go @@ -3,7 +3,7 @@ package services import ( "time" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) // BucketService 是对存储桶进行操作的服务类 @@ -20,15 +20,15 @@ func (svc *Service) BucketSvc() *BucketService { // userID: 用户的唯一标识 // bucketID: 桶的唯一标识 // 返回值: 桶的信息和可能发生的错误 -func (svc *BucketService) GetBucket(bucketID types.BucketID) (types.Bucket, error) { +func (svc *BucketService) GetBucket(bucketID jcstypes.BucketID) (jcstypes.Bucket, error) { return svc.DB.Bucket().GetByID(svc.DB.DefCtx(), bucketID) } -func (svc *BucketService) GetBucketByName(bucketName string) (types.Bucket, error) { +func (svc *BucketService) GetBucketByName(bucketName string) (jcstypes.Bucket, error) { return svc.DB.Bucket().GetByName(svc.DB.DefCtx(), bucketName) } -func (svc *BucketService) ListAllBuckets() ([]types.Bucket, error) { +func (svc *BucketService) ListAllBuckets() ([]jcstypes.Bucket, error) { return svc.DB.Bucket().GetAll(svc.DB.DefCtx()) } @@ -36,11 +36,11 @@ func (svc *BucketService) ListAllBuckets() ([]types.Bucket, error) { // userID: 用户的唯一标识 // bucketID: 桶的唯一标识 // 返回值: 桶的所有包列表和可能发生的错误 -func (svc *BucketService) GetBucketPackages(bucketID types.BucketID) ([]types.Package, error) { +func (svc *BucketService) GetBucketPackages(bucketID jcstypes.BucketID) ([]jcstypes.Package, error) { return svc.DB.Package().GetBucketPackages(svc.DB.DefCtx(), bucketID) } -func (svc *BucketService) CreateBucket(bucketName string, createTime time.Time) (types.Bucket, error) { +func (svc *BucketService) CreateBucket(bucketName string, createTime time.Time) (jcstypes.Bucket, error) { return svc.DB.Bucket().Create(svc.DB.DefCtx(), bucketName, createTime) } @@ -48,6 +48,6 @@ func (svc *BucketService) CreateBucket(bucketName string, createTime time.Time) // userID: 用户的唯一标识 // bucketID: 桶的唯一标识 // 返回值: 可能发生的错误 -func (svc *BucketService) DeleteBucket(bucketID types.BucketID) error { +func (svc *BucketService) DeleteBucket(bucketID jcstypes.BucketID) error { return svc.DB.Bucket().DeleteComplete(svc.DB.DefCtx(), bucketID) } diff --git a/client/internal/services/object.go b/client/internal/services/object.go index 122d90e..9de519d 100644 --- a/client/internal/services/object.go +++ b/client/internal/services/object.go @@ -16,7 +16,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/plans" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes 
"gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" "gorm.io/gorm" ) @@ -76,15 +76,15 @@ func (svc *ObjectService) GetByPath(req api.ObjectListByPath) (api.ObjectListByP return resp, err } -func (svc *ObjectService) GetByIDs(objectIDs []types.ObjectID) ([]*types.Object, error) { - var ret []*types.Object +func (svc *ObjectService) GetByIDs(objectIDs []jcstypes.ObjectID) ([]*jcstypes.Object, error) { + var ret []*jcstypes.Object err := svc.DB.DoTx(func(tx db.SQLContext) error { objs, err := svc.DB.Object().BatchGet(tx, objectIDs) if err != nil { return err } - objMp := make(map[types.ObjectID]types.Object) + objMp := make(map[jcstypes.ObjectID]jcstypes.Object) for _, obj := range objs { objMp[obj.ObjectID] = obj } @@ -103,14 +103,14 @@ func (svc *ObjectService) GetByIDs(objectIDs []types.ObjectID) ([]*types.Object, return ret, err } -func (svc *ObjectService) UpdateInfo(updatings []api.UpdatingObject) ([]types.ObjectID, error) { - var sucs []types.ObjectID +func (svc *ObjectService) UpdateInfo(updatings []api.UpdatingObject) ([]jcstypes.ObjectID, error) { + var sucs []jcstypes.ObjectID err := svc.DB.DoTx(func(tx db.SQLContext) error { updatings = sort2.Sort(updatings, func(o1, o2 api.UpdatingObject) int { return sort2.Cmp(o1.ObjectID, o2.ObjectID) }) - objIDs := make([]types.ObjectID, len(updatings)) + objIDs := make([]jcstypes.ObjectID, len(updatings)) for i, obj := range updatings { objIDs[i] = obj.ObjectID } @@ -119,17 +119,17 @@ func (svc *ObjectService) UpdateInfo(updatings []api.UpdatingObject) ([]types.Ob if err != nil { return fmt.Errorf("batch getting objects: %w", err) } - oldObjIDs := make([]types.ObjectID, len(oldObjs)) + oldObjIDs := make([]jcstypes.ObjectID, len(oldObjs)) for i, obj := range oldObjs { oldObjIDs[i] = obj.ObjectID } - avaiUpdatings, notExistsObjs := pickByObjectIDs(updatings, oldObjIDs, func(obj api.UpdatingObject) types.ObjectID { return obj.ObjectID }) + avaiUpdatings, notExistsObjs := pickByObjectIDs(updatings, oldObjIDs, func(obj api.UpdatingObject) jcstypes.ObjectID { return obj.ObjectID }) if len(notExistsObjs) > 0 { // TODO 部分对象已经不存在 } - newObjs := make([]types.Object, len(avaiUpdatings)) + newObjs := make([]jcstypes.Object, len(avaiUpdatings)) for i := range newObjs { newObjs[i] = oldObjs[i] avaiUpdatings[i].ApplyTo(&newObjs[i]) @@ -140,7 +140,7 @@ func (svc *ObjectService) UpdateInfo(updatings []api.UpdatingObject) ([]types.Ob return fmt.Errorf("batch create or update: %w", err) } - sucs = lo.Map(newObjs, func(obj types.Object, _ int) types.ObjectID { return obj.ObjectID }) + sucs = lo.Map(newObjs, func(obj jcstypes.Object, _ int) jcstypes.ObjectID { return obj.ObjectID }) return nil }) return sucs, err @@ -148,7 +148,7 @@ func (svc *ObjectService) UpdateInfo(updatings []api.UpdatingObject) ([]types.Ob // 根据objIDs从objs中挑选Object。 // len(objs) >= len(objIDs) -func pickByObjectIDs[T any](objs []T, objIDs []types.ObjectID, getID func(T) types.ObjectID) (picked []T, notFound []T) { +func pickByObjectIDs[T any](objs []T, objIDs []jcstypes.ObjectID, getID func(T) jcstypes.ObjectID) (picked []T, notFound []T) { objIdx := 0 idIdx := 0 @@ -167,8 +167,8 @@ func pickByObjectIDs[T any](objs []T, objIDs []types.ObjectID, getID func(T) typ return } -func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, error) { - var sucs []types.ObjectID +func (svc *ObjectService) Move(movings []api.MovingObject) ([]jcstypes.ObjectID, error) { + var sucs []jcstypes.ObjectID var evt 
[]*datamap.BodyObjectInfoUpdated err := svc.DB.DoTx(func(tx db.SQLContext) error { @@ -176,7 +176,7 @@ func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, er return sort2.Cmp(o1.ObjectID, o2.ObjectID) }) - objIDs := make([]types.ObjectID, len(movings)) + objIDs := make([]jcstypes.ObjectID, len(movings)) for i, obj := range movings { objIDs[i] = obj.ObjectID } @@ -185,20 +185,20 @@ func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, er if err != nil { return fmt.Errorf("batch getting objects: %w", err) } - oldObjIDs := make([]types.ObjectID, len(oldObjs)) + oldObjIDs := make([]jcstypes.ObjectID, len(oldObjs)) for i, obj := range oldObjs { oldObjIDs[i] = obj.ObjectID } // 找出仍在数据库的Object - avaiMovings, notExistsObjs := pickByObjectIDs(movings, oldObjIDs, func(obj api.MovingObject) types.ObjectID { return obj.ObjectID }) + avaiMovings, notExistsObjs := pickByObjectIDs(movings, oldObjIDs, func(obj api.MovingObject) jcstypes.ObjectID { return obj.ObjectID }) if len(notExistsObjs) > 0 { // TODO 部分对象已经不存在 } // 筛选出PackageID变化、Path变化的对象,这两种对象要检测改变后是否有冲突 - var pkgIDChangedObjs []types.Object - var pathChangedObjs []types.Object + var pkgIDChangedObjs []jcstypes.Object + var pathChangedObjs []jcstypes.Object for i := range avaiMovings { if avaiMovings[i].PackageID != oldObjs[i].PackageID { newObj := oldObjs[i] @@ -211,7 +211,7 @@ func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, er } } - var newObjs []types.Object + var newObjs []jcstypes.Object // 对于PackageID发生变化的对象,需要检查目标Package内是否存在同Path的对象 checkedObjs, err := svc.checkPackageChangedObjects(tx, pkgIDChangedObjs) if err != nil { @@ -231,8 +231,8 @@ func (svc *ObjectService) Move(movings []api.MovingObject) ([]types.ObjectID, er return fmt.Errorf("batch create or update: %w", err) } - sucs = lo.Map(newObjs, func(obj types.Object, _ int) types.ObjectID { return obj.ObjectID }) - evt = lo.Map(newObjs, func(obj types.Object, _ int) *datamap.BodyObjectInfoUpdated { + sucs = lo.Map(newObjs, func(obj jcstypes.Object, _ int) jcstypes.ObjectID { return obj.ObjectID }) + evt = lo.Map(newObjs, func(obj jcstypes.Object, _ int) *datamap.BodyObjectInfoUpdated { return &datamap.BodyObjectInfoUpdated{ Object: obj, } @@ -266,23 +266,23 @@ func (svc *ObjectService) Download(req downloader.DownloadReqeust) (*downloader. 
return downloading, nil } -func (svc *Service) checkPackageChangedObjects(tx db.SQLContext, objs []types.Object) ([]types.Object, error) { +func (svc *Service) checkPackageChangedObjects(tx db.SQLContext, objs []jcstypes.Object) ([]jcstypes.Object, error) { if len(objs) == 0 { return nil, nil } type PackageObjects struct { - PackageID types.PackageID - ObjectByPath map[string]*types.Object + PackageID jcstypes.PackageID + ObjectByPath map[string]*jcstypes.Object } - packages := make(map[types.PackageID]*PackageObjects) + packages := make(map[jcstypes.PackageID]*PackageObjects) for _, obj := range objs { pkg, ok := packages[obj.PackageID] if !ok { pkg = &PackageObjects{ PackageID: obj.PackageID, - ObjectByPath: make(map[string]*types.Object), + ObjectByPath: make(map[string]*jcstypes.Object), } packages[obj.PackageID] = pkg } @@ -295,7 +295,7 @@ func (svc *Service) checkPackageChangedObjects(tx db.SQLContext, objs []types.Ob } } - var willUpdateObjs []types.Object + var willUpdateObjs []jcstypes.Object for _, pkg := range packages { _, err := svc.DB.Package().GetByID(tx, pkg.PackageID) if errors.Is(err, gorm.ErrRecordNotFound) { @@ -327,12 +327,12 @@ func (svc *Service) checkPackageChangedObjects(tx db.SQLContext, objs []types.Ob return willUpdateObjs, nil } -func (svc *Service) checkPathChangedObjects(tx db.SQLContext, objs []types.Object) ([]types.Object, error) { +func (svc *Service) checkPathChangedObjects(tx db.SQLContext, objs []jcstypes.Object) ([]jcstypes.Object, error) { if len(objs) == 0 { return nil, nil } - objByPath := make(map[string]*types.Object) + objByPath := make(map[string]*jcstypes.Object) for _, obj := range objs { if objByPath[obj.Path] == nil { o := obj @@ -351,7 +351,7 @@ func (svc *Service) checkPathChangedObjects(tx db.SQLContext, objs []types.Objec return nil, fmt.Errorf("getting package by id: %w", err) } - existsObjs, err := svc.DB.Object().BatchGetByPackagePath(tx, objs[0].PackageID, lo.Map(objs, func(obj types.Object, idx int) string { return obj.Path })) + existsObjs, err := svc.DB.Object().BatchGetByPackagePath(tx, objs[0].PackageID, lo.Map(objs, func(obj jcstypes.Object, idx int) string { return obj.Path })) if err != nil { return nil, fmt.Errorf("batch getting objects by package path: %w", err) } @@ -361,7 +361,7 @@ func (svc *Service) checkPathChangedObjects(tx db.SQLContext, objs []types.Objec objByPath[obj.Path] = nil } - var willMoveObjs []types.Object + var willMoveObjs []jcstypes.Object for _, obj := range objByPath { if obj == nil { continue @@ -372,8 +372,8 @@ func (svc *Service) checkPathChangedObjects(tx db.SQLContext, objs []types.Objec return willMoveObjs, nil } -func (svc *ObjectService) Delete(objectIDs []types.ObjectID) error { - var sucs []types.ObjectID +func (svc *ObjectService) Delete(objectIDs []jcstypes.ObjectID) error { + var sucs []jcstypes.ObjectID err := svc.DB.DoTx(func(tx db.SQLContext) error { avaiIDs, err := svc.DB.Object().BatchTestObjectID(tx, objectIDs) if err != nil { @@ -395,19 +395,19 @@ func (svc *ObjectService) Delete(objectIDs []types.ObjectID) error { return nil } -func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, error) { +func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*jcstypes.Object, error) { type CloningObject struct { Cloning api.CloningObject OrgIndex int } type PackageClonings struct { - PackageID types.PackageID + PackageID jcstypes.PackageID Clonings map[string]CloningObject } var evt []*datamap.BodyNewOrUpdateObject - cloningMap := 
make(map[types.PackageID]*PackageClonings) + cloningMap := make(map[jcstypes.PackageID]*PackageClonings) for i, cloning := range clonings { pkg, ok := cloningMap[cloning.NewPackageID] if !ok { @@ -423,7 +423,7 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, } } - ret := make([]*types.Object, len(cloningMap)) + ret := make([]*jcstypes.Object, len(cloningMap)) err := svc.DB.DoTx(func(tx db.SQLContext) error { // 剔除掉新路径已经存在的对象 for _, pkg := range cloningMap { @@ -449,7 +449,7 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, } var avaiClonings []CloningObject - var avaiObjIDs []types.ObjectID + var avaiObjIDs []jcstypes.ObjectID for _, pkg := range cloningMap { for _, cloning := range pkg.Clonings { avaiClonings = append(avaiClonings, cloning) @@ -462,7 +462,7 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, return fmt.Errorf("batch getting object details: %w", err) } - avaiDetailsMap := make(map[types.ObjectID]types.ObjectDetail) + avaiDetailsMap := make(map[jcstypes.ObjectID]jcstypes.ObjectDetail) for _, detail := range avaiDetails { avaiDetailsMap[detail.Object.ObjectID] = detail } @@ -470,7 +470,7 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, oldAvaiClonings := avaiClonings avaiClonings = nil - var newObjs []types.Object + var newObjs []jcstypes.Object for _, cloning := range oldAvaiClonings { // 进一步剔除原始对象不存在的情况 detail, ok := avaiDetailsMap[cloning.Cloning.ObjectID] @@ -494,7 +494,7 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, } // 创建了新对象就能拿到新对象ID,再创建新对象块 - var newBlks []types.ObjectBlock + var newBlks []jcstypes.ObjectBlock for i, cloning := range avaiClonings { oldBlks := avaiDetailsMap[cloning.Cloning.ObjectID].Blocks for _, blk := range oldBlks { @@ -550,12 +550,12 @@ func (svc *ObjectService) Clone(clonings []api.CloningObject) ([]*types.Object, // userID: 用户ID。 // packageID: 包ID。 // 返回值: 对象列表和错误信息。 -func (svc *ObjectService) GetPackageObjects(packageID types.PackageID) ([]types.Object, error) { +func (svc *ObjectService) GetPackageObjects(packageID jcstypes.PackageID) ([]jcstypes.Object, error) { return svc.DB.Object().GetPackageObjects(svc.DB.DefCtx(), packageID) } -func (svc *ObjectService) GetObjectDetails(objectIDs []types.ObjectID) ([]*types.ObjectDetail, error) { - detailsMp := make(map[types.ObjectID]*types.ObjectDetail) +func (svc *ObjectService) GetObjectDetails(objectIDs []jcstypes.ObjectID) ([]*jcstypes.ObjectDetail, error) { + detailsMp := make(map[jcstypes.ObjectID]*jcstypes.ObjectDetail) err := svc.DB.DoTx(func(tx db.SQLContext) error { var err error @@ -568,7 +568,7 @@ func (svc *ObjectService) GetObjectDetails(objectIDs []types.ObjectID) ([]*types return fmt.Errorf("batch get objects: %w", err) } for _, obj := range objs { - detailsMp[obj.ObjectID] = &types.ObjectDetail{ + detailsMp[obj.ObjectID] = &jcstypes.ObjectDetail{ Object: obj, } } @@ -601,7 +601,7 @@ func (svc *ObjectService) GetObjectDetails(objectIDs []types.ObjectID) ([]*types return nil, err } - details := make([]*types.ObjectDetail, len(objectIDs)) + details := make([]*jcstypes.ObjectDetail, len(objectIDs)) for i, objID := range objectIDs { details[i] = detailsMp[objID] } @@ -609,8 +609,8 @@ func (svc *ObjectService) GetObjectDetails(objectIDs []types.ObjectID) ([]*types return details, nil } -func (svc *ObjectService) NewMultipartUploadObject(packageID types.PackageID, path string) (types.Object, error) { - var obj types.Object 
+func (svc *ObjectService) NewMultipartUploadObject(packageID jcstypes.PackageID, path string) (jcstypes.Object, error) { + var obj jcstypes.Object err := svc.DB.DoTx(func(tx db.SQLContext) error { oldObj, err := svc.DB.Object().GetByPath(tx, packageID, path) if err == nil { @@ -620,12 +620,12 @@ func (svc *ObjectService) NewMultipartUploadObject(packageID types.PackageID, pa return fmt.Errorf("delete object blocks: %w", err) } - obj.FileHash = types.EmptyHash + obj.FileHash = jcstypes.EmptyHash obj.Size = 0 - obj.Redundancy = types.NewMultipartUploadRedundancy() + obj.Redundancy = jcstypes.NewMultipartUploadRedundancy() obj.UpdateTime = time.Now() - err = svc.DB.Object().BatchUpdate(tx, []types.Object{obj}) + err = svc.DB.Object().BatchUpdate(tx, []jcstypes.Object{obj}) if err != nil { return fmt.Errorf("update object: %w", err) } @@ -633,12 +633,12 @@ func (svc *ObjectService) NewMultipartUploadObject(packageID types.PackageID, pa return nil } - obj = types.Object{ + obj = jcstypes.Object{ PackageID: packageID, Path: path, - FileHash: types.EmptyHash, + FileHash: jcstypes.EmptyHash, Size: 0, - Redundancy: types.NewMultipartUploadRedundancy(), + Redundancy: jcstypes.NewMultipartUploadRedundancy(), CreateTime: time.Now(), UpdateTime: time.Now(), } @@ -652,50 +652,50 @@ func (svc *ObjectService) NewMultipartUploadObject(packageID types.PackageID, pa }) if err != nil { logger.Warnf("new multipart upload object: %s", err.Error()) - return types.Object{}, err + return jcstypes.Object{}, err } return obj, nil } -func (svc *ObjectService) CompleteMultipartUpload(objectID types.ObjectID, indexes []int) (types.Object, error) { +func (svc *ObjectService) CompleteMultipartUpload(objectID jcstypes.ObjectID, indexes []int) (jcstypes.Object, error) { if len(indexes) == 0 { - return types.Object{}, fmt.Errorf("no block indexes specified") + return jcstypes.Object{}, fmt.Errorf("no block indexes specified") } objDe, err := db.DoTx11(svc.DB, svc.DB.Object().GetDetail, objectID) if err != nil { - return types.Object{}, err + return jcstypes.Object{}, err } - _, ok := objDe.Object.Redundancy.(*types.MultipartUploadRedundancy) + _, ok := objDe.Object.Redundancy.(*jcstypes.MultipartUploadRedundancy) if !ok { - return types.Object{}, fmt.Errorf("object %v is not a multipart upload", objectID) + return jcstypes.Object{}, fmt.Errorf("object %v is not a multipart upload", objectID) } if len(objDe.Blocks) == 0 { - return types.Object{}, fmt.Errorf("object %v has no blocks", objectID) + return jcstypes.Object{}, fmt.Errorf("object %v has no blocks", objectID) } - objBlkMap := make(map[int]types.ObjectBlock) + objBlkMap := make(map[int]jcstypes.ObjectBlock) for _, blk := range objDe.Blocks { objBlkMap[blk.Index] = blk } lockBld := reqbuilder.NewBuilder() - var compBlks []types.ObjectBlock - var compBlkSpaces []types.UserSpaceDetail - var targetSpace types.UserSpaceDetail + var compBlks []jcstypes.ObjectBlock + var compBlkSpaces []jcstypes.UserSpaceDetail + var targetSpace jcstypes.UserSpaceDetail for i, idx := range indexes { blk, ok := objBlkMap[idx] if !ok { - return types.Object{}, fmt.Errorf("block %d not found in object %v", idx, objectID) + return jcstypes.Object{}, fmt.Errorf("block %d not found in object %v", idx, objectID) } stg := svc.UserSpaceMeta.Get(blk.UserSpaceID) if stg == nil { - return types.Object{}, fmt.Errorf("storage of user space %d not found", blk.UserSpaceID) + return jcstypes.Object{}, fmt.Errorf("storage of user space %d not found", blk.UserSpaceID) } compBlks = append(compBlks, blk) @@ 
-708,21 +708,21 @@ func (svc *ObjectService) CompleteMultipartUpload(objectID types.ObjectID, index mutex, err := lockBld.MutexLock(svc.PubLock) if err != nil { - return types.Object{}, fmt.Errorf("acquire lock: %w", err) + return jcstypes.Object{}, fmt.Errorf("acquire lock: %w", err) } defer mutex.Unlock() bld := exec.NewPlanBuilder() err = plans.CompleteMultipart(compBlks, compBlkSpaces, targetSpace, "shard", bld) if err != nil { - return types.Object{}, err + return jcstypes.Object{}, err } exeCtx := exec.NewExecContext() exec.SetValueByType(exeCtx, svc.StgPool) ret, err := bld.Execute(exeCtx).Wait(context.Background()) if err != nil { - return types.Object{}, err + return jcstypes.Object{}, err } shardInfo := ret.Get("shard").(*ops2.FileInfoValue) @@ -732,8 +732,8 @@ func (svc *ObjectService) CompleteMultipartUpload(objectID types.ObjectID, index ObjectID: objectID, FileHash: shardInfo.Hash, Size: shardInfo.Size, - Redundancy: types.NewNoneRedundancy(), - Blocks: []types.ObjectBlock{{ + Redundancy: jcstypes.NewNoneRedundancy(), + Blocks: []jcstypes.ObjectBlock{{ ObjectID: objectID, Index: 0, UserSpaceID: targetSpace.UserSpace.UserSpaceID, @@ -744,12 +744,12 @@ func (svc *ObjectService) CompleteMultipartUpload(objectID types.ObjectID, index }) if err != nil { - return types.Object{}, err + return jcstypes.Object{}, err } obj, err := svc.DB.Object().GetByID(svc.DB.DefCtx(), objectID) if err != nil { - return types.Object{}, err + return jcstypes.Object{}, err } return obj, nil diff --git a/client/internal/services/package.go b/client/internal/services/package.go index 6c4484b..f9f5b1b 100644 --- a/client/internal/services/package.go +++ b/client/internal/services/package.go @@ -7,7 +7,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/types/datamap" ) @@ -21,14 +21,14 @@ func (svc *Service) PackageSvc() *PackageService { return &PackageService{Service: svc} } -func (svc *PackageService) GetBucketPackages(bucketID types.BucketID) ([]types.Package, error) { +func (svc *PackageService) GetBucketPackages(bucketID jcstypes.BucketID) ([]jcstypes.Package, error) { return svc.DB.Package().GetBucketPackages(svc.DB.DefCtx(), bucketID) } -func (svc *PackageService) Create(bucketID types.BucketID, name string) (types.Package, error) { +func (svc *PackageService) Create(bucketID jcstypes.BucketID, name string) (jcstypes.Package, error) { pkg, err := svc.DB.Package().Create(svc.DB.DefCtx(), bucketID, name, time.Now()) if err != nil { - return types.Package{}, err + return jcstypes.Package{}, err } svc.EvtPub.Publish(&datamap.BodyNewPackage{ @@ -39,7 +39,7 @@ func (svc *PackageService) Create(bucketID types.BucketID, name string) (types.P } // DeletePackage 删除指定的包 -func (svc *PackageService) DeletePackage(packageID types.PackageID) error { +func (svc *PackageService) DeletePackage(packageID jcstypes.PackageID) error { err := svc.DB.Package().DeleteComplete(svc.DB.DefCtx(), packageID) if err != nil { return err @@ -52,10 +52,10 @@ func (svc *PackageService) DeletePackage(packageID types.PackageID) error { return nil } -func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.BucketID, name string) (types.Package, error) { - var pkg types.Package - var oldObjIDs []types.ObjectID - var newObjIDs []types.ObjectID +func (svc *PackageService) Clone(packageID jcstypes.PackageID, 
bucketID jcstypes.BucketID, name string) (jcstypes.Package, error) { + var pkg jcstypes.Package + var oldObjIDs []jcstypes.ObjectID + var newObjIDs []jcstypes.ObjectID err := svc.DB.DoTx(func(tx db.SQLContext) error { var err error @@ -74,7 +74,7 @@ func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.Bucke return fmt.Errorf("getting object blocks: %w", err) } - clonedObjs := make([]types.Object, len(objs)) + clonedObjs := make([]jcstypes.Object, len(objs)) for i, obj := range objs { clonedObjs[i] = obj clonedObjs[i].ObjectID = 0 @@ -86,7 +86,7 @@ func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.Bucke return fmt.Errorf("batch creating objects: %w", err) } - oldToNew := make(map[types.ObjectID]types.ObjectID) + oldToNew := make(map[jcstypes.ObjectID]jcstypes.ObjectID) for i, obj := range clonedObjs { oldToNew[objs[i].ObjectID] = obj.ObjectID @@ -94,7 +94,7 @@ func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.Bucke newObjIDs = append(newObjIDs, obj.ObjectID) } - clonedBlks := make([]types.ObjectBlock, len(objBlks)) + clonedBlks := make([]jcstypes.ObjectBlock, len(objBlks)) for i, blk := range objBlks { clonedBlks[i] = blk clonedBlks[i].ObjectID = oldToNew[blk.ObjectID] @@ -108,7 +108,7 @@ func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.Bucke return nil }) if err != nil { - return types.Package{}, err + return jcstypes.Package{}, err } svc.EvtPub.Publish(&datamap.BodyPackageCloned{ @@ -122,8 +122,8 @@ func (svc *PackageService) Clone(packageID types.PackageID, bucketID types.Bucke } func (svc *PackageService) AddAccessStat(entries []db.AddAccessStatEntry) { - pkgIDs := make([]types.PackageID, len(entries)) - objIDs := make([]types.ObjectID, len(entries)) + pkgIDs := make([]jcstypes.PackageID, len(entries)) + objIDs := make([]jcstypes.ObjectID, len(entries)) for i, e := range entries { pkgIDs[i] = e.PackageID objIDs[i] = e.ObjectID diff --git a/client/internal/services/service.go b/client/internal/services/service.go index 1e6648d..6568ea6 100644 --- a/client/internal/services/service.go +++ b/client/internal/services/service.go @@ -2,6 +2,7 @@ package services import ( "gitlink.org.cn/cloudream/jcs-pub/client/internal/accessstat" + "gitlink.org.cn/cloudream/jcs-pub/client/internal/accesstoken" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" "gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader" "gitlink.org.cn/cloudream/jcs-pub/client/internal/downloader/strategy" @@ -31,6 +32,7 @@ type Service struct { SpaceSyncer *spacesyncer.SpaceSyncer TickTock *ticktock.TickTock SpeedStats *speedstats.SpeedStats + AccToken *accesstoken.Keeper } func NewService( @@ -47,6 +49,7 @@ func NewService( spaceSyncer *spacesyncer.SpaceSyncer, tickTock *ticktock.TickTock, speedStats *speedstats.SpeedStats, + accToken *accesstoken.Keeper, ) *Service { return &Service{ PubLock: publock, @@ -62,5 +65,6 @@ func NewService( SpaceSyncer: spaceSyncer, TickTock: tickTock, SpeedStats: speedStats, + AccToken: accToken, } } diff --git a/client/internal/spacesyncer/execute.go b/client/internal/spacesyncer/execute.go index 27ca50c..15cf77b 100644 --- a/client/internal/spacesyncer/execute.go +++ b/client/internal/spacesyncer/execute.go @@ -3,14 +3,14 @@ package spacesyncer import ( "gitlink.org.cn/cloudream/common/pkgs/trie" stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func 
execute(syncer *SpaceSyncer, task *task) { switch mode := task.Task.Mode.(type) { - case *types.SpaceSyncModeFull: + case *jcstypes.SpaceSyncModeFull: executeFull(syncer, task) - case *types.SpaceSyncModeDiff: + case *jcstypes.SpaceSyncModeDiff: executeDiff(syncer, task, mode) } } diff --git a/client/internal/spacesyncer/execute_full.go b/client/internal/spacesyncer/execute_full.go index 51df2fc..2379d6e 100644 --- a/client/internal/spacesyncer/execute_full.go +++ b/client/internal/spacesyncer/execute_full.go @@ -12,7 +12,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func executeFull(syncer *SpaceSyncer, task *task) { @@ -30,7 +30,7 @@ func executeFull(syncer *SpaceSyncer, task *task) { return } - dstSpaceIDs := make([]types.UserSpaceID, len(task.Task.Dests)) + dstSpaceIDs := make([]jcstypes.UserSpaceID, len(task.Task.Dests)) for i := range task.Task.Dests { dstSpaceIDs[i] = task.Task.Dests[i].DestUserSpaceID } diff --git a/client/internal/ticktock/redundancy_shrink_test.go b/client/internal/ticktock/redundancy_shrink_test.go index e440c9f..1a846c6 100644 --- a/client/internal/ticktock/redundancy_shrink_test.go +++ b/client/internal/ticktock/redundancy_shrink_test.go @@ -5,20 +5,20 @@ import ( . "github.com/smartystreets/goconvey/convey" "gitlink.org.cn/cloudream/common/pkgs/bitmap" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) func newTreeTest(nodeBlocksMap []bitmap.Bitmap64) combinatorialTree { tree := combinatorialTree{ blocksMaps: make(map[int]bitmap.Bitmap64), - stgIDToLocalStgID: make(map[types.UserSpaceID]int), + stgIDToLocalStgID: make(map[jcstypes.UserSpaceID]int), } tree.nodes = make([]combinatorialTreeNode, (1 << len(nodeBlocksMap))) for id, mp := range nodeBlocksMap { - tree.stgIDToLocalStgID[types.UserSpaceID(id)] = len(tree.localStgIDToStgID) + tree.stgIDToLocalStgID[jcstypes.UserSpaceID(id)] = len(tree.localStgIDToStgID) tree.blocksMaps[len(tree.localStgIDToStgID)] = mp - tree.localStgIDToStgID = append(tree.localStgIDToStgID, types.UserSpaceID(id)) + tree.localStgIDToStgID = append(tree.localStgIDToStgID, jcstypes.UserSpaceID(id)) } tree.nodes[0].localHubID = -1 @@ -125,7 +125,7 @@ func Test_UpdateBitmap(t *testing.T) { testcases := []struct { title string nodeBlocks []bitmap.Bitmap64 - updatedHubID types.UserSpaceID + updatedHubID jcstypes.UserSpaceID updatedBitmap bitmap.Bitmap64 k int expectedTreeNodeBitmaps []int @@ -134,7 +134,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,更新但值不变", nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, - updatedHubID: types.UserSpaceID(0), + updatedHubID: jcstypes.UserSpaceID(0), updatedBitmap: bitmap.Bitmap64(1), k: 4, expectedTreeNodeBitmaps: []int{0, 1, 3, 7, 15, 11, 5, 13, 9, 2, 6, 14, 10, 4, 12, 8}, @@ -143,7 +143,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,更新0", nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, - updatedHubID: types.UserSpaceID(0), + updatedHubID: jcstypes.UserSpaceID(0), updatedBitmap: bitmap.Bitmap64(2), k: 4, expectedTreeNodeBitmaps: []int{0, 2, 2, 6, 14, 10, 6, 14, 10, 2, 6, 14, 10, 4, 12, 8}, @@ -152,7 +152,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,更新1", nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, - updatedHubID: types.UserSpaceID(1), + updatedHubID: 
jcstypes.UserSpaceID(1), updatedBitmap: bitmap.Bitmap64(1), k: 4, expectedTreeNodeBitmaps: []int{0, 1, 1, 5, 13, 9, 5, 13, 9, 1, 5, 13, 9, 4, 12, 8}, @@ -161,7 +161,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,更新2", nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, - updatedHubID: types.UserSpaceID(2), + updatedHubID: jcstypes.UserSpaceID(2), updatedBitmap: bitmap.Bitmap64(1), k: 4, expectedTreeNodeBitmaps: []int{0, 1, 3, 3, 11, 11, 1, 9, 9, 2, 3, 11, 10, 1, 9, 8}, @@ -170,7 +170,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,更新3", nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, - updatedHubID: types.UserSpaceID(3), + updatedHubID: jcstypes.UserSpaceID(3), updatedBitmap: bitmap.Bitmap64(1), k: 4, expectedTreeNodeBitmaps: []int{0, 1, 3, 7, 7, 3, 5, 5, 1, 2, 6, 7, 3, 4, 5, 1}, @@ -179,7 +179,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,k<4,更新0,0之前没有k个块,现在拥有", nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8}, - updatedHubID: types.UserSpaceID(0), + updatedHubID: jcstypes.UserSpaceID(0), updatedBitmap: bitmap.Bitmap64(3), k: 2, expectedTreeNodeBitmaps: []int{0, 3, 3, 7, 15, 11, 5, 13, 9, 2, 6, 14, 10, 4, 12, 8}, @@ -187,7 +187,7 @@ func Test_UpdateBitmap(t *testing.T) { { title: "4个节点,k<4,更新0,0之前有k个块,现在没有", nodeBlocks: []bitmap.Bitmap64{3, 4, 0, 0}, - updatedHubID: types.UserSpaceID(0), + updatedHubID: jcstypes.UserSpaceID(0), updatedBitmap: bitmap.Bitmap64(0), k: 2, expectedTreeNodeBitmaps: []int{0, 0, 4, 4, 4, 4, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0}, diff --git a/client/internal/ticktock/user_space_gc.go b/client/internal/ticktock/user_space_gc.go index 76cf2b4..295c841 100644 --- a/client/internal/ticktock/user_space_gc.go +++ b/client/internal/ticktock/user_space_gc.go @@ -7,7 +7,7 @@ import ( "gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/utils/reflect2" "gitlink.org.cn/cloudream/jcs-pub/client/internal/db" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock/reqbuilder" ) @@ -44,7 +44,7 @@ func (j *UserSpaceGC) Execute(t *TickTock) { } } -func (j *UserSpaceGC) gcOne(t *TickTock, space *types.UserSpaceDetail) { +func (j *UserSpaceGC) gcOne(t *TickTock, space *jcstypes.UserSpaceDetail) { log := logger.WithType[UserSpaceGC]("Event") mutex, err := reqbuilder.NewBuilder().UserSpace().GC(space.UserSpace.UserSpaceID).MutexLock(t.pubLock) @@ -63,11 +63,11 @@ func (j *UserSpaceGC) gcOne(t *TickTock, space *types.UserSpaceDetail) { } } -func (j *UserSpaceGC) gcShards(t *TickTock, space *types.UserSpaceDetail) error { +func (j *UserSpaceGC) gcShards(t *TickTock, space *jcstypes.UserSpaceDetail) error { db2 := t.db // 收集需要进行垃圾回收的文件哈希值 - var allFileHashes []types.FileHash + var allFileHashes []jcstypes.FileHash err := db2.DoTx(func(tx db.SQLContext) error { blocks, err := db2.ObjectBlock().GetByUserSpaceID(tx, space.UserSpace.UserSpaceID) if err != nil { @@ -104,7 +104,7 @@ func (j *UserSpaceGC) gcShards(t *TickTock, space *types.UserSpaceDetail) error return nil } -func (j *UserSpaceGC) gcTemps(t *TickTock, space *types.UserSpaceDetail) error { +func (j *UserSpaceGC) gcTemps(t *TickTock, space *jcstypes.UserSpaceDetail) error { store, err := t.stgPool.GetBaseStore(space) if err != nil { return fmt.Errorf("getting base store: %w", err) diff --git a/client/internal/uploader/create_load.go b/client/internal/uploader/create_load.go index 99d5dd9..805f632 100644 --- a/client/internal/uploader/create_load.go +++ 
b/client/internal/uploader/create_load.go @@ -13,13 +13,13 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type CreateUploader struct { - pkg types.Package - targetSpaces []types.UserSpaceDetail - copyRoots []types.JPath + pkg jcstypes.Package + targetSpaces []jcstypes.UserSpaceDetail + copyRoots []jcstypes.JPath uploader *Uploader pubLock *publock.Mutex successes []db.AddObjectEntry @@ -28,11 +28,11 @@ type CreateUploader struct { } type CreateUploadResult struct { - Package types.Package - Objects map[string]types.Object + Package jcstypes.Package + Objects map[string]jcstypes.Object } -func (u *CreateUploader) Upload(pa types.JPath, stream io.Reader, opts ...UploadOption) error { +func (u *CreateUploader) Upload(pa jcstypes.JPath, stream io.Reader, opts ...UploadOption) error { opt := UploadOption{} if len(opts) > 0 { opt = opts[0] @@ -42,7 +42,7 @@ func (u *CreateUploader) Upload(pa types.JPath, stream io.Reader, opts ...Upload opt.CreateTime = time.Now() } - spaceIDs := make([]types.UserSpaceID, 0, len(u.targetSpaces)) + spaceIDs := make([]jcstypes.UserSpaceID, 0, len(u.targetSpaces)) ft := ioswitch2.FromTo{} fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream()) @@ -96,7 +96,7 @@ func (u *CreateUploader) Commit() (CreateUploadResult, error) { defer u.pubLock.Unlock() - var addedObjs []types.Object + var addedObjs []jcstypes.Object err := u.uploader.db.DoTx(func(tx db.SQLContext) error { var err error addedObjs, err = u.uploader.db.Object().BatchAdd(tx, u.pkg.PackageID, u.successes) @@ -108,7 +108,7 @@ func (u *CreateUploader) Commit() (CreateUploadResult, error) { ret := CreateUploadResult{ Package: u.pkg, - Objects: make(map[string]types.Object), + Objects: make(map[string]jcstypes.Object), } for _, entry := range addedObjs { diff --git a/client/internal/uploader/update.go b/client/internal/uploader/update.go index 3733c21..d8b9193 100644 --- a/client/internal/uploader/update.go +++ b/client/internal/uploader/update.go @@ -14,37 +14,37 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/ops2" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/ioswitch2/parser" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/publock" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type UpdateUploader struct { uploader *Uploader - pkgID types.PackageID - targetSpace types.UserSpaceDetail + pkgID jcstypes.PackageID + targetSpace jcstypes.UserSpaceDetail pubLock *publock.Mutex - copyToSpaces []types.UserSpaceDetail - copyToPath []types.JPath + copyToSpaces []jcstypes.UserSpaceDetail + copyToPath []jcstypes.JPath successes []db.AddObjectEntry lock sync.Mutex commited bool } type UploadSpaceInfo struct { - Space types.UserSpaceDetail + Space jcstypes.UserSpaceDetail Delay time.Duration IsSameLocation bool } type UpdateResult struct { // 上传成功的文件列表,Key为Path - Objects map[string]types.Object + Objects map[string]jcstypes.Object } type UploadOption struct { CreateTime time.Time // 设置文件的上传时间,如果为0值,则使用开始上传时的时间。 } -func (w *UpdateUploader) Upload(pat types.JPath, stream io.Reader, opts ...UploadOption) error { +func (w *UpdateUploader) Upload(pat jcstypes.JPath, stream io.Reader, opts ...UploadOption) error { opt := UploadOption{} if len(opts) > 0 { opt = opts[0] @@ -93,7 +93,7 @@ 
func (w *UpdateUploader) Upload(pat types.JPath, stream io.Reader, opts ...Uploa Size: shardInfo.Size, FileHash: shardInfo.Hash, CreateTime: opt.CreateTime, - UserSpaceIDs: []types.UserSpaceID{w.targetSpace.UserSpace.UserSpaceID}, + UserSpaceIDs: []jcstypes.UserSpaceID{w.targetSpace.UserSpace.UserSpaceID}, }) return nil } @@ -132,7 +132,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) { defer w.pubLock.Unlock() - var addedObjs []types.Object + var addedObjs []jcstypes.Object err := w.uploader.db.DoTx(func(tx db.SQLContext) error { var err error addedObjs, err = w.uploader.db.Object().BatchAdd(tx, w.pkgID, w.successes) @@ -143,7 +143,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) { } ret := UpdateResult{ - Objects: make(map[string]types.Object), + Objects: make(map[string]jcstypes.Object), } for _, entry := range addedObjs { diff --git a/client/sdk/api/v1/object.go b/client/sdk/api/v1/object.go index 29ace6e..45be4df 100644 --- a/client/sdk/api/v1/object.go +++ b/client/sdk/api/v1/object.go @@ -14,7 +14,7 @@ import ( "gitlink.org.cn/cloudream/common/sdks" "gitlink.org.cn/cloudream/common/utils/http2" "gitlink.org.cn/cloudream/common/utils/serder" - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type ObjectService struct { @@ -30,12 +30,12 @@ func (c *Client) Object() *ObjectService { const ObjectListPathByPath = "/object/listByPath" type ObjectListByPath struct { - PackageID types.PackageID `form:"packageID" binding:"required" url:"packageID" json:"packageID"` - Path string `form:"path" url:"path" json:"path"` // 允许为空字符串 - IsPrefix bool `form:"isPrefix" url:"isPrefix" json:"isPrefix"` - NoRecursive bool `form:"noRecursive" url:"noRecursive" json:"noRecursive"` // 仅当isPrefix为true时有效,表示仅查询直接属于Prefix下的对象,对于更深的对象,返回它们的公共前缀 - MaxKeys int `form:"maxKeys" url:"maxKeys" json:"maxKeys"` - ContinuationToken string `form:"continuationToken" url:"continuationToken" json:"continuationToken"` // 用于分页,如果为空字符串,表示从头开始 + PackageID jcstypes.PackageID `form:"packageID" binding:"required" url:"packageID" json:"packageID"` + Path string `form:"path" url:"path" json:"path"` // 允许为空字符串 + IsPrefix bool `form:"isPrefix" url:"isPrefix" json:"isPrefix"` + NoRecursive bool `form:"noRecursive" url:"noRecursive" json:"noRecursive"` // 仅当isPrefix为true时有效,表示仅查询直接属于Prefix下的对象,对于更深的对象,返回它们的公共前缀 + MaxKeys int `form:"maxKeys" url:"maxKeys" json:"maxKeys"` + ContinuationToken string `form:"continuationToken" url:"continuationToken" json:"continuationToken"` // 用于分页,如果为空字符串,表示从头开始 } func (r *ObjectListByPath) MakeParam() *sdks.RequestParam { @@ -43,10 +43,10 @@ func (r *ObjectListByPath) MakeParam() *sdks.RequestParam { } type ObjectListByPathResp struct { - CommonPrefixes []string `json:"commonPrefixes"` // 仅在IsPrefix为true且NoRecursive为true时有效,包含更深层对象的shared prefix - Objects []types.Object `json:"objects"` // 如果IsPrefix为true且NoRecursive为false,则返回所有匹配的对象,否则只返回直接属于Prefix下的对象 - IsTruncated bool `json:"isTruncated"` // 是否还有更多对象 - NextContinuationToken string `json:"nextContinuationToken"` // 用于分页,如果IsTruncated为true,则下次请求的ContinuationToken为该值 + CommonPrefixes []string `json:"commonPrefixes"` // 仅在IsPrefix为true且NoRecursive为true时有效,包含更深层对象的shared prefix + Objects []jcstypes.Object `json:"objects"` // 如果IsPrefix为true且NoRecursive为false,则返回所有匹配的对象,否则只返回直接属于Prefix下的对象 + IsTruncated bool `json:"isTruncated"` // 是否还有更多对象 + NextContinuationToken string `json:"nextContinuationToken"` // 用于分页,如果IsTruncated为true,则下次请求的ContinuationToken为该值 } func (r 
*ObjectListByPathResp) ParseResponse(resp *http.Response) error { @@ -60,7 +60,7 @@ func (c *ObjectService) ListByPath(req ObjectListByPath) (*ObjectListByPathResp, const ObjectListByIDsPath = "/object/listByIDs" type ObjectListByIDs struct { - ObjectIDs []types.ObjectID `form:"objectIDs" binding:"required" url:"objectIDs"` + ObjectIDs []jcstypes.ObjectID `form:"objectIDs" binding:"required" url:"objectIDs"` } func (r *ObjectListByIDs) MakeParam() *sdks.RequestParam { @@ -68,7 +68,7 @@ func (r *ObjectListByIDs) MakeParam() *sdks.RequestParam { } type ObjectListByIDsResp struct { - Objects []*types.Object `json:"object"` // 与ObjectIDs一一对应,如果某个ID不存在,则对应位置为nil + Objects []*jcstypes.Object `json:"object"` // 与ObjectIDs一一对应,如果某个ID不存在,则对应位置为nil } func (r *ObjectListByIDsResp) ParseResponse(resp *http.Response) error { @@ -87,10 +87,10 @@ type ObjectUpload struct { } type ObjectUploadInfo struct { - PackageID types.PackageID `json:"packageID" binding:"required"` - Affinity types.UserSpaceID `json:"affinity"` - CopyTo []types.UserSpaceID `json:"copyTo"` - CopyToPath []string `json:"copyToPath"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + Affinity jcstypes.UserSpaceID `json:"affinity"` + CopyTo []jcstypes.UserSpaceID `json:"copyTo"` + CopyToPath []string `json:"copyToPath"` } type UploadingObject struct { @@ -101,7 +101,7 @@ type UploadingObject struct { type UploadObjectIterator = iterator.Iterator[*UploadingObject] type ObjectUploadResp struct { - Uploadeds []types.Object `json:"uploadeds"` + Uploadeds []jcstypes.Object `json:"uploadeds"` } func (c *ObjectService) Upload(req ObjectUpload) (*ObjectUploadResp, error) { @@ -154,9 +154,9 @@ func (c *ObjectService) Upload(req ObjectUpload) (*ObjectUploadResp, error) { const ObjectDownloadPath = "/object/download" type ObjectDownload struct { - ObjectID types.ObjectID `form:"objectID" url:"objectID" binding:"required"` - Offset int64 `form:"offset" url:"offset,omitempty"` - Length *int64 `form:"length" url:"length,omitempty"` + ObjectID jcstypes.ObjectID `form:"objectID" url:"objectID" binding:"required"` + Offset int64 `form:"offset" url:"offset,omitempty"` + Length *int64 `form:"length" url:"length,omitempty"` } func (r *ObjectDownload) MakeParam() *sdks.RequestParam { @@ -208,10 +208,10 @@ func (c *ObjectService) Download(req ObjectDownload) (*DownloadingObject, error) const ObjectDownloadByPathPath = "/object/downloadByPath" type ObjectDownloadByPath struct { - PackageID types.PackageID `form:"packageID" url:"packageID" binding:"required"` - Path string `form:"path" url:"path" binding:"required"` - Offset int64 `form:"offset" url:"offset,omitempty"` - Length *int64 `form:"length" url:"length,omitempty"` + PackageID jcstypes.PackageID `form:"packageID" url:"packageID" binding:"required"` + Path string `form:"path" url:"path" binding:"required"` + Offset int64 `form:"offset" url:"offset,omitempty"` + Length *int64 `form:"length" url:"length,omitempty"` } func (r *ObjectDownloadByPath) MakeParam() *sdks.RequestParam { @@ -258,11 +258,11 @@ func (c *ObjectService) DownloadByPath(req ObjectDownloadByPath) (*DownloadingOb const ObjectUpdateInfoPath = "/object/updateInfo" type UpdatingObject struct { - ObjectID types.ObjectID `json:"objectID" binding:"required"` - UpdateTime time.Time `json:"updateTime" binding:"required"` + ObjectID jcstypes.ObjectID `json:"objectID" binding:"required"` + UpdateTime time.Time `json:"updateTime" binding:"required"` } -func (u *UpdatingObject) ApplyTo(obj *types.Object) { +func (u 
*UpdatingObject) ApplyTo(obj *jcstypes.Object) { obj.UpdateTime = u.UpdateTime } @@ -275,7 +275,7 @@ func (r *ObjectUpdateInfo) MakeParam() *sdks.RequestParam { } type ObjectUpdateInfoResp struct { - Successes []types.ObjectID `json:"successes"` + Successes []jcstypes.ObjectID `json:"successes"` } func (r *ObjectUpdateInfoResp) ParseResponse(resp *http.Response) error { @@ -289,9 +289,9 @@ func (c *ObjectService) UpdateInfo(req ObjectUpdateInfo) (*ObjectUpdateInfoResp, const ObjectUpdateInfoByPathPath = "/object/updateInfoByPath" type ObjectUpdateInfoByPath struct { - PackageID types.PackageID `json:"packageID" binding:"required"` - Path string `json:"path" binding:"required"` - UpdateTime time.Time `json:"updateTime" binding:"required"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + Path string `json:"path" binding:"required"` + UpdateTime time.Time `json:"updateTime" binding:"required"` } func (r *ObjectUpdateInfoByPath) MakeParam() *sdks.RequestParam { @@ -311,12 +311,12 @@ func (c *ObjectService) UpdateInfoByPath(req ObjectUpdateInfoByPath) (*ObjectUpd const ObjectMovePath = "/object/move" type MovingObject struct { - ObjectID types.ObjectID `json:"objectID" binding:"required"` - PackageID types.PackageID `json:"packageID" binding:"required"` - Path string `json:"path" binding:"required"` + ObjectID jcstypes.ObjectID `json:"objectID" binding:"required"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + Path string `json:"path" binding:"required"` } -func (m *MovingObject) ApplyTo(obj *types.Object) { +func (m *MovingObject) ApplyTo(obj *jcstypes.Object) { obj.PackageID = m.PackageID obj.Path = m.Path } @@ -330,7 +330,7 @@ func (r *ObjectMove) MakeParam() *sdks.RequestParam { } type ObjectMoveResp struct { - Successes []types.ObjectID `json:"successes"` + Successes []jcstypes.ObjectID `json:"successes"` } func (r *ObjectMoveResp) ParseResponse(resp *http.Response) error { @@ -344,7 +344,7 @@ func (c *ObjectService) Move(req ObjectMove) (*ObjectMoveResp, error) { const ObjectDeletePath = "/object/delete" type ObjectDelete struct { - ObjectIDs []types.ObjectID `json:"objectIDs" binding:"required"` + ObjectIDs []jcstypes.ObjectID `json:"objectIDs" binding:"required"` } func (r *ObjectDelete) MakeParam() *sdks.RequestParam { @@ -364,8 +364,8 @@ func (c *ObjectService) Delete(req ObjectDelete) error { const ObjectDeleteByPathPath = "/object/deleteByPath" type ObjectDeleteByPath struct { - PackageID types.PackageID `json:"packageID" binding:"required"` - Path string `json:"path" binding:"required"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + Path string `json:"path" binding:"required"` } func (r *ObjectDeleteByPath) MakeParam() *sdks.RequestParam { @@ -393,13 +393,13 @@ func (r *ObjectClone) MakeParam() *sdks.RequestParam { } type CloningObject struct { - ObjectID types.ObjectID `json:"objectID" binding:"required"` - NewPath string `json:"newPath" binding:"required"` - NewPackageID types.PackageID `json:"newPackageID" binding:"required"` + ObjectID jcstypes.ObjectID `json:"objectID" binding:"required"` + NewPath string `json:"newPath" binding:"required"` + NewPackageID jcstypes.PackageID `json:"newPackageID" binding:"required"` } type ObjectCloneResp struct { - Objects []*types.Object `json:"objects"` + Objects []*jcstypes.Object `json:"objects"` } func (r *ObjectCloneResp) ParseResponse(resp *http.Response) error { @@ -413,7 +413,7 @@ func (c *ObjectService) Clone(req ObjectClone) (*ObjectCloneResp, error) { const 
ObjectGetPackageObjectsPath = "/object/getPackageObjects" type ObjectGetPackageObjects struct { - PackageID types.PackageID `form:"packageID" url:"packageID" binding:"required"` + PackageID jcstypes.PackageID `form:"packageID" url:"packageID" binding:"required"` } func (r *ObjectGetPackageObjects) MakeParam() *sdks.RequestParam { @@ -421,7 +421,7 @@ func (r *ObjectGetPackageObjects) MakeParam() *sdks.RequestParam { } type ObjectGetPackageObjectsResp struct { - Objects []types.Object `json:"objects"` + Objects []jcstypes.Object `json:"objects"` } func (r *ObjectGetPackageObjectsResp) ParseResponse(resp *http.Response) error { @@ -435,8 +435,8 @@ func (c *ObjectService) GetPackageObjects(req ObjectGetPackageObjects) (*ObjectG const ObjectNewMultipartUploadPath = "/object/newMultipartUpload" type ObjectNewMultipartUpload struct { - PackageID types.PackageID `json:"packageID" binding:"required"` - Path string `json:"path" binding:"required"` + PackageID jcstypes.PackageID `json:"packageID" binding:"required"` + Path string `json:"path" binding:"required"` } func (r *ObjectNewMultipartUpload) MakeParam() *sdks.RequestParam { @@ -444,7 +444,7 @@ func (r *ObjectNewMultipartUpload) MakeParam() *sdks.RequestParam { } type ObjectNewMultipartUploadResp struct { - Object types.Object `json:"object"` + Object jcstypes.Object `json:"object"` } func (r *ObjectNewMultipartUploadResp) ParseResponse(resp *http.Response) error { @@ -463,8 +463,8 @@ type ObjectUploadPart struct { } type ObjectUploadPartInfo struct { - ObjectID types.ObjectID `json:"objectID" binding:"required"` - Index int `json:"index"` + ObjectID jcstypes.ObjectID `json:"objectID" binding:"required"` + Index int `json:"index"` } type ObjectUploadPartResp struct{} @@ -512,8 +512,8 @@ func (c *ObjectService) UploadPart(req ObjectUploadPart) (*ObjectUploadPartResp, const ObjectCompleteMultipartUploadPath = "/object/completeMultipartUpload" type ObjectCompleteMultipartUpload struct { - ObjectID types.ObjectID `json:"objectID" binding:"required"` - Indexes []int `json:"indexes" binding:"required"` + ObjectID jcstypes.ObjectID `json:"objectID" binding:"required"` + Indexes []int `json:"indexes" binding:"required"` } func (r *ObjectCompleteMultipartUpload) MakeParam() *sdks.RequestParam { @@ -521,7 +521,7 @@ func (r *ObjectCompleteMultipartUpload) MakeParam() *sdks.RequestParam { } type ObjectCompleteMultipartUploadResp struct { - Object types.Object `json:"object"` + Object jcstypes.Object `json:"object"` } func (r *ObjectCompleteMultipartUploadResp) ParseResponse(resp *http.Response) error { diff --git a/client/sdk/api/v1/pub_shards.go b/client/sdk/api/v1/pub_shards.go new file mode 100644 index 0000000..fec0edc --- /dev/null +++ b/client/sdk/api/v1/pub_shards.go @@ -0,0 +1,72 @@ +package api + +import ( + "net/http" + + "gitlink.org.cn/cloudream/common/sdks" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" +) + +type PubShardsService struct { + *Client +} + +func (c *Client) PubShards() *PubShardsService { + return &PubShardsService{ + Client: c, + } +} + +const PubShardsCreatePath = "/pubShards/create" + +type PubShardsCreate struct { + Name string `json:"name"` + Storage jcstypes.StorageType `json:"storage"` + Credential jcstypes.StorageCredential `json:"credential"` + ShardStore jcstypes.ShardStoreUserConfig `json:"shardStore"` + Features []jcstypes.StorageFeature `json:"features"` + WorkingDir string `json:"workingDir"` + Password string `json:"password"` + MasterHub jcstypes.HubID `json:"masterHub"` +} + +func (r 
*PubShardsCreate) MakeParam() *sdks.RequestParam { + return sdks.MakeJSONParam(http.MethodPost, PubShardsCreatePath, r) +} + +type PubShardsCreateResp struct { + PubShards jcstypes.PubShards `json:"pubShards"` +} + +func (r *PubShardsCreateResp) ParseResponse(resp *http.Response) error { + return sdks.ParseCodeDataJSONResponse(resp, r) +} + +func (c *PubShardsService) Create(req PubShardsCreate) (*PubShardsCreateResp, error) { + return JSONAPI(&c.cfg, c.httpCli, &req, &PubShardsCreateResp{}) +} + +const PubShardsJoinPath = "/pubShards/join" + +type PubShardsJoin struct { + Name string `json:"name"` + PubShardsID jcstypes.PubShardsID `json:"pubShardID"` + Password string `json:"password"` +} + +func (r *PubShardsJoin) MakeParam() *sdks.RequestParam { + return sdks.MakeJSONParam(http.MethodPost, PubShardsJoinPath, r) +} + +type PubShardsJoinResp struct { + PubShards jcstypes.PubShards `json:"pubShards"` + UserSpace jcstypes.UserSpace `json:"userSpace"` +} + +func (r *PubShardsJoinResp) ParseResponse(resp *http.Response) error { + return sdks.ParseCodeDataJSONResponse(resp, r) +} + +func (c *PubShardsService) Join(req PubShardsJoin) (*PubShardsJoinResp, error) { + return JSONAPI(&c.cfg, c.httpCli, &req, &PubShardsJoinResp{}) +} diff --git a/common/assets/confs/hub.config.json b/common/assets/confs/hub.config.json index 2b03c33..e69b08b 100644 --- a/common/assets/confs/hub.config.json +++ b/common/assets/confs/hub.config.json @@ -42,5 +42,8 @@ }, "tickTock": { "testHubConnectivitiesInterval": "5m" + }, + "pubShards": { + "dbDir": "shards-dbs" } } \ No newline at end of file diff --git a/common/globals/globals.go b/common/globals/globals.go index e4c80fc..5d47998 100644 --- a/common/globals/globals.go +++ b/common/globals/globals.go @@ -1,11 +1,11 @@ package stgglb import ( - "gitlink.org.cn/cloudream/jcs-pub/common/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type LocalMachineInfo struct { - Location types.Location `json:"location"` + Location jcstypes.Location `json:"location"` } var Local *LocalMachineInfo @@ -14,7 +14,7 @@ var Local *LocalMachineInfo var StandaloneMode bool // 当前Client服务登录的用户的ID,如果是Standalone模式,则为0。TODO 临时解决办法 -var UserID types.UserID +var UserID jcstypes.UserID // InitLocal // diff --git a/common/pkgs/rpc/coordinator/coordinator.pb.go b/common/pkgs/rpc/coordinator/coordinator.pb.go index a94901f..90b4d86 100644 --- a/common/pkgs/rpc/coordinator/coordinator.pb.go +++ b/common/pkgs/rpc/coordinator/coordinator.pb.go @@ -27,7 +27,7 @@ var file_pkgs_rpc_coordinator_coordinator_proto_rawDesc = []byte{ 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x63, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, 0x1a, 0x12, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x70, 0x63, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xb7, 0x03, 0x0a, 0x0b, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, + 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xc9, 0x04, 0x0a, 0x0b, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x48, 0x75, 0x62, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, @@ -54,12 +54,21 @@ var file_pkgs_rpc_coordinator_coordinator_proto_rawDesc = []byte{ 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x12, 0x48, 0x75, 0x62, 0x4c, 
0x6f, 0x61, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, - 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x6c, 0x69, 0x6e, 0x6b, 0x2e, 0x6f, 0x72, 0x67, 0x2e, 0x63, 0x6e, - 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x6a, 0x63, 0x73, 0x2d, 0x70, - 0x75, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, - 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, 0x3b, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, + 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x75, 0x62, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x10, 0x48, 0x75, 0x62, 0x4c, 0x6f, 0x61, 0x64, 0x50, 0x75, 0x62, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x10, 0x55, 0x73, 0x65, 0x72, 0x47, 0x65, 0x74, 0x50, 0x75, 0x62, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x6c, 0x69, 0x6e, 0x6b, 0x2e, 0x6f, 0x72, 0x67, 0x2e, + 0x63, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x6a, 0x63, 0x73, + 0x2d, 0x70, 0x75, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x72, 0x72, 0x70, 0x63, 0x3b, 0x63, 0x6f, 0x72, 0x72, + 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_pkgs_rpc_coordinator_coordinator_proto_goTypes = []any{ @@ -67,29 +76,35 @@ var file_pkgs_rpc_coordinator_coordinator_proto_goTypes = []any{ (*rpc.Response)(nil), // 1: rpc.Response } var file_pkgs_rpc_coordinator_coordinator_proto_depIdxs = []int32{ - 0, // 0: corrpc.Coordinator.GetHubConfig:input_type -> rpc.Request - 0, // 1: corrpc.Coordinator.GetHubs:input_type -> rpc.Request - 0, // 2: corrpc.Coordinator.GetHubConnectivities:input_type -> rpc.Request - 0, // 3: corrpc.Coordinator.ReportHubConnectivity:input_type -> rpc.Request - 0, // 4: corrpc.Coordinator.SelectStorageHub:input_type -> rpc.Request - 0, // 5: corrpc.Coordinator.UserLogin:input_type -> rpc.Request - 0, // 6: corrpc.Coordinator.UserRefreshToken:input_type -> rpc.Request - 0, // 7: corrpc.Coordinator.UserLogout:input_type -> rpc.Request - 0, // 8: corrpc.Coordinator.HubLoadAccessToken:input_type -> rpc.Request - 1, // 9: corrpc.Coordinator.GetHubConfig:output_type -> rpc.Response - 1, // 10: corrpc.Coordinator.GetHubs:output_type -> rpc.Response - 1, // 11: corrpc.Coordinator.GetHubConnectivities:output_type -> rpc.Response - 1, // 12: corrpc.Coordinator.ReportHubConnectivity:output_type -> rpc.Response - 1, // 13: corrpc.Coordinator.SelectStorageHub:output_type -> rpc.Response - 1, // 14: corrpc.Coordinator.UserLogin:output_type -> rpc.Response - 1, // 15: 
corrpc.Coordinator.UserRefreshToken:output_type -> rpc.Response - 1, // 16: corrpc.Coordinator.UserLogout:output_type -> rpc.Response - 1, // 17: corrpc.Coordinator.HubLoadAccessToken:output_type -> rpc.Response - 9, // [9:18] is the sub-list for method output_type - 0, // [0:9] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: corrpc.Coordinator.GetHubConfig:input_type -> rpc.Request + 0, // 1: corrpc.Coordinator.GetHubs:input_type -> rpc.Request + 0, // 2: corrpc.Coordinator.GetHubConnectivities:input_type -> rpc.Request + 0, // 3: corrpc.Coordinator.ReportHubConnectivity:input_type -> rpc.Request + 0, // 4: corrpc.Coordinator.SelectStorageHub:input_type -> rpc.Request + 0, // 5: corrpc.Coordinator.UserLogin:input_type -> rpc.Request + 0, // 6: corrpc.Coordinator.UserRefreshToken:input_type -> rpc.Request + 0, // 7: corrpc.Coordinator.UserLogout:input_type -> rpc.Request + 0, // 8: corrpc.Coordinator.HubLoadAccessToken:input_type -> rpc.Request + 0, // 9: corrpc.Coordinator.CreatePubShards:input_type -> rpc.Request + 0, // 10: corrpc.Coordinator.HubLoadPubShards:input_type -> rpc.Request + 0, // 11: corrpc.Coordinator.UserGetPubShards:input_type -> rpc.Request + 1, // 12: corrpc.Coordinator.GetHubConfig:output_type -> rpc.Response + 1, // 13: corrpc.Coordinator.GetHubs:output_type -> rpc.Response + 1, // 14: corrpc.Coordinator.GetHubConnectivities:output_type -> rpc.Response + 1, // 15: corrpc.Coordinator.ReportHubConnectivity:output_type -> rpc.Response + 1, // 16: corrpc.Coordinator.SelectStorageHub:output_type -> rpc.Response + 1, // 17: corrpc.Coordinator.UserLogin:output_type -> rpc.Response + 1, // 18: corrpc.Coordinator.UserRefreshToken:output_type -> rpc.Response + 1, // 19: corrpc.Coordinator.UserLogout:output_type -> rpc.Response + 1, // 20: corrpc.Coordinator.HubLoadAccessToken:output_type -> rpc.Response + 1, // 21: corrpc.Coordinator.CreatePubShards:output_type -> rpc.Response + 1, // 22: corrpc.Coordinator.HubLoadPubShards:output_type -> rpc.Response + 1, // 23: corrpc.Coordinator.UserGetPubShards:output_type -> rpc.Response + 12, // [12:24] is the sub-list for method output_type + 0, // [0:12] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } func init() { file_pkgs_rpc_coordinator_coordinator_proto_init() } diff --git a/common/pkgs/rpc/coordinator/coordinator.proto b/common/pkgs/rpc/coordinator/coordinator.proto index 40b8039..7561075 100644 --- a/common/pkgs/rpc/coordinator/coordinator.proto +++ b/common/pkgs/rpc/coordinator/coordinator.proto @@ -19,4 +19,8 @@ service Coordinator { rpc UserRefreshToken(rpc.Request) returns(rpc.Response); rpc UserLogout(rpc.Request) returns(rpc.Response); rpc HubLoadAccessToken(rpc.Request) returns(rpc.Response); + + rpc CreatePubShards(rpc.Request) returns(rpc.Response); + rpc HubLoadPubShards(rpc.Request) returns(rpc.Response); + rpc UserGetPubShards(rpc.Request) returns(rpc.Response); } \ No newline at end of file diff --git a/common/pkgs/rpc/coordinator/coordinator_grpc.pb.go b/common/pkgs/rpc/coordinator/coordinator_grpc.pb.go index 173038b..d638f49 100644 --- a/common/pkgs/rpc/coordinator/coordinator_grpc.pb.go +++ b/common/pkgs/rpc/coordinator/coordinator_grpc.pb.go @@ -29,6 +29,9 @@ const ( Coordinator_UserRefreshToken_FullMethodName 
= "/corrpc.Coordinator/UserRefreshToken" Coordinator_UserLogout_FullMethodName = "/corrpc.Coordinator/UserLogout" Coordinator_HubLoadAccessToken_FullMethodName = "/corrpc.Coordinator/HubLoadAccessToken" + Coordinator_CreatePubShards_FullMethodName = "/corrpc.Coordinator/CreatePubShards" + Coordinator_HubLoadPubShards_FullMethodName = "/corrpc.Coordinator/HubLoadPubShards" + Coordinator_UserGetPubShards_FullMethodName = "/corrpc.Coordinator/UserGetPubShards" ) // CoordinatorClient is the client API for Coordinator service. @@ -44,6 +47,9 @@ type CoordinatorClient interface { UserRefreshToken(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) UserLogout(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) HubLoadAccessToken(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + CreatePubShards(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + HubLoadPubShards(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + UserGetPubShards(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) } type coordinatorClient struct { @@ -135,6 +141,33 @@ func (c *coordinatorClient) HubLoadAccessToken(ctx context.Context, in *rpc.Requ return out, nil } +func (c *coordinatorClient) CreatePubShards(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Coordinator_CreatePubShards_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *coordinatorClient) HubLoadPubShards(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Coordinator_HubLoadPubShards_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *coordinatorClient) UserGetPubShards(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Coordinator_UserGetPubShards_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // CoordinatorServer is the server API for Coordinator service. 
// All implementations must embed UnimplementedCoordinatorServer // for forward compatibility @@ -148,6 +181,9 @@ type CoordinatorServer interface { UserRefreshToken(context.Context, *rpc.Request) (*rpc.Response, error) UserLogout(context.Context, *rpc.Request) (*rpc.Response, error) HubLoadAccessToken(context.Context, *rpc.Request) (*rpc.Response, error) + CreatePubShards(context.Context, *rpc.Request) (*rpc.Response, error) + HubLoadPubShards(context.Context, *rpc.Request) (*rpc.Response, error) + UserGetPubShards(context.Context, *rpc.Request) (*rpc.Response, error) mustEmbedUnimplementedCoordinatorServer() } @@ -182,6 +218,15 @@ func (UnimplementedCoordinatorServer) UserLogout(context.Context, *rpc.Request) func (UnimplementedCoordinatorServer) HubLoadAccessToken(context.Context, *rpc.Request) (*rpc.Response, error) { return nil, status.Errorf(codes.Unimplemented, "method HubLoadAccessToken not implemented") } +func (UnimplementedCoordinatorServer) CreatePubShards(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreatePubShards not implemented") +} +func (UnimplementedCoordinatorServer) HubLoadPubShards(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method HubLoadPubShards not implemented") +} +func (UnimplementedCoordinatorServer) UserGetPubShards(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UserGetPubShards not implemented") +} func (UnimplementedCoordinatorServer) mustEmbedUnimplementedCoordinatorServer() {} // UnsafeCoordinatorServer may be embedded to opt out of forward compatibility for this service. @@ -357,6 +402,60 @@ func _Coordinator_HubLoadAccessToken_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _Coordinator_CreatePubShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CoordinatorServer).CreatePubShards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Coordinator_CreatePubShards_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CoordinatorServer).CreatePubShards(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Coordinator_HubLoadPubShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CoordinatorServer).HubLoadPubShards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Coordinator_HubLoadPubShards_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CoordinatorServer).HubLoadPubShards(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Coordinator_UserGetPubShards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CoordinatorServer).UserGetPubShards(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Coordinator_UserGetPubShards_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CoordinatorServer).UserGetPubShards(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + // Coordinator_ServiceDesc is the grpc.ServiceDesc for Coordinator service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -400,6 +499,18 @@ var Coordinator_ServiceDesc = grpc.ServiceDesc{ MethodName: "HubLoadAccessToken", Handler: _Coordinator_HubLoadAccessToken_Handler, }, + { + MethodName: "CreatePubShards", + Handler: _Coordinator_CreatePubShards_Handler, + }, + { + MethodName: "HubLoadPubShards", + Handler: _Coordinator_HubLoadPubShards_Handler, + }, + { + MethodName: "UserGetPubShards", + Handler: _Coordinator_UserGetPubShards_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "pkgs/rpc/coordinator/coordinator.proto", diff --git a/common/pkgs/rpc/coordinator/hub.go b/common/pkgs/rpc/coordinator/hub.go index e1b6f31..be63d4b 100644 --- a/common/pkgs/rpc/coordinator/hub.go +++ b/common/pkgs/rpc/coordinator/hub.go @@ -35,6 +35,9 @@ func RespGetHubConfig(hub jcstypes.Hub) *GetHubConfigResp { } } func (c *Client) GetHubConfig(ctx context.Context, msg *GetHubConfig) (*GetHubConfigResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } return rpc.UnaryClient[*GetHubConfigResp](c.cli.GetHubConfig, ctx, msg) } func (s *Server) GetHubConfig(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { @@ -49,6 +52,8 @@ type GetHubsResp struct { Hubs []*jcstypes.Hub `json:"hubs"` } +var _ = TokenAuth(Coordinator_GetHubs_FullMethodName) + func NewGetHubs(hubIDs []jcstypes.HubID) *GetHubs { return &GetHubs{ HubIDs: hubIDs, @@ -69,6 +74,9 @@ func (r *GetHubsResp) GetHub(id jcstypes.HubID) *jcstypes.Hub { return nil } func (c *Client) GetHubs(ctx context.Context, msg *GetHubs) (*GetHubsResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } return rpc.UnaryClient[*GetHubsResp](c.cli.GetHubs, ctx, msg) } func (s *Server) GetHubs(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { @@ -95,6 +103,9 @@ func RespGetHubConnectivities(cons []jcstypes.HubConnectivity) *GetHubConnectivi } } func (c *Client) GetHubConnectivities(ctx context.Context, msg *GetHubConnectivities) (*GetHubConnectivitiesResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } return rpc.UnaryClient[*GetHubConnectivitiesResp](c.cli.GetHubConnectivities, ctx, msg) } func (s *Server) GetHubConnectivities(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { @@ -109,6 +120,9 @@ type ReportHubConnectivityResp struct { } func (c *Client) ReportHubConnectivity(ctx context.Context, msg *ReportHubConnectivity) (*ReportHubConnectivityResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } return rpc.UnaryClient[*ReportHubConnectivityResp](c.cli.ReportHubConnectivity, ctx, msg) } func (s *Server) ReportHubConnectivity(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { diff --git a/common/pkgs/rpc/coordinator/pub_shards.go b/common/pkgs/rpc/coordinator/pub_shards.go new file mode 100644 index 0000000..e396e28 --- /dev/null +++ b/common/pkgs/rpc/coordinator/pub_shards.go @@ -0,0 +1,89 @@ +package corrpc + +import ( + context "context" + + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" + jcstypes 
"gitlink.org.cn/cloudream/jcs-pub/common/types" +) + +type PubShardsService interface { + CreatePubShards(ctx context.Context, msg *CreatePubShards) (*CreatePubShardsResp, *rpc.CodeError) + HubLoadPubShards(ctx context.Context, msg *HubLoadPubShards) (*HubLoadPubShardsResp, *rpc.CodeError) + UserGetPubShards(ctx context.Context, msg *UserGetPubShards) (*UserGetPubShardsResp, *rpc.CodeError) +} + +type CreatePubShards struct { + // 密码原文 + Password string + MasterHub jcstypes.HubID + // 公共分片存储的名称,不要求唯一 + Name string + // 用户空间所在的存储服务配置 + Storage jcstypes.StorageType + // 用户在指定存储节点的凭证信息,比如用户账户,AK/SK等 + Credential jcstypes.StorageCredential + // 用户空间的分片存储配置 + ShardStore jcstypes.ShardStoreUserConfig + // 存储服务特性功能的配置 + Features []jcstypes.StorageFeature + // 各种组件保存数据的根目录。组件工作过程中都会以这个目录为根(除了BaseStore)。 + WorkingDir jcstypes.JPath +} +type CreatePubShardsResp struct { + PubShardStore jcstypes.PubShards +} + +var _ = TokenAuth(Coordinator_CreatePubShards_FullMethodName) + +func (c *Client) CreatePubShards(ctx context.Context, msg *CreatePubShards) (*CreatePubShardsResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*CreatePubShardsResp](c.cli.CreatePubShards, ctx, msg) +} +func (s *Server) CreatePubShards(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.CreatePubShards, ctx, msg) +} + +// Hub端加载公共分片存储信息 +type HubLoadPubShards struct { + // 公共分片存储的ID + PubShardsID jcstypes.PubShardsID +} +type HubLoadPubShardsResp struct { + PubShards jcstypes.PubShards +} + +func (c *Client) HubLoadPubShards(ctx context.Context, msg *HubLoadPubShards) (*HubLoadPubShardsResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*HubLoadPubShardsResp](c.cli.HubLoadPubShards, ctx, msg) +} +func (s *Server) HubLoadPubShards(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.HubLoadPubShards, ctx, msg) +} + +// 用户获取公共分片存储信息,需要验证密码 +type UserGetPubShards struct { + // 公共分片存储的ID + PubShardsID jcstypes.PubShardsID + Password string +} +type UserGetPubShardsResp struct { + PubShards jcstypes.PubShards + MasterHub jcstypes.Hub +} + +var _ = TokenAuth(Coordinator_UserGetPubShards_FullMethodName) + +func (c *Client) UserGetPubShards(ctx context.Context, msg *UserGetPubShards) (*UserGetPubShardsResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*UserGetPubShardsResp](c.cli.UserGetPubShards, ctx, msg) +} +func (s *Server) UserGetPubShards(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.UserGetPubShards, ctx, msg) +} diff --git a/common/pkgs/rpc/coordinator/server.go b/common/pkgs/rpc/coordinator/server.go index 3a659e3..5768615 100644 --- a/common/pkgs/rpc/coordinator/server.go +++ b/common/pkgs/rpc/coordinator/server.go @@ -8,6 +8,7 @@ type CoordinatorAPI interface { HubService StorageService UserService + PubShardsService } type Server struct { diff --git a/common/pkgs/rpc/hub/hub.pb.go b/common/pkgs/rpc/hub/hub.pb.go index eb7a201..df1bf3c 100644 --- a/common/pkgs/rpc/hub/hub.pb.go +++ b/common/pkgs/rpc/hub/hub.pb.go @@ -26,7 +26,7 @@ var file_pkgs_rpc_hub_hub_proto_rawDesc = []byte{ 0x0a, 0x16, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x75, 0x62, 0x2f, 0x68, 0x75, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x68, 0x75, 0x62, 0x72, 0x70, 0x63, 0x1a, 0x12, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 
0x2f, 0x72, 0x70, 0x63, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xf5, 0x02, 0x0a, 0x03, 0x48, 0x75, 0x62, 0x12, 0x2c, 0x0a, 0x0d, + 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xde, 0x04, 0x0a, 0x03, 0x48, 0x75, 0x62, 0x12, 0x2c, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0c, 0x53, 0x65, @@ -49,12 +49,26 @@ var file_pkgs_rpc_hub_hub_proto_rawDesc = []byte{ 0x3b, 0x0a, 0x1c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x55, 0x73, 0x65, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, - 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, - 0x67, 0x69, 0x74, 0x6c, 0x69, 0x6e, 0x6b, 0x2e, 0x6f, 0x72, 0x67, 0x2e, 0x63, 0x6e, 0x2f, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x61, 0x6d, 0x2f, 0x6a, 0x63, 0x73, 0x2d, 0x70, 0x75, 0x62, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, - 0x2f, 0x68, 0x75, 0x62, 0x72, 0x70, 0x63, 0x3b, 0x68, 0x75, 0x62, 0x72, 0x70, 0x63, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x0e, + 0x50, 0x75, 0x62, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x0c, + 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x0d, 0x50, + 0x75, 0x62, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0c, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x10, 0x50, 0x75, 0x62, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x6c, 0x12, 0x0c, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, + 0x63, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0b, 0x50, 0x75, + 0x62, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x47, 0x43, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x6c, 0x69, 0x6e, 0x6b, + 0x2e, 0x6f, 0x72, 0x67, 0x2e, 0x63, 0x6e, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x61, + 0x6d, 0x2f, 0x6a, 0x63, 0x73, 0x2d, 0x70, 0x75, 0x62, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x75, 0x62, 0x72, 0x70, 0x63, + 0x3b, 0x68, 0x75, 0x62, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_pkgs_rpc_hub_hub_proto_goTypes = []any{ @@ -63,27 +77,37 @@ var file_pkgs_rpc_hub_hub_proto_goTypes = []any{ (*rpc.Response)(nil), // 2: rpc.Response } var 
file_pkgs_rpc_hub_hub_proto_depIdxs = []int32{ - 0, // 0: hubrpc.Hub.ExecuteIOPlan:input_type -> rpc.Request - 1, // 1: hubrpc.Hub.SendIOStream:input_type -> rpc.ChunkedData - 0, // 2: hubrpc.Hub.GetIOStream:input_type -> rpc.Request - 0, // 3: hubrpc.Hub.SendIOVar:input_type -> rpc.Request - 0, // 4: hubrpc.Hub.GetIOVar:input_type -> rpc.Request - 0, // 5: hubrpc.Hub.Ping:input_type -> rpc.Request - 0, // 6: hubrpc.Hub.GetState:input_type -> rpc.Request - 0, // 7: hubrpc.Hub.NotifyUserAccessTokenInvalid:input_type -> rpc.Request - 2, // 8: hubrpc.Hub.ExecuteIOPlan:output_type -> rpc.Response - 2, // 9: hubrpc.Hub.SendIOStream:output_type -> rpc.Response - 1, // 10: hubrpc.Hub.GetIOStream:output_type -> rpc.ChunkedData - 2, // 11: hubrpc.Hub.SendIOVar:output_type -> rpc.Response - 2, // 12: hubrpc.Hub.GetIOVar:output_type -> rpc.Response - 2, // 13: hubrpc.Hub.Ping:output_type -> rpc.Response - 2, // 14: hubrpc.Hub.GetState:output_type -> rpc.Response - 2, // 15: hubrpc.Hub.NotifyUserAccessTokenInvalid:output_type -> rpc.Response - 8, // [8:16] is the sub-list for method output_type - 0, // [0:8] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: hubrpc.Hub.ExecuteIOPlan:input_type -> rpc.Request + 1, // 1: hubrpc.Hub.SendIOStream:input_type -> rpc.ChunkedData + 0, // 2: hubrpc.Hub.GetIOStream:input_type -> rpc.Request + 0, // 3: hubrpc.Hub.SendIOVar:input_type -> rpc.Request + 0, // 4: hubrpc.Hub.GetIOVar:input_type -> rpc.Request + 0, // 5: hubrpc.Hub.Ping:input_type -> rpc.Request + 0, // 6: hubrpc.Hub.GetState:input_type -> rpc.Request + 0, // 7: hubrpc.Hub.NotifyUserAccessTokenInvalid:input_type -> rpc.Request + 0, // 8: hubrpc.Hub.PubShardsStore:input_type -> rpc.Request + 0, // 9: hubrpc.Hub.PubShardsInfo:input_type -> rpc.Request + 0, // 10: hubrpc.Hub.PubShardsListAll:input_type -> rpc.Request + 0, // 11: hubrpc.Hub.PubShardsGC:input_type -> rpc.Request + 0, // 12: hubrpc.Hub.PubShardsStats:input_type -> rpc.Request + 2, // 13: hubrpc.Hub.ExecuteIOPlan:output_type -> rpc.Response + 2, // 14: hubrpc.Hub.SendIOStream:output_type -> rpc.Response + 1, // 15: hubrpc.Hub.GetIOStream:output_type -> rpc.ChunkedData + 2, // 16: hubrpc.Hub.SendIOVar:output_type -> rpc.Response + 2, // 17: hubrpc.Hub.GetIOVar:output_type -> rpc.Response + 2, // 18: hubrpc.Hub.Ping:output_type -> rpc.Response + 2, // 19: hubrpc.Hub.GetState:output_type -> rpc.Response + 2, // 20: hubrpc.Hub.NotifyUserAccessTokenInvalid:output_type -> rpc.Response + 2, // 21: hubrpc.Hub.PubShardsStore:output_type -> rpc.Response + 2, // 22: hubrpc.Hub.PubShardsInfo:output_type -> rpc.Response + 2, // 23: hubrpc.Hub.PubShardsListAll:output_type -> rpc.Response + 2, // 24: hubrpc.Hub.PubShardsGC:output_type -> rpc.Response + 2, // 25: hubrpc.Hub.PubShardsStats:output_type -> rpc.Response + 13, // [13:26] is the sub-list for method output_type + 0, // [0:13] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } func init() { file_pkgs_rpc_hub_hub_proto_init() } diff --git a/common/pkgs/rpc/hub/hub.proto b/common/pkgs/rpc/hub/hub.proto index 6144e49..c773fb2 100644 --- a/common/pkgs/rpc/hub/hub.proto +++ b/common/pkgs/rpc/hub/hub.proto @@ -18,4 +18,10 @@ service Hub { rpc GetState(rpc.Request) returns(rpc.Response); rpc 
NotifyUserAccessTokenInvalid(rpc.Request) returns(rpc.Response); + + rpc PubShardsStore(rpc.Request) returns(rpc.Response); + rpc PubShardsInfo(rpc.Request) returns(rpc.Response); + rpc PubShardsListAll(rpc.Request) returns(rpc.Response); + rpc PubShardsGC(rpc.Request) returns(rpc.Response); + rpc PubShardsStats(rpc.Request) returns(rpc.Response); } \ No newline at end of file diff --git a/common/pkgs/rpc/hub/hub_grpc.pb.go b/common/pkgs/rpc/hub/hub_grpc.pb.go index ec65369..8d4264a 100644 --- a/common/pkgs/rpc/hub/hub_grpc.pb.go +++ b/common/pkgs/rpc/hub/hub_grpc.pb.go @@ -28,6 +28,11 @@ const ( Hub_Ping_FullMethodName = "/hubrpc.Hub/Ping" Hub_GetState_FullMethodName = "/hubrpc.Hub/GetState" Hub_NotifyUserAccessTokenInvalid_FullMethodName = "/hubrpc.Hub/NotifyUserAccessTokenInvalid" + Hub_PubShardsStore_FullMethodName = "/hubrpc.Hub/PubShardsStore" + Hub_PubShardsInfo_FullMethodName = "/hubrpc.Hub/PubShardsInfo" + Hub_PubShardsListAll_FullMethodName = "/hubrpc.Hub/PubShardsListAll" + Hub_PubShardsGC_FullMethodName = "/hubrpc.Hub/PubShardsGC" + Hub_PubShardsStats_FullMethodName = "/hubrpc.Hub/PubShardsStats" ) // HubClient is the client API for Hub service. @@ -42,6 +47,11 @@ type HubClient interface { Ping(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) GetState(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) NotifyUserAccessTokenInvalid(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + PubShardsStore(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + PubShardsInfo(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + PubShardsListAll(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + PubShardsGC(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) + PubShardsStats(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) } type hubClient struct { @@ -172,6 +182,51 @@ func (c *hubClient) NotifyUserAccessTokenInvalid(ctx context.Context, in *rpc.Re return out, nil } +func (c *hubClient) PubShardsStore(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Hub_PubShardsStore_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hubClient) PubShardsInfo(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Hub_PubShardsInfo_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hubClient) PubShardsListAll(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Hub_PubShardsListAll_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *hubClient) PubShardsGC(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Hub_PubShardsGC_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *hubClient) PubShardsStats(ctx context.Context, in *rpc.Request, opts ...grpc.CallOption) (*rpc.Response, error) { + out := new(rpc.Response) + err := c.cc.Invoke(ctx, Hub_PubShardsStats_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // HubServer is the server API for Hub service. // All implementations must embed UnimplementedHubServer // for forward compatibility @@ -184,6 +239,11 @@ type HubServer interface { Ping(context.Context, *rpc.Request) (*rpc.Response, error) GetState(context.Context, *rpc.Request) (*rpc.Response, error) NotifyUserAccessTokenInvalid(context.Context, *rpc.Request) (*rpc.Response, error) + PubShardsStore(context.Context, *rpc.Request) (*rpc.Response, error) + PubShardsInfo(context.Context, *rpc.Request) (*rpc.Response, error) + PubShardsListAll(context.Context, *rpc.Request) (*rpc.Response, error) + PubShardsGC(context.Context, *rpc.Request) (*rpc.Response, error) + PubShardsStats(context.Context, *rpc.Request) (*rpc.Response, error) mustEmbedUnimplementedHubServer() } @@ -215,6 +275,21 @@ func (UnimplementedHubServer) GetState(context.Context, *rpc.Request) (*rpc.Resp func (UnimplementedHubServer) NotifyUserAccessTokenInvalid(context.Context, *rpc.Request) (*rpc.Response, error) { return nil, status.Errorf(codes.Unimplemented, "method NotifyUserAccessTokenInvalid not implemented") } +func (UnimplementedHubServer) PubShardsStore(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PubShardsStore not implemented") +} +func (UnimplementedHubServer) PubShardsInfo(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PubShardsInfo not implemented") +} +func (UnimplementedHubServer) PubShardsListAll(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PubShardsListAll not implemented") +} +func (UnimplementedHubServer) PubShardsGC(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PubShardsGC not implemented") +} +func (UnimplementedHubServer) PubShardsStats(context.Context, *rpc.Request) (*rpc.Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PubShardsStats not implemented") +} func (UnimplementedHubServer) mustEmbedUnimplementedHubServer() {} // UnsafeHubServer may be embedded to opt out of forward compatibility for this service. 
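Editor's note (illustrative sketch, not part of the patch): all five new Hub methods reuse the generic rpc.Request/rpc.Response envelope; the typed request and response structs are added later in this patch in common/pkgs/rpc/hub/pub_shards.go. Below is a minimal sketch of calling one of them through the connection pool, assuming a pool configured with an AddrProvider that can resolve the hub ID (as wired up in client/internal/cmdline/serve.go); the package name, the helper queryShardInfo, and its arguments are placeholders.

package example

import (
	"context"
	"fmt"

	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
	hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
)

// queryShardInfo asks the master hub of a public shard store for the metadata
// of a single shard, identified by its file hash.
func queryShardInfo(masterHub jcstypes.HubID, id jcstypes.PubShardsID, password string, hash jcstypes.FileHash) error {
	// GetByID resolves the hub address via the pool's AddrProvider and returns
	// a client whose calls fail fast (fusedErr) when no address is known.
	cli := stgglb.HubRPCPool.GetByID(masterHub)
	defer cli.Release()

	// The typed wrapper marshals the message into the generic rpc.Request envelope.
	resp, cerr := cli.PubShardsInfo(context.Background(), &hubrpc.PubShardsInfo{
		PubShardsID: id,
		Password:    password,
		FileHash:    hash,
	})
	if cerr != nil {
		return cerr.ToError()
	}

	fmt.Printf("shard %v: path=%v size=%v\n", hash, resp.Info.Path, resp.Info.Size)
	return nil
}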
@@ -383,6 +458,96 @@ func _Hub_NotifyUserAccessTokenInvalid_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _Hub_PubShardsStore_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HubServer).PubShardsStore(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Hub_PubShardsStore_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HubServer).PubShardsStore(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Hub_PubShardsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HubServer).PubShardsInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Hub_PubShardsInfo_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HubServer).PubShardsInfo(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Hub_PubShardsListAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HubServer).PubShardsListAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Hub_PubShardsListAll_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HubServer).PubShardsListAll(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Hub_PubShardsGC_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HubServer).PubShardsGC(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Hub_PubShardsGC_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HubServer).PubShardsGC(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Hub_PubShardsStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(rpc.Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HubServer).PubShardsStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Hub_PubShardsStats_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HubServer).PubShardsStats(ctx, req.(*rpc.Request)) + } + return interceptor(ctx, in, info, handler) +} + // Hub_ServiceDesc is the grpc.ServiceDesc for Hub service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -414,6 +579,26 @@ var Hub_ServiceDesc = grpc.ServiceDesc{ MethodName: "NotifyUserAccessTokenInvalid", Handler: _Hub_NotifyUserAccessTokenInvalid_Handler, }, + { + MethodName: "PubShardsStore", + Handler: _Hub_PubShardsStore_Handler, + }, + { + MethodName: "PubShardsInfo", + Handler: _Hub_PubShardsInfo_Handler, + }, + { + MethodName: "PubShardsListAll", + Handler: _Hub_PubShardsListAll_Handler, + }, + { + MethodName: "PubShardsGC", + Handler: _Hub_PubShardsGC_Handler, + }, + { + MethodName: "PubShardsStats", + Handler: _Hub_PubShardsStats_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/common/pkgs/rpc/hub/pool.go b/common/pkgs/rpc/hub/pool.go index 515f1ff..c7b98c9 100644 --- a/common/pkgs/rpc/hub/pool.go +++ b/common/pkgs/rpc/hub/pool.go @@ -8,10 +8,12 @@ import ( "gitlink.org.cn/cloudream/common/consts/errorcode" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" ) type PoolConfig struct { - Conn rpc.PoolConfig + Conn rpc.PoolConfig + AddrProvider func(id jcstypes.HubID) string } type PoolConfigJSON struct { @@ -47,11 +49,13 @@ func (c *PoolConfigJSON) Build(tokenProv rpc.AccessTokenProvider) (*PoolConfig, } type Pool struct { + cfg PoolConfig connPool *rpc.ConnPool } func NewPool(cfg PoolConfig) *Pool { return &Pool{ + cfg: cfg, connPool: rpc.NewConnPool(cfg.Conn), } } @@ -75,3 +79,41 @@ func (p *Pool) Get(ip string, port int) *Client { pool: p, } } + +func (p *Pool) GetByID(hubID jcstypes.HubID) *Client { + if p.cfg.AddrProvider == nil { + return &Client{ + addr: "", + con: nil, + pool: p, + fusedErr: rpc.Failed(errorcode.OperationFailed, "no address provider"), + } + } + + addr := p.cfg.AddrProvider(hubID) + if addr == "" { + return &Client{ + addr: "", + con: nil, + pool: p, + fusedErr: rpc.Failed(errorcode.OperationFailed, "no address for hub %v", hubID), + } + } + + con, err := p.connPool.GetConnection(addr) + if err != nil { + return &Client{ + addr: addr, + con: nil, + pool: p, + fusedErr: rpc.Failed(errorcode.OperationFailed, err.Error()), + } + } + + return &Client{ + addr: addr, + con: con, + cli: NewHubClient(con), + pool: p, + } +} diff --git a/common/pkgs/rpc/hub/pub_shards.go b/common/pkgs/rpc/hub/pub_shards.go new file mode 100644 index 0000000..1bdc408 --- /dev/null +++ b/common/pkgs/rpc/hub/pub_shards.go @@ -0,0 +1,127 @@ +package hubrpc + +import ( + context "context" + + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" + stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" +) + +type PubShardsService interface { + PubShardsStore(ctx context.Context, msg *PubShardsStore) (*PubShardsStoreResp, *rpc.CodeError) + PubShardsInfo(ctx context.Context, msg *PubShardsInfo) (*PubShardsInfoResp, *rpc.CodeError) + PubShardsGC(ctx context.Context, msg *PubShardsGC) (*PubShardsGCResp, *rpc.CodeError) + PubShardsListAll(ctx context.Context, msg *PubShardsListAll) (*PubShardsListAllResp, *rpc.CodeError) + PubShardsStats(ctx context.Context, msg *PubShardsStats) (*PubShardsStatsResp, *rpc.CodeError) +} + +// 将一个分片纳入公共分片存储 +type PubShardsStore struct { + // 公共分片存储的ID + PubShardsID jcstypes.PubShardsID + Password string + Path jcstypes.JPath + Size int64 + Hash jcstypes.FileHash +} +type PubShardsStoreResp struct { + Info stgtypes.FileInfo +} + +var _ = TokenAuth(Hub_PubShardsStore_FullMethodName) + +func (c *Client) 
PubShardsStore(ctx context.Context, msg *PubShardsStore) (*PubShardsStoreResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*PubShardsStoreResp](c.cli.PubShardsStore, ctx, msg) +} +func (s *Server) PubShardsStore(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.PubShardsStore, ctx, msg) +} + +// 获取指定分片的信息 +type PubShardsInfo struct { + PubShardsID jcstypes.PubShardsID + Password string + FileHash jcstypes.FileHash +} +type PubShardsInfoResp struct { + Info stgtypes.FileInfo +} + +var _ = TokenAuth(Hub_PubShardsInfo_FullMethodName) + +func (c *Client) PubShardsInfo(ctx context.Context, msg *PubShardsInfo) (*PubShardsInfoResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*PubShardsInfoResp](c.cli.PubShardsInfo, ctx, msg) +} +func (s *Server) PubShardsInfo(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.PubShardsInfo, ctx, msg) +} + +// 重置本Client的FileHash引用记录 +type PubShardsGC struct { + PubShardsID jcstypes.PubShardsID + Password string + // 新的FileHash引用记录 + FileHashes []jcstypes.FileHash +} +type PubShardsGCResp struct{} + +var _ = TokenAuth(Hub_PubShardsGC_FullMethodName) + +func (c *Client) PubShardsGC(ctx context.Context, msg *PubShardsGC) (*PubShardsGCResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*PubShardsGCResp](c.cli.PubShardsGC, ctx, msg) +} +func (s *Server) PubShardsGC(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.PubShardsGC, ctx, msg) +} + +// 获取所有分片的信息 +type PubShardsListAll struct { + PubShardsID jcstypes.PubShardsID + Password string +} +type PubShardsListAllResp struct { + Infos []stgtypes.FileInfo +} + +var _ = TokenAuth(Hub_PubShardsListAll_FullMethodName) + +func (c *Client) PubShardsListAll(ctx context.Context, msg *PubShardsListAll) (*PubShardsListAllResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*PubShardsListAllResp](c.cli.PubShardsListAll, ctx, msg) +} +func (s *Server) PubShardsListAll(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.PubShardsListAll, ctx, msg) +} + +// 获取统计信息 +type PubShardsStats struct { + PubShardsID jcstypes.PubShardsID + Password string +} +type PubShardsStatsResp struct { + Stats stgtypes.Stats +} + +var _ = TokenAuth(Hub_PubShardsStats_FullMethodName) + +func (c *Client) PubShardsStats(ctx context.Context, msg *PubShardsStats) (*PubShardsStatsResp, *rpc.CodeError) { + if c.fusedErr != nil { + return nil, c.fusedErr + } + return rpc.UnaryClient[*PubShardsStatsResp](c.cli.PubShardsStats, ctx, msg) +} +func (s *Server) PubShardsStats(ctx context.Context, msg *rpc.Request) (*rpc.Response, error) { + return rpc.UnaryServer(s.svrImpl.PubShardsStats, ctx, msg) +} diff --git a/common/pkgs/rpc/hub/server.go b/common/pkgs/rpc/hub/server.go index 89b6bf7..91afaeb 100644 --- a/common/pkgs/rpc/hub/server.go +++ b/common/pkgs/rpc/hub/server.go @@ -9,6 +9,7 @@ type HubAPI interface { IOSwitchSvc MicsSvc UserSvc + PubShardsService } type Server struct { diff --git a/common/pkgs/rpc/hub/user_space.go b/common/pkgs/rpc/hub/user_space.go deleted file mode 100644 index dbc50e5..0000000 --- a/common/pkgs/rpc/hub/user_space.go +++ /dev/null @@ -1,55 +0,0 @@ -package hubrpc - -/* -import ( - "context" - - jcstypes 
"gitlink.org.cn/cloudream/jcs-pub/common/types" - "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" -) - -type UserSpaceSvc interface { - BaseStoreListAll(ctx context.Context, req *BaseStoreListAll) (*BaseStoreListAllResp, *rpc.CodeError) - BaseStoreMkdirs(ctx context.Context, req *BaseStoreMkdirs) (*BaseStoreMkdirsResp, *rpc.CodeError) -} - -// 列出指定BaseStore的指定位置内的所有文件 -type BaseStoreListAll struct { - UserSpace jcstypes.UserSpaceDetail - Path string -} -type BaseStoreListAllResp struct { - Entries []stgtypes.ListEntry -} - -func (c *Client) BaseStoreListAll(ctx context.Context, req *BaseStoreListAll) (*BaseStoreListAllResp, *rpc.CodeError) { - if c.fusedErr != nil { - return nil, c.fusedErr - } - return rpc.UnaryClient[*BaseStoreListAllResp](c.cli.BaseStoreListAll, ctx, req) -} -func (s *Server) BaseStoreListAll(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { - return rpc.UnaryServer(s.svrImpl.BaseStoreListAll, ctx, req) -} - -// 批量在指定BaseStore中创建文件夹 -type BaseStoreMkdirs struct { - UserSpace jcstypes.UserSpaceDetail - Pathes []string -} - -type BaseStoreMkdirsResp struct { - Successes []bool -} - -func (c *Client) BaseStoreMkdirs(ctx context.Context, req *BaseStoreMkdirs) (*BaseStoreMkdirsResp, *rpc.CodeError) { - if c.fusedErr != nil { - return nil, c.fusedErr - } - return rpc.UnaryClient[*BaseStoreMkdirsResp](c.cli.BaseStoreMkdirs, ctx, req) -} -func (s *Server) BaseStoreMkdirs(ctx context.Context, req *rpc.Request) (*rpc.Response, error) { - return rpc.UnaryServer(s.svrImpl.BaseStoreMkdirs, ctx, req) -} -*/ diff --git a/common/pkgs/rpc/utils.go b/common/pkgs/rpc/utils.go index ff8fbfb..47589c7 100644 --- a/common/pkgs/rpc/utils.go +++ b/common/pkgs/rpc/utils.go @@ -7,6 +7,7 @@ import ( "gitlink.org.cn/cloudream/common/consts/errorcode" "gitlink.org.cn/cloudream/common/utils/io2" "gitlink.org.cn/cloudream/common/utils/serder" + "gitlink.org.cn/cloudream/jcs-pub/common/ecode" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -243,9 +244,9 @@ func DownloadStreamServer[Resp DownloadStreamResp, Req any, APIRet DownloadStrea return nil } -func Failed(errCode string, format string, args ...any) *CodeError { +func Failed(errCode ecode.ErrorCode, format string, args ...any) *CodeError { return &CodeError{ - Code: errCode, + Code: string(errCode), Message: fmt.Sprintf(format, args...), } } @@ -282,7 +283,7 @@ func getCodeError(err error) *CodeError { return Failed(errorcode.OperationFailed, err.Error()) } -func MakeCodeError(code string, msg string) error { +func MakeCodeError(code ecode.ErrorCode, msg string) error { ce, _ := status.New(codes.Unknown, "custom error").WithDetails(Failed(code, msg)) return ce.Err() } diff --git a/common/pkgs/storage/all.go b/common/pkgs/storage/all.go index ce1107a..46c8646 100644 --- a/common/pkgs/storage/all.go +++ b/common/pkgs/storage/all.go @@ -5,4 +5,5 @@ import ( _ "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/efile" _ "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/local" _ "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/obs" + _ "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pubshards" ) diff --git a/common/pkgs/storage/pubshards/pubshards.go b/common/pkgs/storage/pubshards/pubshards.go new file mode 100644 index 0000000..f57e54a --- /dev/null +++ b/common/pkgs/storage/pubshards/pubshards.go @@ -0,0 +1,53 @@ +package pubshards + +import ( + 
"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory/reg" + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" +) + +func init() { + reg.RegisterBuilder[*jcstypes.PubShardsType](newBuilder) +} + +type builder struct { + types.EmptyBuilder + detail *jcstypes.UserSpaceDetail +} + +func newBuilder(detail *jcstypes.UserSpaceDetail) types.StorageBuilder { + return &builder{ + detail: detail, + } +} + +func (b *builder) FeatureDesc() types.FeatureDesc { + return types.FeatureDesc{} +} + +func (b *builder) CreateShardStore(typeOnly bool) (types.ShardStore, error) { + stgType := b.detail.UserSpace.Storage.(*jcstypes.PubShardsType) + + if typeOnly { + return (*ShardStore)(nil), nil + } + + return NewShardStore(b.detail, stgType) +} + +func (b *builder) CreateBaseStore(typeOnly bool) (types.BaseStore, error) { + stgType := b.detail.UserSpace.Storage.(*jcstypes.PubShardsType) + newDeatil := jcstypes.UserSpaceDetail{ + UserID: b.detail.UserID, + UserSpace: jcstypes.UserSpace{ + Storage: stgType.Base, + Credential: b.detail.UserSpace.Credential, + ShardStore: b.detail.UserSpace.ShardStore, + Features: b.detail.UserSpace.Features, + WorkingDir: b.detail.UserSpace.WorkingDir, + }, + RecommendHub: b.detail.RecommendHub, + } + blder := reg.GetBuilderInternal(&newDeatil) + return blder.CreateBaseStore(typeOnly) +} diff --git a/common/pkgs/storage/pubshards/shard_store.go b/common/pkgs/storage/pubshards/shard_store.go new file mode 100644 index 0000000..a185e7f --- /dev/null +++ b/common/pkgs/storage/pubshards/shard_store.go @@ -0,0 +1,100 @@ +package pubshards + +import ( + "context" + "fmt" + + stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" + hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" +) + +type ShardStore struct { + detail *jcstypes.UserSpaceDetail + stgType *jcstypes.PubShardsType + hubCli *hubrpc.Client +} + +func NewShardStore(detail *jcstypes.UserSpaceDetail, stgType *jcstypes.PubShardsType) (*ShardStore, error) { + if stgglb.StandaloneMode { + return nil, fmt.Errorf("pub shards only support online mode") + } + + return &ShardStore{ + detail: detail, + stgType: stgType, + }, nil +} + +func (s *ShardStore) Start(ch *types.StorageEventChan) { + s.hubCli = stgglb.HubRPCPool.GetByID(s.stgType.MasterHub) +} + +func (s *ShardStore) Stop() { + if s.hubCli != nil { + s.hubCli.Release() + s.hubCli = nil + } +} + +func (s *ShardStore) Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (types.FileInfo, error) { + resp, cerr := s.hubCli.PubShardsStore(context.Background(), &hubrpc.PubShardsStore{ + PubShardsID: s.stgType.PubShardsID, + Password: s.stgType.Password, + Path: path, + Hash: hash, + Size: size, + }) + if cerr != nil { + return types.FileInfo{}, cerr.ToError() + } + + return resp.Info, nil +} + +func (s *ShardStore) Info(hash jcstypes.FileHash) (types.FileInfo, error) { + resp, cerr := s.hubCli.PubShardsInfo(context.Background(), &hubrpc.PubShardsInfo{ + PubShardsID: s.stgType.PubShardsID, + Password: s.stgType.Password, + FileHash: hash, + }) + if cerr != nil { + return types.FileInfo{}, cerr.ToError() + } + return resp.Info, nil +} + +func (s *ShardStore) ListAll() ([]types.FileInfo, error) { + resp, cerr := s.hubCli.PubShardsListAll(context.Background(), &hubrpc.PubShardsListAll{ + PubShardsID: s.stgType.PubShardsID, + Password: s.stgType.Password, + }) + if cerr 
diff --git a/common/pkgs/storage/pubshards/shard_store.go b/common/pkgs/storage/pubshards/shard_store.go
new file mode 100644
index 0000000..a185e7f
--- /dev/null
+++ b/common/pkgs/storage/pubshards/shard_store.go
@@ -0,0 +1,100 @@
+package pubshards
+
+import (
+	"context"
+	"fmt"
+
+	stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals"
+	hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub"
+	"gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types"
+	jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types"
+)
+
+type ShardStore struct {
+	detail  *jcstypes.UserSpaceDetail
+	stgType *jcstypes.PubShardsType
+	hubCli  *hubrpc.Client
+}
+
+func NewShardStore(detail *jcstypes.UserSpaceDetail, stgType *jcstypes.PubShardsType) (*ShardStore, error) {
+	if stgglb.StandaloneMode {
+		return nil, fmt.Errorf("pub shards only support online mode")
+	}
+
+	return &ShardStore{
+		detail:  detail,
+		stgType: stgType,
+	}, nil
+}
+
+func (s *ShardStore) Start(ch *types.StorageEventChan) {
+	s.hubCli = stgglb.HubRPCPool.GetByID(s.stgType.MasterHub)
+}
+
+func (s *ShardStore) Stop() {
+	if s.hubCli != nil {
+		s.hubCli.Release()
+		s.hubCli = nil
+	}
+}
+
+func (s *ShardStore) Store(path jcstypes.JPath, hash jcstypes.FileHash, size int64) (types.FileInfo, error) {
+	resp, cerr := s.hubCli.PubShardsStore(context.Background(), &hubrpc.PubShardsStore{
+		PubShardsID: s.stgType.PubShardsID,
+		Password:    s.stgType.Password,
+		Path:        path,
+		Hash:        hash,
+		Size:        size,
+	})
+	if cerr != nil {
+		return types.FileInfo{}, cerr.ToError()
+	}
+
+	return resp.Info, nil
+}
+
+func (s *ShardStore) Info(hash jcstypes.FileHash) (types.FileInfo, error) {
+	resp, cerr := s.hubCli.PubShardsInfo(context.Background(), &hubrpc.PubShardsInfo{
+		PubShardsID: s.stgType.PubShardsID,
+		Password:    s.stgType.Password,
+		FileHash:    hash,
+	})
+	if cerr != nil {
+		return types.FileInfo{}, cerr.ToError()
+	}
+	return resp.Info, nil
+}
+
+func (s *ShardStore) ListAll() ([]types.FileInfo, error) {
+	resp, cerr := s.hubCli.PubShardsListAll(context.Background(), &hubrpc.PubShardsListAll{
+		PubShardsID: s.stgType.PubShardsID,
+		Password:    s.stgType.Password,
+	})
+	if cerr != nil {
+		return nil, cerr.ToError()
+	}
+	return resp.Infos, nil
+}
+
+func (s *ShardStore) GC(available []jcstypes.FileHash) error {
+	_, cerr := s.hubCli.PubShardsGC(context.Background(), &hubrpc.PubShardsGC{
+		PubShardsID: s.stgType.PubShardsID,
+		Password:    s.stgType.Password,
+		FileHashes:  available,
+	})
+	if cerr != nil {
+		return cerr.ToError()
+	}
+	return nil
+}
+
+func (s *ShardStore) Stats() types.Stats {
+	resp, cerr := s.hubCli.PubShardsStats(context.Background(), &hubrpc.PubShardsStats{
+		PubShardsID: s.stgType.PubShardsID,
+		Password:    s.stgType.Password,
+	})
+	if cerr != nil {
+		return types.Stats{}
+	}
+	return resp.Stats
+}
diff --git a/common/types/client.go b/common/types/client.go
index 24c88d4..649ab4f 100644
--- a/common/types/client.go
+++ b/common/types/client.go
@@ -1,4 +1,4 @@
-package types
+package jcstypes
 
 import (
 	"fmt"
diff --git a/common/types/coordinator.go b/common/types/coordinator.go
index a1117e3..01dae03 100644
--- a/common/types/coordinator.go
+++ b/common/types/coordinator.go
@@ -1,4 +1,4 @@
-package types
+package jcstypes
 
 import (
 	"fmt"
@@ -14,6 +14,8 @@ type HubID int64
 
 type UserID int64
 
+type PubShardsID string
+
 type Hub struct {
 	HubID HubID `gorm:"column:HubID; primaryKey; type:bigint; autoIncrement" json:"hubID"`
 	Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"`
@@ -114,3 +116,35 @@ type LoadedAccessToken struct {
 func (LoadedAccessToken) TableName() string {
 	return "LoadedAccessToken"
 }
+
+type PubShards struct {
+	PubShardsID PubShardsID `gorm:"column:PubShardsID; primaryKey; type:char(36); not null" json:"pubShardsID"`
+	// bcrypt哈希过的密码,带有盐值
+	Password string `gorm:"column:Password; type:varchar(255); not null" json:"password"`
+	// 创建者的用户ID
+	Creator UserID `gorm:"column:Creator; type:bigint; not null" json:"creator"`
+	// 管理此公共分片存储的主节点
+	MasterHub HubID `gorm:"column:MasterHub; type:bigint; not null" json:"masterHub"`
+	// 公共分片存储的名称,不要求唯一
+	Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"`
+	// 用户空间所在的存储服务配置
+	Storage StorageType `gorm:"column:Storage; type:json; not null; serializer:union" json:"storage"`
+	// 用户在指定存储节点的凭证信息,比如用户账户,AK/SK等
+	Credential StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"`
+	// 用户空间的分片存储配置
+	ShardStore ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"`
+	// 存储服务特性功能的配置
+	Features []StorageFeature `json:"features" gorm:"column:Features; type:json; serializer:union"`
+	// 各种组件保存数据的根目录。组件工作过程中都会以这个目录为根(除了BaseStore)。
+	WorkingDir JPath `gorm:"column:WorkingDir; type:varchar(1024); not null; serializer:string" json:"workingDir"`
+	// 用户空间信息的版本号,每一次更改都需要更新版本号
+	Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"`
+}
+
+func (PubShards) TableName() string {
+	return "PublicShardStore"
+}
+
+func (p *PubShards) String() string {
+	return fmt.Sprintf("%v(%v)", p.Name, p.PubShardsID)
+}
diff --git a/common/types/filehash.go b/common/types/filehash.go
index 35cebc2..7eafa56 100644
--- a/common/types/filehash.go
+++ b/common/types/filehash.go
@@ -1,4 +1,4 @@
-package types
+package jcstypes
 
 import (
 	"crypto/sha256"
diff --git a/common/types/location.go b/common/types/location.go
index 2ef2ccb..ad007f7 100644
--- a/common/types/location.go
+++ b/common/types/location.go
@@ -1,4 +1,4 @@
-package types
+package jcstypes
 
 type Location struct {
 	StorageName string `json:"storageName"`
diff --git a/common/types/path.go b/common/types/path.go index
2b234c7..339bf39 100644 --- a/common/types/path.go +++ b/common/types/path.go @@ -1,4 +1,4 @@ -package types +package jcstypes import ( "path/filepath" diff --git a/common/types/redundancy.go b/common/types/redundancy.go index bddd112..31b07db 100644 --- a/common/types/redundancy.go +++ b/common/types/redundancy.go @@ -1,4 +1,4 @@ -package types +package jcstypes import ( "github.com/samber/lo" diff --git a/common/types/space_syncer.go b/common/types/space_syncer.go index 12ed6f0..a5adba7 100644 --- a/common/types/space_syncer.go +++ b/common/types/space_syncer.go @@ -1,4 +1,4 @@ -package types +package jcstypes import ( "time" diff --git a/common/types/status.go b/common/types/status.go index 5b9e2a8..ec72f8d 100644 --- a/common/types/status.go +++ b/common/types/status.go @@ -1,4 +1,4 @@ -package types +package jcstypes import "time" diff --git a/common/types/storage.go b/common/types/storage.go index 35390df..a2a69c8 100644 --- a/common/types/storage.go +++ b/common/types/storage.go @@ -1,35 +1,12 @@ -package types +package jcstypes import ( + "fmt" + "gitlink.org.cn/cloudream/common/pkgs/types" "gitlink.org.cn/cloudream/common/utils/serder" ) -/* -type Storage struct { - StorageID StorageID `json:"storageID" gorm:"column:StorageID; primaryKey; type:bigint; autoIncrement;"` - Name string `json:"name" gorm:"column:Name; type:varchar(256); not null"` - // 完全管理此存储服务的Hub的ID - MasterHub HubID `json:"masterHub" gorm:"column:MasterHub; type:bigint; not null"` - // 存储服务的类型,包含地址信息以及鉴权所需数据 - Type StorageType `json:"type" gorm:"column:Type; type:json; not null; serializer:union"` - // 存储服务拥有的特别功能 - Features []StorageFeature `json:"features" gorm:"column:Features; type:json; serializer:union"` -} - -func (Storage) TableName() string { - return "Storage" -} - -func (s *Storage) String() string { - return fmt.Sprintf("%v(%v)", s.Name, s.StorageID) -} - -type StorageDetail struct { - Storage Storage - MasterHub *Hub -} -*/ // 存储服务地址 type StorageType interface { GetStorageType() string @@ -47,6 +24,7 @@ var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[Storage (*COSType)(nil), (*EFileType)(nil), (*S3Type)(nil), + (*PubShardsType)(nil), )), "type") type LocalType struct { @@ -233,3 +211,32 @@ func (a *S3Type) Equals(other StorageType) bool { type ShardStoreUserConfig struct { MaxSize int64 `json:"maxSize"` } + +type PubShardsType struct { + serder.Metadata `union:"PubShards"` + Type string `json:"type"` + Base StorageType `json:"base"` + PubShardsID PubShardsID `json:"pubShardsID"` + Password string `json:"password"` + MasterHub HubID `json:"masterHub"` +} + +func (a *PubShardsType) GetStorageType() string { + return "PubShards" +} + +func (a *PubShardsType) GetLocation() Location { + return a.Base.GetLocation() +} + +func (a *PubShardsType) Equals(other StorageType) bool { + o, ok := other.(*PubShardsType) + if !ok { + return false + } + return a.Base.Equals(o.Base) +} + +func (a *PubShardsType) String() string { + return fmt.Sprintf("PubShards(%v)", a.Base.String()) +} diff --git a/common/types/storage_credential.go b/common/types/storage_credential.go index 2b4a16e..33a6677 100644 --- a/common/types/storage_credential.go +++ b/common/types/storage_credential.go @@ -1,4 +1,4 @@ -package types +package jcstypes import ( "fmt" diff --git a/common/types/storage_feature.go b/common/types/storage_feature.go index 02b8f2b..6214b53 100644 --- a/common/types/storage_feature.go +++ b/common/types/storage_feature.go @@ -1,4 +1,4 @@ -package types +package jcstypes import ( 
"gitlink.org.cn/cloudream/common/pkgs/types" diff --git a/common/types/utils.go b/common/types/utils.go index 408bbe0..69eea2e 100644 --- a/common/types/utils.go +++ b/common/types/utils.go @@ -1,4 +1,4 @@ -package types +package jcstypes import ( "strings" diff --git a/coordinator/internal/cmd/migrate.go b/coordinator/internal/cmd/migrate.go index 74df83c..c2246d3 100644 --- a/coordinator/internal/cmd/migrate.go +++ b/coordinator/internal/cmd/migrate.go @@ -44,6 +44,7 @@ func migrate(configPath string) { migrateOne(db, jcstypes.User{}) migrateOne(db, jcstypes.UserAccessToken{}) migrateOne(db, jcstypes.LoadedAccessToken{}) + migrateOne(db, jcstypes.PubShards{}) fmt.Println("migrate success") } diff --git a/coordinator/internal/db/db.go b/coordinator/internal/db/db.go index bd83200..243d8f1 100644 --- a/coordinator/internal/db/db.go +++ b/coordinator/internal/db/db.go @@ -28,6 +28,16 @@ func (db *DB) DoTx(do func(tx SQLContext) error) error { }) } +func DoTx01[R any](db *DB, do func(tx SQLContext) (R, error)) (R, error) { + var ret R + err := db.db.Transaction(func(tx *gorm.DB) error { + var err error + ret, err = do(SQLContext{tx}) + return err + }) + return ret, err +} + func DoTx02[R any](db *DB, do func(tx SQLContext) (R, error)) (R, error) { var ret R err := db.db.Transaction(func(tx *gorm.DB) error { diff --git a/coordinator/internal/db/pub_shards.go b/coordinator/internal/db/pub_shards.go new file mode 100644 index 0000000..24b2f27 --- /dev/null +++ b/coordinator/internal/db/pub_shards.go @@ -0,0 +1,24 @@ +package db + +import jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + +type PubShardsDB struct { + *DB +} + +func (db *DB) PubShards() *PubShardsDB { + return &PubShardsDB{DB: db} +} + +func (*PubShardsDB) Get(ctx SQLContext, publicShardStoreID jcstypes.PubShardsID) (*jcstypes.PubShards, error) { + var cfg jcstypes.PubShards + if err := ctx.First(&cfg, publicShardStoreID).Error; err != nil { + return nil, err + } + + return &cfg, nil +} + +func (*PubShardsDB) Create(ctx SQLContext, cfg jcstypes.PubShards) error { + return ctx.Create(&cfg).Error +} diff --git a/coordinator/internal/db/string_serializer.go b/coordinator/internal/db/string_serializer.go new file mode 100644 index 0000000..fa80ccd --- /dev/null +++ b/coordinator/internal/db/string_serializer.go @@ -0,0 +1,99 @@ +package db + +import ( + "context" + "fmt" + "reflect" + + "gorm.io/gorm/schema" +) + +// 必须给结构体(而不是指针)实现此接口。FromString实现为静态方法 +type StringDBValuer interface { + ToString() (string, error) + FromString(str string) (any, error) +} + +type StringSerializer struct { +} + +func (StringSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error { + if dbValue == nil { + fieldValue := reflect.New(field.FieldType) + field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem()) + return nil + } + + str := "" + switch v := dbValue.(type) { + case []byte: + str = string(v) + case string: + str = v + default: + return fmt.Errorf("expected []byte or string, got: %T", dbValue) + } + + if field.FieldType.Kind() == reflect.Struct { + val := reflect.Zero(field.FieldType) + + sv, ok := val.Interface().(StringDBValuer) + if !ok { + return fmt.Errorf("ref of field type %v is not StringDBValuer", field.FieldType) + } + + v2, err := sv.FromString(str) + if err != nil { + return err + } + + field.ReflectValueOf(ctx, dst).Set(reflect.ValueOf(v2)) + return nil + } + + if field.FieldType.Kind() == reflect.Ptr { + val := reflect.Zero(field.FieldType.Elem()) + + sv, ok := 
val.Interface().(StringDBValuer) + if !ok { + return fmt.Errorf("field type %v is not StringDBValuer", field.FieldType) + } + + v2, err := sv.FromString(str) + if err != nil { + return err + } + + field.ReflectValueOf(ctx, dst).Set(reflect.ValueOf(v2)) + return nil + } + + return fmt.Errorf("unsupported field type: %v", field.FieldType) +} + +func (StringSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) { + val := reflect.ValueOf(fieldValue) + if val.Kind() == reflect.Struct { + sv, ok := val.Interface().(StringDBValuer) + if !ok { + return nil, fmt.Errorf("ref of field type %v is not StringDBValuer", field.FieldType) + } + + return sv.ToString() + } + + if val.Kind() == reflect.Ptr { + sv, ok := val.Elem().Interface().(StringDBValuer) + if !ok { + return nil, fmt.Errorf("field type %v is not StringDBValuer", field.FieldType) + } + + return sv.ToString() + } + + return nil, fmt.Errorf("unsupported field type: %v", field.FieldType) +} + +func init() { + schema.RegisterSerializer("string", StringSerializer{}) +} diff --git a/coordinator/internal/rpc/pub_shards.go b/coordinator/internal/rpc/pub_shards.go new file mode 100644 index 0000000..3817ac6 --- /dev/null +++ b/coordinator/internal/rpc/pub_shards.go @@ -0,0 +1,100 @@ +package rpc + +import ( + "context" + "encoding/hex" + + "github.com/google/uuid" + "gitlink.org.cn/cloudream/jcs-pub/common/ecode" + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" + corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + "gitlink.org.cn/cloudream/jcs-pub/coordinator/internal/db" + "golang.org/x/crypto/bcrypt" + "gorm.io/gorm" +) + +func (svc *Service) CreatePubShards(ctx context.Context, msg *corrpc.CreatePubShards) (*corrpc.CreatePubShardsResp, *rpc.CodeError) { + authInfo, ok := rpc.GetAuthInfo(ctx) + if !ok { + return nil, rpc.Failed(ecode.Unauthorized, "unauthorized") + } + + passHash, err := bcrypt.GenerateFromPassword([]byte(msg.Password), bcrypt.DefaultCost) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "generate hash: %v", err) + } + + cfg, err := db.DoTx01(svc.db, func(tx db.SQLContext) (jcstypes.PubShards, error) { + _, err := svc.db.Hub().GetByID(tx, msg.MasterHub) + if err != nil { + return jcstypes.PubShards{}, err + } + + cfg := jcstypes.PubShards{ + PubShardsID: jcstypes.PubShardsID(uuid.NewString()), + Password: hex.EncodeToString(passHash), + Creator: authInfo.UserID, + MasterHub: msg.MasterHub, + Name: msg.Name, + Storage: msg.Storage, + Credential: msg.Credential, + ShardStore: msg.ShardStore, + Features: msg.Features, + WorkingDir: msg.WorkingDir, + Revision: 0, + } + + err = svc.db.PubShards().Create(svc.db.DefCtx(), cfg) + if err != nil { + return jcstypes.PubShards{}, err + } + + return cfg, nil + }) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "%v", err) + } + + return &corrpc.CreatePubShardsResp{PubShardStore: cfg}, nil +} + +func (svc *Service) HubLoadPubShards(ctx context.Context, msg *corrpc.HubLoadPubShards) (*corrpc.HubLoadPubShardsResp, *rpc.CodeError) { + pubShard, err := svc.db.PubShards().Get(svc.db.DefCtx(), msg.PubShardsID) + if err != nil { + if err == gorm.ErrRecordNotFound { + return nil, rpc.Failed(ecode.DataNotFound, "not found") + } + + return nil, rpc.Failed(ecode.OperationFailed, "get public shard store: %v", err) + } + + return &corrpc.HubLoadPubShardsResp{PubShards: *pubShard}, nil +} + +func (svc *Service) 
UserGetPubShards(ctx context.Context, msg *corrpc.UserGetPubShards) (*corrpc.UserGetPubShardsResp, *rpc.CodeError) { + pubShard, err := svc.db.PubShards().Get(svc.db.DefCtx(), msg.PubShardsID) + if err != nil { + if err == gorm.ErrRecordNotFound { + return nil, rpc.Failed(ecode.DataNotFound, "not found") + } + + return nil, rpc.Failed(ecode.OperationFailed, "get public shard store: %v", err) + } + + pass, err := hex.DecodeString(pubShard.Password) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "decode password: %v", err) + } + + if err := bcrypt.CompareHashAndPassword(pass, []byte(msg.Password)); err != nil { + return nil, rpc.Failed(ecode.Unauthorized, "invalid password") + } + + hub, err := svc.db.Hub().GetByID(svc.db.DefCtx(), pubShard.MasterHub) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "get master hub: %v", err) + } + + return &corrpc.UserGetPubShardsResp{PubShards: *pubShard, MasterHub: hub}, nil +} diff --git a/coordinator/internal/rpc/storage.go b/coordinator/internal/rpc/storage.go index 2b3c612..445257e 100644 --- a/coordinator/internal/rpc/storage.go +++ b/coordinator/internal/rpc/storage.go @@ -21,18 +21,22 @@ func (svc *Service) SelectStorageHub(ctx context.Context, msg *corrpc.SelectStor stgHubIDs := make([]jcstypes.HubID, 0, len(msg.Storages)) for _, stg := range msg.Storages { - stgLoc := stg.GetLocation() - var matchedHubID jcstypes.HubID - var matchedScore int - for _, loc := range allLoc { - sc := matchLocation(stgLoc, loc) - if sc > matchedScore { - matchedScore = sc - matchedHubID = loc.HubID + switch stg := stg.(type) { + case *jcstypes.PubShardsType: + matchedHubID = stg.MasterHub + + default: + stgLoc := stg.GetLocation() + var matchedScore int + for _, loc := range allLoc { + sc := matchLocation(stgLoc, loc) + if sc > matchedScore { + matchedScore = sc + matchedHubID = loc.HubID + } } } - stgHubIDs = append(stgHubIDs, matchedHubID) } diff --git a/go.mod b/go.mod index 53d293b..b1e4976 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( golang.org/x/term v0.32.0 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.36.6 - gorm.io/gorm v1.25.12 + gorm.io/gorm v1.30.0 ) require ( @@ -54,18 +54,23 @@ require ( github.com/cloudwego/base64x v0.1.4 // indirect github.com/cloudwego/iasm v0.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/fatih/color v1.18.0 // indirect github.com/gabriel-vasile/mimetype v1.4.6 // indirect + github.com/glebarez/go-sqlite v1.21.2 // indirect + github.com/glebarez/sqlite v1.11.0 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/jonboulle/clockwork v0.4.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/mattn/go-tty v0.0.3 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/term v1.2.0-beta.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/tjfoc/gmsm v1.4.1 // indirect @@ -77,6 +82,11 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect gopkg.in/ini.v1 v1.67.0 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect + gorm.io/driver/sqlite v1.6.0 // indirect + modernc.org/libc v1.22.5 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.23.1 // indirect ) require ( diff --git a/go.sum b/go.sum index 9f9c1f8..8095462 100644 --- a/go.sum +++ b/go.sum @@ -53,6 +53,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= @@ -64,6 +66,10 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= +github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= +github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= +github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= github.com/go-co-op/gocron/v2 v2.15.0 h1:Kpvo71VSihE+RImmpA+3ta5CcMhoRzMGw4dJawrj4zo= github.com/go-co-op/gocron/v2 v2.15.0/go.mod h1:ZF70ZwEqz0OO4RBXE1sNxnANy/zvwLcattWEFsqpKig= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= @@ -167,6 +173,8 @@ github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-tty v0.0.3 h1:5OfyWorkyO7xP52Mq7tB36ajHDG5OHrmBGIS/DtakQI= github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -188,6 +196,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/remyoudompheng/bigfft 
v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -370,10 +381,22 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo= gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= +gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ= +gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8= gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs= +gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM= +modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/hub/internal/cmd/serve.go b/hub/internal/cmd/serve.go index 4beb60b..a4217f8 100644 --- a/hub/internal/cmd/serve.go +++ b/hub/internal/cmd/serve.go @@ -11,6 +11,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/accesstoken" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/http" + "gitlink.org.cn/cloudream/jcs-pub/hub/internal/pubshards" myrpc "gitlink.org.cn/cloudream/jcs-pub/hub/internal/rpc" "gitlink.org.cn/cloudream/common/pkgs/logger" @@ -124,13 +125,18 @@ func serve(configPath string, opts serveOptions) { tktk.Start() defer tktk.Stop() + // 共享分片存储 + shardsPool := pubshards.New(config.Cfg().PubShards, hubCfg.Hub.HubID) + shardsPool.Start() + defer shardsPool.Stop() + // 客户端访问令牌管理器 accToken := accesstoken.New(config.Cfg().ID) accTokenChan := accToken.Start() defer accToken.Stop() // RPC服务 - rpcSvr := hubrpc.NewServer(config.Cfg().RPC, myrpc.NewService(&worker, stgPool, accToken), accToken) + rpcSvr := hubrpc.NewServer(config.Cfg().RPC, myrpc.NewService(&worker, stgPool, accToken, shardsPool), 
accToken) rpcSvrChan := rpcSvr.Start() defer rpcSvr.Stop() diff --git a/hub/internal/config/config.go b/hub/internal/config/config.go index ce54af5..7debaa4 100644 --- a/hub/internal/config/config.go +++ b/hub/internal/config/config.go @@ -10,6 +10,7 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/sysevent" jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/http" + "gitlink.org.cn/cloudream/jcs-pub/hub/internal/pubshards" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/ticktock" ) @@ -23,6 +24,7 @@ type Config struct { Logger log.Config `json:"logger"` SysEvent sysevent.Config `json:"sysEvent"` TickTock ticktock.Config `json:"tickTock"` + PubShards pubshards.Config `json:"pubShards"` } var cfg Config diff --git a/hub/internal/pubshards/config.go b/hub/internal/pubshards/config.go new file mode 100644 index 0000000..ee706da --- /dev/null +++ b/hub/internal/pubshards/config.go @@ -0,0 +1,6 @@ +package pubshards + +type Config struct { + // 存放DB文件的目录 + DBDir string `json:"dbDir"` +} diff --git a/hub/internal/pubshards/pool.go b/hub/internal/pubshards/pool.go new file mode 100644 index 0000000..5a967e1 --- /dev/null +++ b/hub/internal/pubshards/pool.go @@ -0,0 +1,193 @@ +package pubshards + +import ( + "context" + "encoding/hex" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/glebarez/sqlite" + "gitlink.org.cn/cloudream/common/pkgs/async" + "gitlink.org.cn/cloudream/common/pkgs/logger" + stgglb "gitlink.org.cn/cloudream/jcs-pub/common/globals" + corrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/coordinator" + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/factory" + stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + "golang.org/x/crypto/bcrypt" + "gorm.io/gorm" +) + +type Pool struct { + cfg Config + localHubID jcstypes.HubID + stores map[jcstypes.PubShardsID]*LoadedStore + stgEventChan *stgtypes.StorageEventChan + done chan any + lock sync.Mutex +} + +func New(cfg Config, localHubID jcstypes.HubID) *Pool { + return &Pool{ + cfg: cfg, + localHubID: localHubID, + stores: make(map[jcstypes.PubShardsID]*LoadedStore), + stgEventChan: async.NewUnboundChannel[stgtypes.StorageEvent](), + done: make(chan any, 1), + } +} + +func (p *Pool) GetOrLoad(pubStoreID jcstypes.PubShardsID, password string) (*LoadedStore, error) { + log := logger.WithField("Mod", "PubShards") + + p.lock.Lock() + defer p.lock.Unlock() + + loaded := p.stores[pubStoreID] + if loaded == nil { + corCli := stgglb.CoordinatorRPCPool.Get() + defer corCli.Release() + + resp, cerr := corCli.UserGetPubShards(context.Background(), &corrpc.UserGetPubShards{ + PubShardsID: pubStoreID, + Password: password, + }) + if cerr != nil { + return nil, cerr.ToError() + } + + if resp.PubShards.MasterHub != p.localHubID { + return nil, fmt.Errorf("this hub is not the master hub of the public shard store") + } + + pwdHash, err := hex.DecodeString(resp.PubShards.Password) + if err != nil { + return nil, fmt.Errorf("decode password: %w", err) + } + + detail := jcstypes.UserSpaceDetail{ + UserSpace: jcstypes.UserSpace{ + Name: resp.PubShards.Name, + Storage: resp.PubShards.Storage, + Credential: resp.PubShards.Credential, + ShardStore: &resp.PubShards.ShardStore, + Features: resp.PubShards.Features, + WorkingDir: resp.PubShards.WorkingDir, + }, + RecommendHub: &resp.MasterHub, + } + + blder := factory.GetBuilder(&detail) + ss, err := blder.CreateShardStore(false) + if err != nil { + 
return nil, err + } + + err = os.MkdirAll(p.cfg.DBDir, 0755) + if err != nil { + return nil, err + } + + dbFilePath := filepath.Join(p.cfg.DBDir, fmt.Sprintf("%s.db", pubStoreID)) + db, err := gorm.Open(sqlite.Open(dbFilePath), &gorm.Config{}) + if err != nil { + return nil, err + } + err = db.AutoMigrate(FileEntry{}) + if err != nil { + return nil, err + } + + ss.Start(p.stgEventChan) + + loaded = &LoadedStore{ + ShardStore: ss, + Config: resp.PubShards, + PasswordHash: pwdHash, + ClientFileHashDB: db, + } + p.stores[pubStoreID] = loaded + + log.Infof("%v loaded", loaded.Config.String()) + + } else { + // 如果已经被加载,那么就要验证一下密码是否正确 + if bcrypt.CompareHashAndPassword(loaded.PasswordHash, []byte(password)) != nil { + return nil, fmt.Errorf("wrong password") + } + } + + return loaded, nil +} + +func (p *Pool) Start() { + go func() { + log := logger.WithField("Mod", "PubShards") + + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() + + gced := make(map[jcstypes.PubShardsID]bool) + + loop: + for { + select { + case <-ticker.C: + case <-p.done: + break loop + } + + // 凌晨5点开始GC + if time.Now().Hour() != 5 { + gced = make(map[jcstypes.PubShardsID]bool) + continue + } + + p.lock.Lock() + for pubStoreID, loaded := range p.stores { + if gced[pubStoreID] { + continue + } + + allHashes, err := loaded.GetAllHashes() + if err != nil { + log.Warnf("get all hashes of %v: %v", loaded.Config.String(), err) + continue + } + + err = loaded.ShardStore.GC(allHashes) + if err != nil { + log.Warnf("gc %v: %v", loaded.Config.String(), err) + continue + } + + gced[pubStoreID] = true + log.Infof("%v gc done", loaded.Config.String()) + } + p.lock.Unlock() + } + + p.lock.Lock() + for _, loaded := range p.stores { + loaded.ShardStore.Stop() + d, err := loaded.ClientFileHashDB.DB() + if err != nil { + log.Warnf("get sql db of %v: %v", loaded.Config.String(), err) + continue + } + d.Close() + } + p.stores = make(map[jcstypes.PubShardsID]*LoadedStore) + p.lock.Unlock() + }() +} + +func (p *Pool) Stop() { + select { + case p.done <- true: + default: + } +} diff --git a/hub/internal/pubshards/pub_shards.go b/hub/internal/pubshards/pub_shards.go new file mode 100644 index 0000000..2aeb68e --- /dev/null +++ b/hub/internal/pubshards/pub_shards.go @@ -0,0 +1,98 @@ +package pubshards + +import ( + "fmt" + + stgtypes "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/types" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + "gorm.io/gorm" + "gorm.io/gorm/clause" +) + +type LoadedStore struct { + ShardStore stgtypes.ShardStore + Config jcstypes.PubShards + PasswordHash []byte + ClientFileHashDB *gorm.DB +} + +func (s *LoadedStore) StoreShard(userID jcstypes.UserID, path jcstypes.JPath, hash jcstypes.FileHash, size int64) (stgtypes.FileInfo, error) { + info, err := s.ShardStore.Store(path, hash, size) + if err != nil { + return stgtypes.FileInfo{}, err + } + + err = s.ClientFileHashDB.Clauses(clause.Insert{Modifier: "or ignore"}).Create(&FileEntry{ + UserID: userID, + Path: info.Path, + Hash: hash, + Size: size, + }).Error + if err != nil { + return stgtypes.FileInfo{}, err + } + + return info, nil +} + +func (s *LoadedStore) InfoShard(hash jcstypes.FileHash) (stgtypes.FileInfo, error) { + return s.ShardStore.Info(hash) +} + +func (s *LoadedStore) ListUserAll(userID jcstypes.UserID) ([]stgtypes.FileInfo, error) { + var files []FileEntry + err := s.ClientFileHashDB.Table("Files").Where("UserID = ?", userID).Find(&files).Error + if err != nil { + return nil, err + } + + infos := make([]stgtypes.FileInfo, 
len(files))
+	for i, file := range files {
+		infos[i] = stgtypes.FileInfo{
+			Path: file.Path,
+			Size: file.Size,
+			Hash: file.Hash,
+		}
+	}
+
+	return infos, nil
+}
+
+func (s *LoadedStore) GC(userID jcstypes.UserID, fileHashes []jcstypes.FileHash) error {
+	return s.ClientFileHashDB.Transaction(func(tx *gorm.DB) error {
+		if err := tx.Delete(&FileEntry{}, "UserID = ?", userID).Error; err != nil {
+			return fmt.Errorf("delete all hashes: %w", err)
+		}
+
+		// 没有需要保留的记录时直接结束,避免对空切片执行Create而报错
+		if len(fileHashes) == 0 {
+			return nil
+		}
+
+		hashes := make([]FileEntry, len(fileHashes))
+		for i, hash := range fileHashes {
+			hashes[i] = FileEntry{
+				UserID: userID,
+				Hash:   hash,
+			}
+		}
+
+		return tx.Clauses(clause.Insert{Modifier: "or ignore"}).Create(&hashes).Error
+	})
+}
+
+func (s *LoadedStore) GetUserStats(userID jcstypes.UserID) stgtypes.Stats {
+	// TODO 实现
+	return stgtypes.Stats{}
+}
+
+func (s *LoadedStore) GetAllHashes() ([]jcstypes.FileHash, error) {
+	var hashes []jcstypes.FileHash
+	err := s.ClientFileHashDB.Model(&FileEntry{}).Distinct().Pluck("Hash", &hashes).Error
+	return hashes, err
+}
+
+type FileEntry struct {
+	UserID jcstypes.UserID   `gorm:"column:UserID; type:bigint; primaryKey; not null" json:"userID"`
+	Hash   jcstypes.FileHash `gorm:"column:Hash; type:char(68); primaryKey; not null" json:"hash"`
+	Path   jcstypes.JPath    `gorm:"column:Path; type:varchar(1024); not null; serializer:string" json:"path"`
+	Size   int64             `gorm:"column:Size; type:bigint; not null" json:"size"`
+}
+
+func (t FileEntry) TableName() string {
+	return "Files"
+}
diff --git a/hub/internal/pubshards/string_serializer.go b/hub/internal/pubshards/string_serializer.go
new file mode 100644
index 0000000..fcd2d75
--- /dev/null
+++ b/hub/internal/pubshards/string_serializer.go
@@ -0,0 +1,99 @@
+package pubshards
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"gorm.io/gorm/schema"
+)
+
+// 必须给结构体(而不是指针)实现此接口。FromString实现为静态方法
+type StringDBValuer interface {
+	ToString() (string, error)
+	FromString(str string) (any, error)
+}
+
+type StringSerializer struct {
+}
+
+func (StringSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
+	if dbValue == nil {
+		fieldValue := reflect.New(field.FieldType)
+		field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
+		return nil
+	}
+
+	str := ""
+	switch v := dbValue.(type) {
+	case []byte:
+		str = string(v)
+	case string:
+		str = v
+	default:
+		return fmt.Errorf("expected []byte or string, got: %T", dbValue)
+	}
+
+	if field.FieldType.Kind() == reflect.Struct {
+		val := reflect.Zero(field.FieldType)
+
+		sv, ok := val.Interface().(StringDBValuer)
+		if !ok {
+			return fmt.Errorf("ref of field type %v is not StringDBValuer", field.FieldType)
+		}
+
+		v2, err := sv.FromString(str)
+		if err != nil {
+			return err
+		}
+
+		field.ReflectValueOf(ctx, dst).Set(reflect.ValueOf(v2))
+		return nil
+	}
+
+	if field.FieldType.Kind() == reflect.Ptr {
+		val := reflect.Zero(field.FieldType.Elem())
+
+		sv, ok := val.Interface().(StringDBValuer)
+		if !ok {
+			return fmt.Errorf("field type %v is not StringDBValuer", field.FieldType)
+		}
+
+		v2, err := sv.FromString(str)
+		if err != nil {
+			return err
+		}
+
+		field.ReflectValueOf(ctx, dst).Set(reflect.ValueOf(v2))
+		return nil
+	}
+
+	return fmt.Errorf("unsupported field type: %v", field.FieldType)
+}
+
+func (StringSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
+	val := reflect.ValueOf(fieldValue)
+	if val.Kind() == reflect.Struct {
+		sv, ok := val.Interface().(StringDBValuer)
+		if !ok {
+			return nil, fmt.Errorf("ref of field type %v 
is not StringDBValuer", field.FieldType) + } + + return sv.ToString() + } + + if val.Kind() == reflect.Ptr { + sv, ok := val.Elem().Interface().(StringDBValuer) + if !ok { + return nil, fmt.Errorf("field type %v is not StringDBValuer", field.FieldType) + } + + return sv.ToString() + } + + return nil, fmt.Errorf("unsupported field type: %v", field.FieldType) +} + +func init() { + schema.RegisterSerializer("string", StringSerializer{}) +} diff --git a/hub/internal/rpc/pub_shards.go b/hub/internal/rpc/pub_shards.go new file mode 100644 index 0000000..362991c --- /dev/null +++ b/hub/internal/rpc/pub_shards.go @@ -0,0 +1,104 @@ +package rpc + +import ( + "context" + + "gitlink.org.cn/cloudream/jcs-pub/common/ecode" + "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" + hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" +) + +func (svc *Service) PubShardsStore(ctx context.Context, msg *hubrpc.PubShardsStore) (*hubrpc.PubShardsStoreResp, *rpc.CodeError) { + authInfo, ok := rpc.GetAuthInfo(ctx) + if !ok { + return nil, rpc.Failed(ecode.Unauthorized, "unauthorized") + } + + pubShards, cerr := svc.pubShards.GetOrLoad(msg.PubShardsID, msg.Password) + if cerr != nil { + return nil, rpc.Failed(ecode.OperationFailed, "load pub shards store: %v", cerr) + } + + info, err := pubShards.StoreShard(authInfo.UserID, msg.Path, msg.Hash, msg.Size) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "store file: %v", err) + } + + return &hubrpc.PubShardsStoreResp{ + Info: info, + }, nil +} + +func (svc *Service) PubShardsInfo(ctx context.Context, msg *hubrpc.PubShardsInfo) (*hubrpc.PubShardsInfoResp, *rpc.CodeError) { + pubShards, err := svc.pubShards.GetOrLoad(msg.PubShardsID, msg.Password) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "load pub shards store: %v", err) + } + + info, err := pubShards.InfoShard(msg.FileHash) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, err.Error()) + } + + return &hubrpc.PubShardsInfoResp{ + Info: info, + }, nil +} + +func (svc *Service) PubShardsListAll(ctx context.Context, msg *hubrpc.PubShardsListAll) (*hubrpc.PubShardsListAllResp, *rpc.CodeError) { + authInfo, ok := rpc.GetAuthInfo(ctx) + if !ok { + return nil, rpc.Failed(ecode.Unauthorized, "unauthorized") + } + + pubShards, err := svc.pubShards.GetOrLoad(msg.PubShardsID, msg.Password) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "load pub shards store: %v", err) + } + + infos, err := pubShards.ListUserAll(authInfo.UserID) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "list all: %v", err) + } + + return &hubrpc.PubShardsListAllResp{ + Infos: infos, + }, nil +} + +func (svc *Service) PubShardsGC(ctx context.Context, msg *hubrpc.PubShardsGC) (*hubrpc.PubShardsGCResp, *rpc.CodeError) { + authInfo, ok := rpc.GetAuthInfo(ctx) + if !ok { + return nil, rpc.Failed(ecode.Unauthorized, "unauthorized") + } + + pubShards, err := svc.pubShards.GetOrLoad(msg.PubShardsID, msg.Password) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "load pub shards store: %v", err) + } + + err = pubShards.GC(authInfo.UserID, msg.FileHashes) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "reset hashes: %v", err) + } + + return &hubrpc.PubShardsGCResp{}, nil +} + +func (svc *Service) PubShardsStats(ctx context.Context, msg *hubrpc.PubShardsStats) (*hubrpc.PubShardsStatsResp, *rpc.CodeError) { + authInfo, ok := rpc.GetAuthInfo(ctx) + if !ok { + return nil, rpc.Failed(ecode.Unauthorized, "unauthorized") + } + + 
pubShards, err := svc.pubShards.GetOrLoad(msg.PubShardsID, msg.Password) + if err != nil { + return nil, rpc.Failed(ecode.OperationFailed, "load pub shards store: %v", err) + } + + stats := pubShards.GetUserStats(authInfo.UserID) + + return &hubrpc.PubShardsStatsResp{ + Stats: stats, + }, nil +} diff --git a/hub/internal/rpc/rpc.go b/hub/internal/rpc/rpc.go index aaf4616..3dd5c1f 100644 --- a/hub/internal/rpc/rpc.go +++ b/hub/internal/rpc/rpc.go @@ -5,19 +5,22 @@ import ( hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/storage/pool" "gitlink.org.cn/cloudream/jcs-pub/hub/internal/accesstoken" + "gitlink.org.cn/cloudream/jcs-pub/hub/internal/pubshards" ) type Service struct { swWorker *exec.Worker stgPool *pool.Pool accessToken *accesstoken.Cache + pubShards *pubshards.Pool } -func NewService(swWorker *exec.Worker, stgPool *pool.Pool, accessToken *accesstoken.Cache) *Service { +func NewService(swWorker *exec.Worker, stgPool *pool.Pool, accessToken *accesstoken.Cache, pubShards *pubshards.Pool) *Service { return &Service{ swWorker: swWorker, stgPool: stgPool, accessToken: accessToken, + pubShards: pubShards, } } diff --git a/hub/internal/rpc/user_space.go b/hub/internal/rpc/user_space.go deleted file mode 100644 index f86f222..0000000 --- a/hub/internal/rpc/user_space.go +++ /dev/null @@ -1,49 +0,0 @@ -package rpc - -/* -import ( - "context" - - "gitlink.org.cn/cloudream/common/consts/errorcode" - "gitlink.org.cn/cloudream/common/pkgs/logger" - "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc" - hubrpc "gitlink.org.cn/cloudream/jcs-pub/common/pkgs/rpc/hub" -) - -func (svc *Service) BaseStoreListAll(context context.Context, msg *hubrpc.BaseStoreListAll) (*hubrpc.BaseStoreListAllResp, *rpc.CodeError) { - pub, err := svc.stgPool.GetBaseStore(&msg.UserSpace) - if err != nil { - return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) - } - - es, err := pub.ListAll(msg.Path) - if err != nil { - return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) - } - - return &hubrpc.BaseStoreListAllResp{ - Entries: es, - }, nil -} - -func (svc *Service) BaseStoreMkdirs(context context.Context, msg *hubrpc.BaseStoreMkdirs) (*hubrpc.BaseStoreMkdirsResp, *rpc.CodeError) { - pub, err := svc.stgPool.GetBaseStore(&msg.UserSpace) - if err != nil { - return nil, rpc.Failed(errorcode.OperationFailed, err.Error()) - } - - var suc []bool - for _, p := range msg.Pathes { - if err := pub.Mkdir(p); err != nil { - suc = append(suc, false) - logger.Warnf("userspace %v mkdir %s: %v", msg.UserSpace, p, err) - } else { - suc = append(suc, true) - } - } - - return &hubrpc.BaseStoreMkdirsResp{ - Successes: suc, - }, nil -} -*/ diff --git a/jcsctl/cmd/all/all.go b/jcsctl/cmd/all/all.go index da6dbaf..8aa9b87 100644 --- a/jcsctl/cmd/all/all.go +++ b/jcsctl/cmd/all/all.go @@ -8,6 +8,7 @@ import ( _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/getp" _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/ls" _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/package" + _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/pubshards" _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/puto" _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/putp" _ "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/userspace" diff --git a/jcsctl/cmd/pubshards/create.go b/jcsctl/cmd/pubshards/create.go new file mode 100644 index 0000000..fb4072a --- /dev/null +++ b/jcsctl/cmd/pubshards/create.go @@ -0,0 +1,105 @@ +package pubshards + +import ( + "fmt" + + "github.com/chzyer/readline" + "github.com/spf13/cobra" + 
cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd" + "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd/userspace" +) + +func init() { + var opt createOption + c := &cobra.Command{ + Use: "create", + Short: "create a new pubshards", + Args: cobra.NoArgs, + RunE: func(c *cobra.Command, args []string) error { + ctx := cmd.GetCmdCtx(c) + return create(c, ctx, opt, args) + }, + } + PubShardsCmd.AddCommand(c) +} + +type createOption struct { +} + +func create(c *cobra.Command, ctx *cmd.CommandContext, opt createOption, args []string) error { + rl, err := readline.New("> ") + if err != nil { + return err + } + defer rl.Close() + + var blder userspace.UserSpaceBuilder + + name := cmd.ReadString(rl, "请输入共享分片存储名称") + + stgTypes := []string{"Local", "OBS", "OSS", "COS", "EFile", "S3"} + idx := cmd.SelectOne(rl, "请选择共享分片存储的存储服务类型", stgTypes) + switch stgTypes[idx] { + case "Local": + blder.AskLocal(rl) + case "OBS": + blder.AskOBS(rl) + case "OSS": + blder.AskOSS(rl) + case "COS": + blder.AskCOS(rl) + case "EFile": + blder.AskEFile(rl) + case "S3": + blder.AskS3(rl) + } + + blder.AskShardStore(rl, true) + + blder.AskWorkingDir(rl) + + hubID := cmd.ReadInt(rl, "请输入管理共享分片存储的工作组件ID") + + var pwd string + for { + pwd = string(cmd.ReadPassword(rl, "请输入共享分片存储的密码")) + pwd2 := string(cmd.ReadPassword(rl, "请再次输入共享分片存储的密码")) + if pwd == pwd2 { + break + } + fmt.Println("两次输入的密码不一致,请重新输入") + } + + resp, err := ctx.Client.PubShards().Create(cliapi.PubShardsCreate{ + Name: name, + Storage: blder.Storage, + Credential: blder.Credential, + ShardStore: *blder.ShardStore, + Features: blder.Features, + WorkingDir: blder.WorkingDir, + Password: pwd, + MasterHub: jcstypes.HubID(hubID), + }) + if err != nil { + return err + } + + fmt.Printf("创建共享分片存储成功, ID为: %v\n可将此ID和密码分发给其他人以加入共享分片存储\n", resp.PubShards.PubShardsID) + if !cmd.ReadBool(rl, "是否要现在加入共享分片存储") { + return nil + } + + usName := cmd.ReadString(rl, "请输入新用户空间的名称") + _, err = ctx.Client.PubShards().Join(cliapi.PubShardsJoin{ + Name: usName, + PubShardsID: resp.PubShards.PubShardsID, + Password: pwd, + }) + if err != nil { + return err + } + + return nil +} diff --git a/jcsctl/cmd/pubshards/join.go b/jcsctl/cmd/pubshards/join.go new file mode 100644 index 0000000..d8db937 --- /dev/null +++ b/jcsctl/cmd/pubshards/join.go @@ -0,0 +1,56 @@ +package pubshards + +import ( + "fmt" + + "github.com/chzyer/readline" + "github.com/spf13/cobra" + cliapi "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" + jcstypes "gitlink.org.cn/cloudream/jcs-pub/common/types" + "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd" +) + +func init() { + var opt joinOption + c := &cobra.Command{ + Use: "join ", + Short: "join to a pubshards", + Args: cobra.ExactArgs(1), + RunE: func(c *cobra.Command, args []string) error { + ctx := cmd.GetCmdCtx(c) + return join(c, ctx, opt, args) + }, + } + c.Flags().StringVarP(&opt.Password, "p", "p", "", "password for the pubshards") + PubShardsCmd.AddCommand(c) +} + +type joinOption struct { + Password string +} + +func join(c *cobra.Command, ctx *cmd.CommandContext, opt joinOption, args []string) error { + rl, err := readline.New("> ") + if err != nil { + return err + } + defer rl.Close() + + if !c.Flags().Changed("p") { + opt.Password = string(cmd.ReadPassword(rl, "Password")) + } + + usName := cmd.ReadString(rl, "请输入新用户空间的名称") + + resp, err := ctx.Client.PubShards().Join(cliapi.PubShardsJoin{ + Name: usName, + PubShardsID: 
jcstypes.PubShardsID(args[0]), + Password: opt.Password, + }) + if err != nil { + return err + } + + fmt.Printf("加入成功, 用户空间配置ID为: %v\n", resp.UserSpace.UserSpaceID) + return nil +} diff --git a/jcsctl/cmd/pubshards/pubshards.go b/jcsctl/cmd/pubshards/pubshards.go new file mode 100644 index 0000000..e31bfac --- /dev/null +++ b/jcsctl/cmd/pubshards/pubshards.go @@ -0,0 +1,15 @@ +package pubshards + +import ( + "github.com/spf13/cobra" + "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd" +) + +var PubShardsCmd = &cobra.Command{ + Use: "pubshards", + Aliases: []string{"pbs"}, +} + +func init() { + cmd.RootCmd.AddCommand(PubShardsCmd) +} diff --git a/jcsctl/cmd/userspace/create.go b/jcsctl/cmd/userspace/create.go index b04d983..e402f41 100644 --- a/jcsctl/cmd/userspace/create.go +++ b/jcsctl/cmd/userspace/create.go @@ -2,10 +2,9 @@ package userspace import ( "fmt" - "strconv" - "strings" "github.com/chzyer/readline" + "github.com/inhies/go-bytesize" "github.com/spf13/cobra" "gitlink.org.cn/cloudream/jcs-pub/client/sdk/api/v1" @@ -13,10 +12,6 @@ import ( "gitlink.org.cn/cloudream/jcs-pub/jcsctl/cmd" ) -type MyUserSpace struct { - api.UserSpaceCreate -} - func init() { cmd := cobra.Command{ Use: "create", @@ -30,7 +25,6 @@ func init() { } func create(c *cobra.Command, ctx *cmd.CommandContext) { - var userSpace MyUserSpace rl, err := readline.New("> ") if err != nil { @@ -39,47 +33,35 @@ func create(c *cobra.Command, ctx *cmd.CommandContext) { } defer rl.Close() - rl.SetPrompt("\033[36m请输入存储服务名称(Name): \033[0m") - name, err := rl.Readline() - if err != nil { - return - } - userSpace.Name = name + var blder UserSpaceBuilder - storageType, err := promptSelectStorage() - if err != nil { - return - } + blder.Name = cmd.ReadString(rl, "请输入存储服务名称(Name)") - switch storageType { + stgTypes := []string{"Local", "OBS", "OSS", "COS", "EFile", "S3", "PubShards"} + idx := cmd.SelectOne(rl, "请选择存储类型(StorageType)", stgTypes) + + switch stgTypes[idx] { case "Local": - err = userSpace.collectLocalConfig(rl) + blder.AskLocal(rl) case "OBS": - err = userSpace.collectObsConfig(rl) + blder.AskOBS(rl) case "OSS": - err = userSpace.collectOssConfig(rl) + blder.AskOSS(rl) case "COS": - err = userSpace.collectCosConfig(rl) + blder.AskCOS(rl) case "EFile": - err = userSpace.collectEfileConfig(rl) + blder.AskEFile(rl) case "S3": - err = userSpace.collectS3Config(rl) - } - if err != nil { - return + blder.AskS3(rl) + case "PubShards": + blder.AskPubShards(rl) } - err = userSpace.collectShardStore(rl) - if err != nil { - return - } + blder.AskShardStore(rl, false) - err = userSpace.collectWorkingDir(rl) - if err != nil { - return - } + blder.AskWorkingDir(rl) - _, err = ctx.Client.UserSpace().Create(userSpace.UserSpaceCreate) + _, err = ctx.Client.UserSpace().Create(blder.UserSpaceCreate) if err != nil { fmt.Printf("\033[31m保存配置失败: %v\033[0m", err) return @@ -87,477 +69,134 @@ func create(c *cobra.Command, ctx *cmd.CommandContext) { fmt.Println("\033[32m配置保存成功!\033[0m") } -func promptSelectStorage() (string, error) { - rl, _ := readline.NewEx(&readline.Config{ - Prompt: "\033[36m»\033[0m ", - HistoryFile: "/tmp/storage_history.txt", - InterruptPrompt: "^C", - }) - defer rl.Close() - - fmt.Println("\033[1;36m请选择存储类型(StorageType):\033[0m") - options := []string{"Local", "OBS", "OSS", "COS", "EFile", "S3"} - for i, option := range options { - fmt.Printf("\033[33m%d. 
%s\033[0m\n", i+1, option) - } - - for { - line, err := rl.Readline() - if err != nil { - return "", err - } - - trimmed := strings.TrimSpace(line) - switch trimmed { - case "1": - return "Local", nil - case "2": - return "OBS", nil - case "3": - return "OSS", nil - case "4": - return "COS", nil - case "5": - return "EFile", nil - case "6": - return "S3", nil - default: - fmt.Printf("\033[31m错误: 无效选项 '%s',请输入序号!\033[0m\n", line) - } - } +type UserSpaceBuilder struct { + api.UserSpaceCreate } -func (userSpace *MyUserSpace) collectLocalConfig(rl *readline.Instance) error { - var err error - rl.SetPrompt("\033[36m请输入StorageName: \033[0m") - storageName, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Location: \033[0m") - location, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入RootDir: \033[0m") - rootDir, err := rl.Readline() - if err != nil { - return err - } - - userSpace.Storage = &jcstypes.LocalType{ +func (b *UserSpaceBuilder) AskLocal(rl *readline.Instance) { + b.Storage = &jcstypes.LocalType{ Type: "Local", Location: jcstypes.Location{ - StorageName: storageName, - Location: location, + StorageName: cmd.ReadString(rl, "StorageName"), + Location: cmd.ReadString(rl, "Location"), }, } - userSpace.Credential = &jcstypes.LocalCred{ + b.Credential = &jcstypes.LocalCred{ Type: "Local", - RootDir: rootDir, + RootDir: cmd.ReadString(rl, "RootDir"), } - return nil } -func (userSpace *MyUserSpace) collectObsConfig(rl *readline.Instance) error { - var err error - rl.SetPrompt("\033[36m请输入Region: \033[0m") - region, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Endpoint: \033[0m") - endpoint, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Bucket: \033[0m") - bucket, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入ProjectID: \033[0m") - projectID, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入AccessKeyID: \033[0m") - accessKey, err := rl.Readline() - if err != nil { - return err - } - - secretBytes, err := rl.ReadPassword("\033[36m请输入AccessKeySecret: \033[0m") - if err != nil { - return err - } - secretKey := string(secretBytes) - - userSpace.Storage = &jcstypes.OBSType{ +func (b *UserSpaceBuilder) AskOBS(rl *readline.Instance) { + b.Storage = &jcstypes.OBSType{ Type: "OBS", - Region: region, - Endpoint: endpoint, - Bucket: bucket, - ProjectID: projectID, + Region: cmd.ReadString(rl, "Region"), + Endpoint: cmd.ReadString(rl, "Endpoint"), + Bucket: cmd.ReadString(rl, "Bucket"), + ProjectID: cmd.ReadString(rl, "ProjectID"), } - userSpace.Credential = &jcstypes.OBSCred{ + b.Credential = &jcstypes.OBSCred{ Type: "OBS", - AK: accessKey, - SK: secretKey, + AK: cmd.ReadString(rl, "AccessKeyID"), + SK: string(cmd.ReadPassword(rl, "AccessKeySecret")), } - for { - rl.SetPrompt("\033[36m是否支持存储服务间直传文件?(y/n): \033[0m") - input, err := rl.Readline() - if err != nil { - return err - } - - switch strings.ToLower(strings.TrimSpace(input)) { - case "y", "yes": - userSpace.Features = append(userSpace.Features, &jcstypes.S2STransferFeature{ - Type: "S2STransfer", - }) - return nil - case "n", "no": - fmt.Println("\033[36m不支持存储服务间直传文件 \033[0m") - return nil - default: - fmt.Println("\033[31m无效输入!请输入 y/n 或 yes/no \033[0m") - } + if cmd.ReadBool(rl, "是否支持存储服务间直传文件?") { + b.Features = append(b.Features, &jcstypes.S2STransferFeature{ + Type: "S2STransfer", + }) } } -func (userSpace *MyUserSpace) 
collectOssConfig(rl *readline.Instance) error { - var err error - rl.SetPrompt("\033[36m请输入Region: \033[0m") - region, err := rl.Readline() - if err != nil { - return err +func (b *UserSpaceBuilder) AskOSS(rl *readline.Instance) { + b.Storage = &jcstypes.OSSType{ + Type: "OSS", + Region: cmd.ReadString(rl, "Region"), + Endpoint: cmd.ReadString(rl, "Endpoint"), + Bucket: cmd.ReadString(rl, "Bucket"), } - - rl.SetPrompt("\033[36m请输入Endpoint: \033[0m") - endpoint, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Bucket: \033[0m") - bucket, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入AccessKeyID: \033[0m") - accessKey, err := rl.Readline() - if err != nil { - return err - } - - secretBytes, err := rl.ReadPassword("\033[36m请输入AccessKeySecret: \033[0m") - if err != nil { - return err - } - secretKey := string(secretBytes) - - userSpace.Storage = &jcstypes.OSSType{ - Region: region, - Endpoint: endpoint, - Bucket: bucket, - } - userSpace.Credential = &jcstypes.OSSCred{ + b.Credential = &jcstypes.OSSCred{ Type: "OSS", - AK: accessKey, - SK: secretKey, + AK: cmd.ReadString(rl, "AccessKeyID"), + SK: string(cmd.ReadPassword(rl, "SecretAccessKey")), } - return nil } -func (userSpace *MyUserSpace) collectCosConfig(rl *readline.Instance) error { - var err error - rl.SetPrompt("\033[36m请输入Region: \033[0m") - region, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Endpoint: \033[0m") - endpoint, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Bucket: \033[0m") - bucket, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入AccessKeyID: \033[0m") - accessKey, err := rl.Readline() - if err != nil { - return err - } - - secretBytes, err := rl.ReadPassword("\033[36m请输入AccessKeySecret: \033[0m") - if err != nil { - return err - } - secretKey := string(secretBytes) - - userSpace.Storage = &jcstypes.COSType{ +func (b *UserSpaceBuilder) AskCOS(rl *readline.Instance) { + b.Storage = &jcstypes.COSType{ Type: "COS", - Region: region, - Endpoint: endpoint, - Bucket: bucket, + Region: cmd.ReadString(rl, "Region"), + Endpoint: cmd.ReadString(rl, "Endpoint"), + Bucket: cmd.ReadString(rl, "Bucket"), } - userSpace.Credential = &jcstypes.COSCred{ + b.Credential = &jcstypes.COSCred{ Type: "COS", - AK: accessKey, - SK: secretKey, + AK: cmd.ReadString(rl, "AccessKeyID"), + SK: string(cmd.ReadPassword(rl, "SecretAccessKey")), } - return nil } -func (userSpace *MyUserSpace) collectEfileConfig(rl *readline.Instance) error { - var err error - rl.SetPrompt("\033[36m请输入ClusterID: \033[0m") - clusterID, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入TokenURL: \033[0m") - tokenURL, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入APIURL: \033[0m") - apiURL, err := rl.Readline() - if err != nil { - return err - } - - tokenExpire := 0 - for { - rl.SetPrompt("\033[36m请输入TokenExpire: \033[0m") - valueInt, err := rl.Readline() - if err != nil { - return err - } - if strings.TrimSpace(valueInt) == "" { - fmt.Println("\033[31m错误:输入不能为空,请输入正整数\033[0m") - continue - } - num, err := strconv.ParseInt(valueInt, 10, 64) - if err != nil { - fmt.Printf("\033[31m错误:'%s' 不是有效整数,请输入正整数\033[0m\n", valueInt) - continue - } - if num <= 0 { - fmt.Printf("\033[31m错误:%d 不是正整数,请输入大于 0 的整数\033[0m\n", num) - continue - } - tokenExpire = int(num) - break - } - - rl.SetPrompt("\033[36m请输入User: \033[0m") - user, 
err := rl.Readline() - if err != nil { - return err - } - - passwordBytes, err := rl.ReadPassword("\033[36m请输入Password: \033[0m") - if err != nil { - return err - } - password := string(passwordBytes) - - rl.SetPrompt("\033[36m请输入OrgID: \033[0m") - orgID, err := rl.Readline() - if err != nil { - return err - } - - userSpace.Storage = &jcstypes.EFileType{ +func (b *UserSpaceBuilder) AskEFile(rl *readline.Instance) { + b.Storage = &jcstypes.EFileType{ Type: "EFile", - ClusterID: clusterID, + ClusterID: cmd.ReadString(rl, "ClusterID"), } - userSpace.Credential = &jcstypes.EFileCred{ + b.Credential = &jcstypes.EFileCred{ Type: "EFile", - TokenURL: tokenURL, - APIURL: apiURL, - TokenExpire: tokenExpire, - User: user, - Password: password, - OrgID: orgID, + TokenURL: cmd.ReadString(rl, "TokenURL"), + APIURL: cmd.ReadString(rl, "APIURL"), + TokenExpire: cmd.ReadIntRanged(rl, "TokenExpire", 60, 3600), + User: cmd.ReadString(rl, "User"), + Password: string(cmd.ReadPassword(rl, "Password")), + OrgID: cmd.ReadString(rl, "OrgID"), } - for { - rl.SetPrompt("\033[36m是否提供能进行EC计算的接口?(y/n): \033[0m") - input, err := rl.Readline() - if err != nil { - return err - } - - switch strings.ToLower(strings.TrimSpace(input)) { - case "y", "yes": - userSpace.Features = append(userSpace.Features, &jcstypes.ECMultiplierFeature{ - Type: "ECMultiplier", - }) - return nil - case "n", "no": - fmt.Println("\033[36m未提供能进行EC计算的接口 \033[0m") - return nil - default: - fmt.Println("\033[31m无效输入!请输入 y/n 或 yes/no \033[0m") - } + if cmd.ReadBool(rl, "是否提供能进行EC计算的接口?") { + b.Features = append(b.Features, &jcstypes.ECMultiplierFeature{ + Type: "ECMultiplier", + }) } } -func (userSpace *MyUserSpace) collectS3Config(rl *readline.Instance) error { - var err error - rl.SetPrompt("\033[36m请输入Region: \033[0m") - region, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Endpoint: \033[0m") - endpoint, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入Bucket: \033[0m") - bucket, err := rl.Readline() - if err != nil { - return err - } - - rl.SetPrompt("\033[36m请输入AccessKeyID: \033[0m") - accessKey, err := rl.Readline() - if err != nil { - return err - } - - secretBytes, err := rl.ReadPassword("\033[36m请输入AccessKeySecret: \033[0m") - if err != nil { - return err - } - secretKey := string(secretBytes) - - userSpace.Storage = &jcstypes.S3Type{ +func (b *UserSpaceBuilder) AskS3(rl *readline.Instance) { + b.Storage = &jcstypes.S3Type{ Type: "S3", - Region: region, - Endpoint: endpoint, - Bucket: bucket, + Region: cmd.ReadString(rl, "Region"), + Endpoint: cmd.ReadString(rl, "Endpoint"), + Bucket: cmd.ReadString(rl, "Bucket"), } - userSpace.Credential = &jcstypes.S3Cred{ + b.Credential = &jcstypes.S3Cred{ Type: "S3", - AK: accessKey, - SK: secretKey, + AK: cmd.ReadString(rl, "AccessKeyID"), + SK: string(cmd.ReadPassword(rl, "SecretAccessKey")), } - for { - rl.SetPrompt("\033[36m是否支持分段上传?(y/n): \033[0m") - input, err := rl.Readline() - if err != nil { - return err - } - - switch strings.ToLower(strings.TrimSpace(input)) { - case "y", "yes": - userSpace.Features = append(userSpace.Features, &jcstypes.MultipartUploadFeature{ - Type: "MultipartUpload", - }) - return nil - case "n", "no": - fmt.Println("\033[36m不支持分段上传 \033[0m") - return nil - default: - fmt.Println("\033[31m无效输入!请输入 y/n 或 yes/no \033[0m") - } + if cmd.ReadBool(rl, "是否支持分段上传?") { + b.Features = append(b.Features, &jcstypes.MultipartUploadFeature{ + Type: "MultipartUpload", + }) } } -func (userSpace *MyUserSpace) 
collectShardStore(rl *readline.Instance) error { - for { - rl.SetPrompt("\033[36m是否开启分片存储功能?(y/n): \033[0m") - input, err := rl.Readline() - if err != nil { - return err - } - - switch strings.ToLower(strings.TrimSpace(input)) { - case "y", "yes": - for { - rl.SetPrompt("\033[36m请输入最大Size: \033[0m") - sizeInput, err := rl.Readline() - if err != nil { - return err - } - if strings.TrimSpace(sizeInput) == "" { - fmt.Println("\033[31m错误:输入不能为空31m错误:输入不能为空,请输入正整数\033[0m") - continue - } - - maxSize, err := strconv.ParseInt(sizeInput, 10, 64) - if err != nil { - fmt.Printf("\033[31m错误:'%s' 不是有效整数,请输入正整数\033[0m\n", sizeInput) - continue - } - if maxSize <= 0 { - fmt.Printf("\033[31m错误:%d 不是正整数,请输入大于 0 的整数\033[0m\n", maxSize) - continue - } - userSpace.ShardStore = &jcstypes.ShardStoreUserConfig{ - MaxSize: maxSize, - } - return nil - } - - case "n", "no": - fmt.Println("\033[31m分片存储未启用 \033[0m") - return nil - default: - fmt.Println("\033[31m无效输入!请输入 y/n 或 yes/no \033[0m") - } +func (u *UserSpaceBuilder) AskPubShards(rl *readline.Instance) { + u.Storage = &jcstypes.PubShardsType{ + PubShardsID: jcstypes.PubShardsID(cmd.ReadString(rl, "共享分片存储ID")), + Password: string(cmd.ReadPassword(rl, "共享分片存储密码")), } } -func (userSpace *MyUserSpace) collectWorkingDir(rl *readline.Instance) error { - for { - rl.SetPrompt("\033[36m默认工作路径(WorkingDir)为jcs,是否修改?(y/n): \033[0m") - input, err := rl.Readline() - if err != nil { - return err - } +func (b *UserSpaceBuilder) AskShardStore(rl *readline.Instance, must bool) { + if !must && !cmd.ReadBool(rl, "是否开启分片存储功能?") { + return + } - switch strings.ToLower(strings.TrimSpace(input)) { - case "y", "yes": - rl.SetPrompt("\033[36m请输入新的工作路径(WorkingDir): \033[0m") - newValue, err := rl.Readline() - if err != nil { - return err - } - if newValue != "" { - userSpace.WorkingDir = newValue - } - return nil - case "n", "no": - userSpace.WorkingDir = "jcs" - return nil - default: - fmt.Println("\033[31m无效输入!请输入 y/n 或 yes/no \033[0m") - } + var size bytesize.ByteSize + cmd.ReadStringValue(rl, "分片存储最大容量", &size) + b.ShardStore = &jcstypes.ShardStoreUserConfig{ + MaxSize: int64(size), } } + +func (b *UserSpaceBuilder) AskWorkingDir(rl *readline.Instance) { + b.WorkingDir = cmd.ReadStringDef(rl, "工作目录", "jcs") +} diff --git a/jcsctl/cmd/utils.go b/jcsctl/cmd/utils.go index 1d7b5de..f5fe9b9 100644 --- a/jcsctl/cmd/utils.go +++ b/jcsctl/cmd/utils.go @@ -1,6 +1,14 @@ package cmd -import "strings" +import ( + "encoding" + "fmt" + "os" + "strconv" + "strings" + + "github.com/chzyer/readline" +) func SplitObjectPath(str string) (bkt string, pkg string, obj string, ok bool) { comps := strings.Split(str, ":") @@ -16,3 +24,138 @@ func SplitObjectPath(str string) (bkt string, pkg string, obj string, ok bool) { return comps[0], comps[1], pat, true } + +func SelectOne(rl *readline.Instance, prompt string, options []string) int { + fmt.Printf("\033[36m%v: \033[0m\n", prompt) + for i, option := range options { + fmt.Printf("%d. 
%s\n", i+1, option) + } + rl.SetPrompt("请输入选项编号: ") + + for { + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + + idx, err := strconv.ParseInt(str, 10, 64) + if err != nil { + fmt.Println("无效的编号, 请重新输入") + continue + } + if idx < 1 || idx > int64(len(options)) { + fmt.Println("无效的编号, 请重新输入") + continue + } + + return int(idx - 1) + } +} + +func ReadString(rl *readline.Instance, prompt string) string { + rl.SetPrompt(fmt.Sprintf("%v: ", prompt)) + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + + return str +} + +func ReadStringDef(rl *readline.Instance, prompt string, def string) string { + rl.SetPrompt(fmt.Sprintf("%v(默认: %s): ", prompt, def)) + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + + if str == "" { + return def + } + + return str +} + +func ReadStringValue[T encoding.TextUnmarshaler](rl *readline.Instance, prompt string, v T) { + rl.SetPrompt(fmt.Sprintf("%v: ", prompt)) + for { + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + + err = v.UnmarshalText([]byte(str)) + if err != nil { + fmt.Printf("%v, 请重新输入\n", err) + continue + } + break + } +} + +func ReadInt(rl *readline.Instance, prompt string) int { + rl.SetPrompt(fmt.Sprintf("%v: ", prompt)) + for { + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + + idx, err := strconv.ParseInt(str, 10, 64) + if err != nil { + fmt.Println("无效的输入, 请重新输入") + continue + } + + return int(idx) + } +} + +func ReadIntRanged(rl *readline.Instance, prompt string, min, max int) int { + rl.SetPrompt(fmt.Sprintf("%v(%d-%d): ", prompt, min, max)) + for { + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + + idx, err := strconv.ParseInt(str, 10, 64) + if err != nil { + fmt.Println("无效的输入, 请重新输入") + continue + } + + if idx < int64(min) || idx > int64(max) { + fmt.Printf("输入值必须在%d-%d之间, 请重新输入\n", min, max) + continue + } + + return int(idx) + } +} + +func ReadPassword(rl *readline.Instance, prompt string) []byte { + pwd, err := rl.ReadPassword(fmt.Sprintf("%v: ", prompt)) + if err != nil { + os.Exit(0) + } + + return pwd +} + +func ReadBool(rl *readline.Instance, prompt string) bool { + rl.SetPrompt(fmt.Sprintf("\033[36m%v(y/n): \033[0m", prompt)) + for { + str, err := rl.Readline() + if err != nil { + os.Exit(0) + } + if str == "y" || str == "Y" { + return true + } + if str == "n" || str == "N" { + return false + } + fmt.Println("无效的输入, 请重新输入") + } +}