Fix some debugging issues

This commit is contained in:
parent c8cb9c79ca
commit 92f5c18e9d
@@ -0,0 +1,44 @@
+package db
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"gitlink.org.cn/cloudream/common/utils/serder"
+	"gorm.io/gorm/schema"
+)
+
+type JSONSerializer struct {
+}
+
+func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
+	fieldValue := reflect.New(field.FieldType)
+	if dbValue != nil {
+		var data []byte
+		switch v := dbValue.(type) {
+		case []byte:
+			data = v
+		case string:
+			data = []byte(v)
+		default:
+			return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue)
+		}
+
+		err := serder.JSONToObject(data, fieldValue.Interface())
+		if err != nil {
+			return err
+		}
+	}
+
+	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
+	return nil
+}
+
+func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
+	return serder.ObjectToJSON(fieldValue)
+}
+
+func init() {
+	schema.RegisterSerializer("json", JSONSerializer{})
+}
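Once registered under the name "json", this serializer can be selected per column through GORM's serializer struct tag; the ShardStore change further down in this commit does exactly that. A minimal sketch with a hypothetical model (SomeConfig is a placeholder type, not part of this commit):

// Hypothetical model: GORM routes Config through the registered "json" serializer,
// calling Value() when writing the row and Scan() when reading it back.
type Example struct {
	ID     int64       `gorm:"column:ID; primaryKey"`
	Config *SomeConfig `gorm:"column:Config; type:json; serializer:json"`
}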
@@ -87,10 +87,14 @@ func (a *AWSAuth) Auth(c *gin.Context) {
 		return
 	}
 	for _, h := range headers {
-		verifyReq.Header.Add(h, c.Request.Header.Get(h))
+		if strings.EqualFold(h, "content-length") {
+			verifyReq.ContentLength = c.Request.ContentLength
+		} else if strings.EqualFold(h, "host") {
+			verifyReq.Host = c.Request.Host
+		} else {
+			verifyReq.Header.Add(h, c.Request.Header.Get(h))
+		}
 	}
-	verifyReq.Host = c.Request.Host
-	verifyReq.ContentLength = c.Request.ContentLength
 
 	signer := v4.NewSigner()
 	err = signer.SignHTTP(context.TODO(), a.cred, verifyReq, hexPayloadHash, AuthService, AuthRegion, timestamp)

@@ -102,7 +106,7 @@ func (a *AWSAuth) Auth(c *gin.Context) {
 
 	verifySig := getSignatureFromAWSHeader(verifyReq)
 	if !strings.EqualFold(verifySig, reqSig) {
-		logger.Warnf("signature mismatch, input header: %s, verify: %s", authorizationHeader, verifySig)
+		logger.Warnf("signature mismatch, input header: %s, verify: %s", authorizationHeader, verifyReq.Header.Get(AuthorizationHeader))
 		c.AbortWithStatusJSON(http.StatusOK, Failed(errorcode.Unauthorized, "signature mismatch"))
 		return
 	}

@@ -143,10 +147,14 @@ func (a *AWSAuth) AuthWithoutBody(c *gin.Context) {
 		return
 	}
 	for _, h := range headers {
-		verifyReq.Header.Add(h, c.Request.Header.Get(h))
+		if strings.EqualFold(h, "content-length") {
+			verifyReq.ContentLength = c.Request.ContentLength
+		} else if strings.EqualFold(h, "host") {
+			verifyReq.Host = c.Request.Host
+		} else {
+			verifyReq.Header.Add(h, c.Request.Header.Get(h))
+		}
 	}
-	verifyReq.Host = c.Request.Host
-	verifyReq.ContentLength = c.Request.ContentLength
 
 	err = a.signer.SignHTTP(context.TODO(), a.cred, verifyReq, "", AuthService, AuthRegion, timestamp)
 

@@ -197,9 +205,14 @@ func (a *AWSAuth) PresignedAuth(c *gin.Context) {
 		return
 	}
 	for _, h := range signedHeaders {
-		verifyReq.Header.Add(h, c.Request.Header.Get(h))
+		if strings.EqualFold(h, "content-length") {
+			verifyReq.ContentLength = c.Request.ContentLength
+		} else if strings.EqualFold(h, "host") {
+			verifyReq.Host = c.Request.Host
+		} else {
+			verifyReq.Header.Add(h, c.Request.Header.Get(h))
+		}
 	}
-	verifyReq.Host = c.Request.Host
 
 	timestamp, err := time.Parse("20060102T150405Z", date)
 	if err != nil {
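The three hunks above all fix the same bug: Go's http.Request carries Host and Content-Length in struct fields rather than in the Header map, so copying them into Header on the reconstructed request makes the re-signed Authorization differ from the one the client computed. A standalone sketch of the verification pattern (hypothetical helper name; assumes aws-sdk-go-v2's SigV4 signer):

import (
	"context"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

// Sketch only: rebuild the signed request, re-sign it with the same credentials,
// and compare the freshly computed signature against the one the client sent.
func verifyAWSSignature(orig *http.Request, signedHeaders []string, cred aws.Credentials,
	payloadHash, service, region string, ts time.Time) (bool, error) {

	verifyReq, err := http.NewRequest(orig.Method, orig.URL.String(), nil)
	if err != nil {
		return false, err
	}
	for _, h := range signedHeaders {
		switch {
		case strings.EqualFold(h, "content-length"):
			verifyReq.ContentLength = orig.ContentLength // struct field, not a header entry
		case strings.EqualFold(h, "host"):
			verifyReq.Host = orig.Host // likewise carried in the Host field
		default:
			verifyReq.Header.Add(h, orig.Header.Get(h))
		}
	}

	signer := v4.NewSigner()
	if err := signer.SignHTTP(context.TODO(), cred, verifyReq, payloadHash, service, region, ts); err != nil {
		return false, err
	}
	// If every signed input matches, the re-signed Authorization header matches too.
	return verifyReq.Header.Get("Authorization") == orig.Header.Get("Authorization"), nil
}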
@@ -11,7 +11,6 @@ import (
 	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
 	"gitlink.org.cn/cloudream/storage2/client/internal/db"
 	"gitlink.org.cn/cloudream/storage2/client/types"
-	"gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser"

@@ -22,10 +21,10 @@ type CreateLoadUploader struct {
 	targetSpaces []types.UserSpaceDetail
 	loadRoots    []string
 	uploader     *Uploader
-	distlock     *distlock.Mutex
-	successes    []db.AddObjectEntry
-	lock         sync.Mutex
-	commited     bool
+	// distlock *distlock.Mutex
+	successes    []db.AddObjectEntry
+	lock         sync.Mutex
+	commited     bool
 }
 
 type CreateLoadResult struct {

@@ -85,7 +84,7 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
 	}
 	u.commited = true
 
-	defer u.distlock.Unlock()
+	// defer u.distlock.Unlock()
 
 	var addedObjs []types.Object
 	err := u.uploader.db.DoTx(func(tx db.SQLContext) error {

@@ -118,7 +117,7 @@ func (u *CreateLoadUploader) Abort() {
 	}
 	u.commited = true
 
-	u.distlock.Unlock()
+	// u.distlock.Unlock()
 
 	// TODO: consider deleting the PackageID here
 }
@@ -12,17 +12,16 @@ import (
 	"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
 	"gitlink.org.cn/cloudream/storage2/client/internal/db"
 	"gitlink.org.cn/cloudream/storage2/client/types"
-	"gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/parser"
 )
 
 type UpdateUploader struct {
-	uploader     *Uploader
-	pkgID        types.PackageID
-	targetSpace  types.UserSpaceDetail
-	distMutex    *distlock.Mutex
+	uploader     *Uploader
+	pkgID        types.PackageID
+	targetSpace  types.UserSpaceDetail
+	// distMutex *distlock.Mutex
 	loadToSpaces []types.UserSpaceDetail
 	loadToPath   []string
 	successes    []db.AddObjectEntry

@@ -115,7 +114,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) {
 	}
 	w.commited = true
 
-	defer w.distMutex.Unlock()
+	// defer w.distMutex.Unlock()
 
 	var addedObjs []types.Object
 	err := w.uploader.db.DoTx(func(tx db.SQLContext) error {

@@ -147,5 +146,5 @@ func (w *UpdateUploader) Abort() {
 	}
 
 	w.commited = true
-	w.distMutex.Unlock()
+	// w.distMutex.Unlock()
 }
@@ -14,7 +14,7 @@ import (
 	"gitlink.org.cn/cloudream/common/utils/sort2"
 	"gitlink.org.cn/cloudream/storage2/client/internal/db"
 	"gitlink.org.cn/cloudream/storage2/client/internal/metacache"
-	"gitlink.org.cn/cloudream/storage2/client/types"
+	clitypes "gitlink.org.cn/cloudream/storage2/client/types"
 	stgglb "gitlink.org.cn/cloudream/storage2/common/globals"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/connectivity"
 	"gitlink.org.cn/cloudream/storage2/common/pkgs/distlock"

@@ -38,10 +38,11 @@ func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collecto
 		connectivity: connectivity,
 		stgPool:      stgPool,
 		spaceMeta:    spaceMeta,
 		db:           db,
 	}
 }
 
-func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID, loadTo []types.UserSpaceID, loadToPath []string) (*UpdateUploader, error) {
+func (u *Uploader) BeginUpdate(pkgID clitypes.PackageID, affinity clitypes.UserSpaceID, loadTo []clitypes.UserSpaceID, loadToPath []string) (*UpdateUploader, error) {
 	spaceIDs, err := u.db.UserSpace().GetAllIDs(u.db.DefCtx())
 	if err != nil {
 		return nil, fmt.Errorf("getting user space ids: %w", err)

@@ -75,9 +76,9 @@ func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID
 		return nil, fmt.Errorf("user no available storages")
 	}
 
-	loadToSpaces := make([]types.UserSpaceDetail, len(loadTo))
+	loadToSpaces := make([]clitypes.UserSpaceDetail, len(loadTo))
 	for i, spaceID := range loadTo {
-		space, ok := lo.Find(spaceDetails, func(space *types.UserSpaceDetail) bool {
+		space, ok := lo.Find(spaceDetails, func(space *clitypes.UserSpaceDetail) bool {
 			return space.UserSpace.UserSpaceID == spaceID
 		})
 		if !ok {

@@ -115,7 +116,7 @@ func (u *Uploader) BeginUpdate(pkgID types.PackageID, affinity types.UserSpaceID
 // 1. Prefer the storage that has affinity configured
 // 2. Otherwise pick a random storage in the same region as the current client
 // 3. Failing that, pick the storage with the lowest latency among all of them
-func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity types.UserSpaceID) UploadSpaceInfo {
+func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity clitypes.UserSpaceID) UploadSpaceInfo {
 	if spaceAffinity > 0 {
 		aff, ok := lo.Find(spaces, func(space UploadSpaceInfo) bool { return space.Space.UserSpace.UserSpaceID == spaceAffinity })
 		if ok {

@@ -134,10 +135,10 @@ func (w *Uploader) chooseUploadStorage(spaces []UploadSpaceInfo, spaceAffinity t
 	return spaces[0]
 }
 
-func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo []types.UserSpaceID, loadToPath []string) (*CreateLoadUploader, error) {
+func (u *Uploader) BeginCreateLoad(bktID clitypes.BucketID, pkgName string, loadTo []clitypes.UserSpaceID, loadToPath []string) (*CreateLoadUploader, error) {
 	getSpaces := u.spaceMeta.GetMany(loadTo)
 
-	spacesStgs := make([]types.UserSpaceDetail, len(loadTo))
+	spacesStgs := make([]clitypes.UserSpaceDetail, len(loadTo))
 	for i, stg := range getSpaces {
 		if stg == nil {
 			return nil, fmt.Errorf("storage %v not found", loadTo[i])

@@ -145,7 +146,14 @@ func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo
 		spacesStgs[i] = *stg
 	}
 
-	pkg, err := u.db.Package().Create(u.db.DefCtx(), bktID, pkgName)
+	pkg, err := db.DoTx01(u.db, func(tx db.SQLContext) (clitypes.Package, error) {
+		_, err := u.db.Bucket().GetByID(tx, bktID)
+		if err != nil {
+			return clitypes.Package{}, err
+		}
+
+		return u.db.Package().Create(u.db.DefCtx(), bktID, pkgName)
+	})
 	if err != nil {
 		return nil, fmt.Errorf("create package: %w", err)
 	}

@@ -170,19 +178,19 @@ func (u *Uploader) BeginCreateLoad(bktID types.BucketID, pkgName string, loadTo
 	}, nil
 }
 
-func (u *Uploader) UploadPart(objID types.ObjectID, index int, stream io.Reader) error {
+func (u *Uploader) UploadPart(objID clitypes.ObjectID, index int, stream io.Reader) error {
 	detail, err := u.db.Object().GetDetail(u.db.DefCtx(), objID)
 	if err != nil {
 		return fmt.Errorf("getting object detail: %w", err)
 	}
 
 	objDe := detail
-	_, ok := objDe.Object.Redundancy.(*types.MultipartUploadRedundancy)
+	_, ok := objDe.Object.Redundancy.(*clitypes.MultipartUploadRedundancy)
 	if !ok {
 		return fmt.Errorf("object %v is not a multipart upload", objID)
 	}
 
-	var space types.UserSpaceDetail
+	var space clitypes.UserSpaceDetail
 	if len(objDe.Blocks) > 0 {
 		cstg := u.spaceMeta.Get(objDe.Blocks[0].UserSpaceID)
 		if cstg == nil {

@@ -257,7 +265,7 @@ func (u *Uploader) UploadPart(objID types.ObjectID, index int, stream io.Reader)
 
 	shardInfo := ret["shard"].(*ops2.ShardInfoValue)
 	err = u.db.DoTx(func(tx db.SQLContext) error {
-		return u.db.Object().AppendPart(tx, types.ObjectBlock{
+		return u.db.Object().AppendPart(tx, clitypes.ObjectBlock{
 			ObjectID:    objID,
 			Index:       index,
 			UserSpaceID: space.UserSpace.UserSpaceID,
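The BeginCreateLoad change above wraps the bucket existence check and the package creation into a single transaction, so a package can no longer be created under a bucket that was deleted concurrently. A generic sketch of the same check-then-create pattern with plain GORM (hypothetical models and helper; this is not the repo's db wrapper):

import "gorm.io/gorm"

// Hypothetical minimal models.
type Bucket struct{ ID int64 }
type Package struct {
	ID       int64
	BucketID int64
	Name     string
}

// createPackage creates a package only if its bucket still exists, atomically.
func createPackage(gdb *gorm.DB, bktID int64, name string) (Package, error) {
	var pkg Package
	err := gdb.Transaction(func(tx *gorm.DB) error {
		var bkt Bucket
		if err := tx.First(&bkt, bktID).Error; err != nil {
			return err // bucket gone (or query failed): roll back, nothing is created
		}
		pkg = Package{BucketID: bktID, Name: name}
		return tx.Create(&pkg).Error
	})
	return pkg, err
}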
@@ -21,7 +21,7 @@ var RedundancyUnion = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTyp
 )), "type")
 
 type NoneRedundancy struct {
-	Redundancy
+	Redundancy `json:"-"`
 	serder.Metadata `union:"none"`
 	Type string `json:"type"`
 }

@@ -35,7 +35,7 @@ func NewNoneRedundancy() *NoneRedundancy {
 var DefaultRepRedundancy = *NewRepRedundancy(2)
 
 type RepRedundancy struct {
-	Redundancy
+	Redundancy `json:"-"`
 	serder.Metadata `union:"rep"`
 	Type string `json:"type"`
 	RepCount int `json:"repCount"`

@@ -51,7 +51,7 @@ func NewRepRedundancy(repCount int) *RepRedundancy {
 var DefaultECRedundancy = *NewECRedundancy(2, 3, 1024*1024*5)
 
 type ECRedundancy struct {
-	Redundancy
+	Redundancy `json:"-"`
 	serder.Metadata `union:"ec"`
 	Type string `json:"type"`
 	K int `json:"k"`

@@ -75,7 +75,7 @@ func (b *ECRedundancy) StripSize() int64 {
 var DefaultLRCRedundancy = *NewLRCRedundancy(2, 4, []int{2}, 1024*1024*5)
 
 type LRCRedundancy struct {
-	Redundancy
+	Redundancy `json:"-"`
 	serder.Metadata `union:"lrc"`
 	Type string `json:"type"`
 	K int `json:"k"`

@@ -132,7 +132,7 @@ func (b *LRCRedundancy) GetGroupElements(grp int) []int {
 }
 
 type SegmentRedundancy struct {
-	Redundancy
+	Redundancy `json:"-"`
 	serder.Metadata `union:"segment"`
 	Type string `json:"type"`
 	Segments []int64 `json:"segments"` // size of each segment

@@ -201,7 +201,7 @@ func (b *SegmentRedundancy) CalcSegmentRange(start int64, end *int64) (segIdxSta
 }
 
 type MultipartUploadRedundancy struct {
-	Redundancy
+	Redundancy `json:"-"`
 	serder.Metadata `union:"multipartUpload"`
 	Type string `json:"type"`
 }
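The json:"-" added to each embedded Redundancy matters because reflection-based marshalers treat an embedded interface as an ordinary field named after its type, so without the tag it leaks into the serialized form. A standalone illustration with plain encoding/json (hypothetical stand-in types; the repo's serder package is assumed to behave analogously):

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for the union types; sketch only.
type Redundancy interface{ isRedundancy() }

type withoutTag struct {
	Redundancy        // embedded interface shows up as a visible "Redundancy" field
	Type     string `json:"type"`
	RepCount int    `json:"repCount"`
}

type withTag struct {
	Redundancy `json:"-"` // hidden from (un)marshaling; kept only for interface conformance
	Type       string `json:"type"`
	RepCount   int    `json:"repCount"`
}

func main() {
	a, _ := json.Marshal(withoutTag{Type: "rep", RepCount: 2})
	b, _ := json.Marshal(withTag{Type: "rep", RepCount: 2})
	fmt.Println(string(a)) // {"Redundancy":null,"type":"rep","repCount":2}
	fmt.Println(string(b)) // {"type":"rep","repCount":2}
}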
@@ -79,7 +79,7 @@ type UserSpace struct {
 	// Credential info of the user on the given storage node, e.g. user account, AK/SK, etc.
 	Credential cotypes.StorageCredential `gorm:"column:Credential; type:json; not null; serializer:union" json:"credential"`
 	// Shard-store configuration of the user space; empty means shard storage is not used
-	ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json;" json:"shardStore"`
+	ShardStore *cotypes.ShardStoreUserConfig `gorm:"column:ShardStore; type:json; serializer:json" json:"shardStore"`
 	// Revision number of the user space info; it must be bumped on every change
 	Revision int64 `gorm:"column:Revision; type:bigint; not null" json:"revision"`
 }
@@ -0,0 +1,44 @@
+package db
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"gitlink.org.cn/cloudream/common/utils/serder"
+	"gorm.io/gorm/schema"
+)
+
+type JSONSerializer struct {
+}
+
+func (JSONSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue interface{}) error {
+	fieldValue := reflect.New(field.FieldType)
+	if dbValue != nil {
+		var data []byte
+		switch v := dbValue.(type) {
+		case []byte:
+			data = v
+		case string:
+			data = []byte(v)
+		default:
+			return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue)
+		}
+
+		err := serder.JSONToObject(data, fieldValue.Interface())
+		if err != nil {
+			return err
+		}
+	}
+
+	field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
+	return nil
+}
+
+func (JSONSerializer) Value(ctx context.Context, field *schema.Field, dst reflect.Value, fieldValue interface{}) (interface{}, error) {
+	return serder.ObjectToJSON(fieldValue)
+}
+
+func init() {
+	schema.RegisterSerializer("json", JSONSerializer{})
+}
@@ -1,50 +0,0 @@
-package types
-
-/*
-import (
-	"fmt"
-
-	"gitlink.org.cn/cloudream/common/pkgs/types"
-	"gitlink.org.cn/cloudream/common/utils/serder"
-)
-
-type PublicStoreConfig interface {
-	GetPublicStoreType() string
-	// Output a string for debugging; must not include sensitive information
-	String() string
-}
-
-var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[PublicStoreConfig](
-	(*LocalPublicStorage)(nil),
-	(*S3PublicStorage)(nil),
-)), "type")
-
-type LocalPublicStorage struct {
-	serder.Metadata `union:"Local"`
-	Type string `json:"type"`
-	// Root path of the Package when it is loaded/scheduled
-	LoadBase string `json:"loadBase"`
-}
-
-func (s *LocalPublicStorage) GetPublicStoreType() string {
-	return "Local"
-}
-
-func (s *LocalPublicStorage) String() string {
-	return fmt.Sprintf("Local[LoadBase=%v]", s.LoadBase)
-}
-
-type S3PublicStorage struct {
-	serder.Metadata `union:"S3"`
-	Type string `json:"type"`
-	LoadBase string `json:"loadBase"`
-}
-
-func (s *S3PublicStorage) GetPublicStoreType() string {
-	return "S3"
-}
-
-func (s *S3PublicStorage) String() string {
-	return fmt.Sprintf("S3[LoadBase=%v]", s.LoadBase)
-}
-*/
@@ -1,51 +0,0 @@
-package types
-
-/*
-import (
-	"fmt"
-
-	"gitlink.org.cn/cloudream/common/pkgs/types"
-	"gitlink.org.cn/cloudream/common/utils/serder"
-)
-
-// Configuration data of the shard storage service
-type ShardStoreConfig interface {
-	GetShardStoreType() string
-	// Output a string for debugging; must not include sensitive information
-	String() string
-}
-
-var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[ShardStoreConfig](
-	(*LocalShardStorage)(nil),
-	(*S3ShardStorage)(nil),
-)), "type")
-
-type LocalShardStorage struct {
-	serder.Metadata `union:"Local"`
-	Type string `json:"type"`
-	Root string `json:"root"`
-	MaxSize int64 `json:"maxSize"`
-}
-
-func (s *LocalShardStorage) GetShardStoreType() string {
-	return "Local"
-}
-
-func (s *LocalShardStorage) String() string {
-	return fmt.Sprintf("Local[root=%s, maxSize=%d]", s.Root, s.MaxSize)
-}
-
-type S3ShardStorage struct {
-	serder.Metadata `union:"S3"`
-	Type string `json:"type"`
-	Root string `json:"root"`
-}
-
-func (s *S3ShardStorage) GetShardStoreType() string {
-	return "S3"
-}
-
-func (s *S3ShardStorage) String() string {
-	return fmt.Sprintf("S3[root=%s]", s.Root)
-}
-*/
@@ -20,73 +20,73 @@ var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[Storage
 )), "type")
 
 type LocalCred struct {
-	StorageCredential
-	serder.Metadata `union:"Local"`
-	Type            string `json:"type"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"Local"`
+	Type              string `json:"type"`
 }
 
 type MashupCred struct {
-	StorageCredential
-	serder.Metadata `union:"Mashup"`
-	Store           StorageCredential `json:"store"`
-	Feature         StorageCredential `json:"feature"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"Mashup"`
+	Store             StorageCredential `json:"store"`
+	Feature           StorageCredential `json:"feature"`
 }
 
 type OSSCred struct {
-	StorageCredential
-	serder.Metadata `union:"OSS"`
-	Type            string `json:"type"`
-	Region          string `json:"region"`
-	AK              string `json:"accessKeyId"`
-	SK              string `json:"secretAccessKey"`
-	Endpoint        string `json:"endpoint"`
-	Bucket          string `json:"bucket"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"OSS"`
+	Type              string `json:"type"`
+	Region            string `json:"region"`
+	AK                string `json:"accessKeyId"`
+	SK                string `json:"secretAccessKey"`
+	Endpoint          string `json:"endpoint"`
+	Bucket            string `json:"bucket"`
 }
 
 type OBSCred struct {
-	StorageCredential
-	serder.Metadata `union:"OBS"`
-	Type            string `json:"type"`
-	Region          string `json:"region"`
-	AK              string `json:"accessKeyId"`
-	SK              string `json:"secretAccessKey"`
-	Endpoint        string `json:"endpoint"`
-	Bucket          string `json:"bucket"`
-	ProjectID       string `json:"projectID"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"OBS"`
+	Type              string `json:"type"`
+	Region            string `json:"region"`
+	AK                string `json:"accessKeyId"`
+	SK                string `json:"secretAccessKey"`
+	Endpoint          string `json:"endpoint"`
+	Bucket            string `json:"bucket"`
+	ProjectID         string `json:"projectID"`
 }
 
 type COSCred struct {
-	StorageCredential
-	serder.Metadata `union:"COS"`
-	Type            string `json:"type"`
-	Region          string `json:"region"`
-	AK              string `json:"accessKeyId"`
-	SK              string `json:"secretAccessKey"`
-	Endpoint        string `json:"endpoint"`
-	Bucket          string `json:"bucket"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"COS"`
+	Type              string `json:"type"`
+	Region            string `json:"region"`
+	AK                string `json:"accessKeyId"`
+	SK                string `json:"secretAccessKey"`
+	Endpoint          string `json:"endpoint"`
+	Bucket            string `json:"bucket"`
 }
 
 type EFileCred struct {
-	StorageCredential
-	serder.Metadata `union:"EFile"`
-	Type            string `json:"type"`
-	TokenURL        string `json:"tokenURL"`
-	APIURL          string `json:"apiURL"`
-	TokenExpire     int    `json:"tokenExpire"` // in seconds
-	User            string `json:"user"`
-	Password        string `json:"password"`
-	OrgID           string `json:"orgID"`
-	ClusterID       string `json:"clusterID"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"EFile"`
+	Type              string `json:"type"`
+	TokenURL          string `json:"tokenURL"`
+	APIURL            string `json:"apiURL"`
+	TokenExpire       int    `json:"tokenExpire"` // in seconds
+	User              string `json:"user"`
+	Password          string `json:"password"`
+	OrgID             string `json:"orgID"`
+	ClusterID         string `json:"clusterID"`
 }
 
 // Storage service speaking the generic S3 protocol
 type S3Cred struct {
-	StorageCredential
-	serder.Metadata `union:"S3"`
-	Type            string `json:"type"`
-	Region          string `json:"region"`
-	AK              string `json:"accessKeyId"`
-	SK              string `json:"secretAccessKey"`
-	Endpoint        string `json:"endpoint"`
-	Bucket          string `json:"bucket"`
+	StorageCredential `json:"-"`
+	serder.Metadata   `union:"S3"`
+	Type              string `json:"type"`
+	Region            string `json:"region"`
+	AK                string `json:"accessKeyId"`
+	SK                string `json:"secretAccessKey"`
+	Endpoint          string `json:"endpoint"`
+	Bucket            string `json:"bucket"`
 }
@@ -36,6 +36,8 @@ func init() {
+	var httpAddr string
+
 	cmd := &cobra.Command{
 		Use:   "serve",
 		Short: "start storage2 hub service",
 		Run: func(cmd *cobra.Command, args []string) {
 			serve(configPath, httpAddr)
 		},
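The new httpAddr variable is presumably bound to a command-line flag right after this hunk; a typical cobra binding would look like the following sketch (the flag name and default are illustrative, not taken from this commit):

// Hypothetical flag binding for the new variable.
cmd.Flags().StringVar(&httpAddr, "http-addr", ":7890", "listen address of the hub HTTP service")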
@@ -1,6 +1,9 @@
 package main
 
-import "gitlink.org.cn/cloudream/storage2/hub/internal/cmd"
+import (
+	_ "gitlink.org.cn/cloudream/storage2/common/pkgs/ioswitch2/ops2"
+	"gitlink.org.cn/cloudream/storage2/hub/internal/cmd"
+)
 
 func main() {
 	cmd.RootCmd.Execute()
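The underscore import pulls in ops2 purely for its init-time side effects (registering its operators before the hub starts), a standard Go pattern. A self-contained sketch of the mechanism with hypothetical package names (not this repo's API):

// registry/registry.go (hypothetical)
package registry

var ops = map[string]func(){}

// Register is called from init() functions of operator packages.
func Register(name string, fn func()) { ops[name] = fn }

// ops/shard.go (hypothetical)
package ops

import "example.com/registry"

func init() { registry.Register("shard", func() { /* ... */ }) }

// main.go
package main

import _ "example.com/ops" // blank import: compiled in only so its init() runs

func main() { /* the registry now contains "shard" */ }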