| @@ -61,7 +61,7 @@ func (svc *Service) checkIncrement(msg *agtmq.CheckCache, filesMap map[string]sh | |||
| svc.taskManager.StartComparable(task.NewIPFSPin(cache.FileHash)) | |||
| } else if cache.State == consts.CacheStateTemp { | |||
| if time.Since(cache.CacheTime) > time.Duration(config.Cfg().TempFileLifetime)*time.Second { | |||
| if time.Since(cache.CreateTime) > time.Duration(config.Cfg().TempFileLifetime)*time.Second { | |||
| entries = append(entries, agtmq.NewCheckCacheRespEntry(cache.FileHash, agtmq.CHECK_IPFS_RESP_OP_DELETE_TEMP)) | |||
| } | |||
| } | |||
| @@ -96,7 +96,7 @@ func (svc *Service) checkComplete(msg *agtmq.CheckCache, filesMap map[string]she | |||
| svc.taskManager.StartComparable(task.NewIPFSPin(cache.FileHash)) | |||
| } else if cache.State == consts.CacheStateTemp { | |||
| if time.Since(cache.CacheTime) > time.Duration(config.Cfg().TempFileLifetime)*time.Second { | |||
| if time.Since(cache.CreateTime) > time.Duration(config.Cfg().TempFileLifetime)*time.Second { | |||
| entries = append(entries, agtmq.NewCheckCacheRespEntry(cache.FileHash, agtmq.CHECK_IPFS_RESP_OP_DELETE_TEMP)) | |||
| } | |||
| } | |||
| @@ -127,8 +127,6 @@ func (svc *Service) WaitCacheMovePackage(msg *agtmq.WaitCacheMovePackage) (*agtm | |||
| return nil, mq.Failed(errorcode.TaskNotFound, "task not found") | |||
| } | |||
| mvPkgTask := tsk.Body().(*mytask.CacheMovePackage) | |||
| if msg.WaitTimeoutMs == 0 { | |||
| tsk.Wait() | |||
| @@ -137,7 +135,7 @@ func (svc *Service) WaitCacheMovePackage(msg *agtmq.WaitCacheMovePackage) (*agtm | |||
| errMsg = tsk.Error().Error() | |||
| } | |||
| return mq.ReplyOK(agtmq.NewWaitCacheMovePackageResp(true, errMsg, mvPkgTask.ResultCacheInfos)) | |||
| return mq.ReplyOK(agtmq.NewWaitCacheMovePackageResp(true, errMsg)) | |||
| } else { | |||
| if tsk.WaitTimeout(time.Duration(msg.WaitTimeoutMs) * time.Millisecond) { | |||
| @@ -147,9 +145,9 @@ func (svc *Service) WaitCacheMovePackage(msg *agtmq.WaitCacheMovePackage) (*agtm | |||
| errMsg = tsk.Error().Error() | |||
| } | |||
| return mq.ReplyOK(agtmq.NewWaitCacheMovePackageResp(true, errMsg, mvPkgTask.ResultCacheInfos)) | |||
| return mq.ReplyOK(agtmq.NewWaitCacheMovePackageResp(true, errMsg)) | |||
| } | |||
| return mq.ReplyOK(agtmq.NewWaitCacheMovePackageResp(false, "", nil)) | |||
| return mq.ReplyOK(agtmq.NewWaitCacheMovePackageResp(false, "")) | |||
| } | |||
| } | |||
| @@ -195,27 +195,7 @@ func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePacka | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(fullPath, uploadFilePathes) | |||
| if msg.Redundancy.IsRepInfo() { | |||
| repInfo, err := msg.Redundancy.ToRepInfo() | |||
| if err != nil { | |||
| logger.Warnf("getting rep redundancy info: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get rep redundancy info failed") | |||
| } | |||
| tsk := svc.taskManager.StartNew(mytask.NewCreateRepPackage(msg.UserID, msg.BucketID, msg.Name, objIter, repInfo, msg.NodeAffinity)) | |||
| return mq.ReplyOK(agtmq.NewStartStorageCreatePackageResp(tsk.ID())) | |||
| } | |||
| ecInfo, err := msg.Redundancy.ToECInfo() | |||
| if err != nil { | |||
| logger.Warnf("getting ec redundancy info: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get ec redundancy info failed") | |||
| } | |||
| tsk := svc.taskManager.StartNew(mytask.NewCreateECPackage(msg.UserID, msg.BucketID, msg.Name, objIter, ecInfo, msg.NodeAffinity)) | |||
| tsk := svc.taskManager.StartNew(mytask.NewCreatePackage(msg.UserID, msg.BucketID, msg.Name, objIter, msg.NodeAffinity)) | |||
| return mq.ReplyOK(agtmq.NewStartStorageCreatePackageResp(tsk.ID())) | |||
| } | |||
| @@ -235,14 +215,6 @@ func (svc *Service) WaitStorageCreatePackage(msg *agtmq.WaitStorageCreatePackage | |||
| return mq.ReplyOK(agtmq.NewWaitStorageCreatePackageResp(true, tsk.Error().Error(), 0)) | |||
| } | |||
| // TODO 避免判断类型 | |||
| if repTask, ok := tsk.Body().(*mytask.CreateRepPackage); ok { | |||
| return mq.ReplyOK(agtmq.NewWaitStorageCreatePackageResp(true, "", repTask.Result.PackageID)) | |||
| } | |||
| if ecTask, ok := tsk.Body().(*mytask.CreateECPackage); ok { | |||
| return mq.ReplyOK(agtmq.NewWaitStorageCreatePackageResp(true, "", ecTask.Result.PackageID)) | |||
| } | |||
| return nil, mq.Failed(errorcode.TaskNotFound, "task not found") | |||
| taskBody := tsk.Body().(*mytask.CreatePackage) | |||
| return mq.ReplyOK(agtmq.NewWaitStorageCreatePackageResp(true, "", taskBody.Result.PackageID)) | |||
| } | |||
| @@ -8,19 +8,17 @@ import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type CacheMovePackage struct { | |||
| userID int64 | |||
| packageID int64 | |||
| ResultCacheInfos []cdssdk.ObjectCacheInfo | |||
| userID cdssdk.UserID | |||
| packageID cdssdk.PackageID | |||
| } | |||
| func NewCacheMovePackage(userID int64, packageID int64) *CacheMovePackage { | |||
| func NewCacheMovePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) *CacheMovePackage { | |||
| return &CacheMovePackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| @@ -63,24 +61,9 @@ func (t *CacheMovePackage) do(ctx TaskContext) error { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| pkgResp, err := coorCli.GetPackage(coormq.NewGetPackage(t.userID, t.packageID)) | |||
| getResp, err := coorCli.GetPackageObjectDetails(coormq.NewGetPackageObjectDetails(t.packageID)) | |||
| if err != nil { | |||
| return fmt.Errorf("getting package: %w", err) | |||
| } | |||
| if pkgResp.Redundancy.IsRepInfo() { | |||
| return t.moveRep(ctx, coorCli, pkgResp.Package) | |||
| } else { | |||
| return fmt.Errorf("not implement yet!") | |||
| // TODO EC的CacheMove逻辑 | |||
| } | |||
| return nil | |||
| } | |||
| func (t *CacheMovePackage) moveRep(ctx TaskContext, coorCli *coormq.Client, pkg model.Package) error { | |||
| getRepResp, err := coorCli.GetPackageObjectRepData(coormq.NewGetPackageObjectRepData(pkg.PackageID)) | |||
| if err != nil { | |||
| return fmt.Errorf("getting package object rep data: %w", err) | |||
| return fmt.Errorf("getting package object details: %w", err) | |||
| } | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| @@ -89,19 +72,26 @@ func (t *CacheMovePackage) moveRep(ctx TaskContext, coorCli *coormq.Client, pkg | |||
| } | |||
| defer ipfsCli.Close() | |||
| var fileHashes []string | |||
| for _, rep := range getRepResp.Data { | |||
| if err := ipfsCli.Pin(rep.FileHash); err != nil { | |||
| return fmt.Errorf("pinning file %s: %w", rep.FileHash, err) | |||
| // TODO 可以考虑优化,比如rep类型的直接pin就可以 | |||
| objIter := iterator.NewDownloadObjectIterator(getResp.Objects, &iterator.DownloadContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| defer objIter.Close() | |||
| for { | |||
| obj, err := objIter.MoveNext() | |||
| if err != nil { | |||
| if err == iterator.ErrNoMoreItem { | |||
| break | |||
| } | |||
| return err | |||
| } | |||
| defer obj.File.Close() | |||
| fileHashes = append(fileHashes, rep.FileHash) | |||
| t.ResultCacheInfos = append(t.ResultCacheInfos, cdssdk.NewObjectCacheInfo(rep.Object, rep.FileHash)) | |||
| } | |||
| _, err = coorCli.CachePackageMoved(coormq.NewCachePackageMoved(pkg.PackageID, *stgglb.Local.NodeID, fileHashes)) | |||
| if err != nil { | |||
| return fmt.Errorf("reporting cache package moved: %w", err) | |||
| _, err = ipfsCli.CreateFile(obj.File) | |||
| if err != nil { | |||
| return fmt.Errorf("creating ipfs file: %w", err) | |||
| } | |||
| } | |||
| return nil | |||
| @@ -1,40 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type CreateECPackageResult = cmd.CreateECPackageResult | |||
| type CreateECPackage struct { | |||
| cmd cmd.CreateECPackage | |||
| Result *CreateECPackageResult | |||
| } | |||
| func NewCreateECPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy cdssdk.ECRedundancyInfo, nodeAffinity *int64) *CreateECPackage { | |||
| return &CreateECPackage{ | |||
| cmd: *cmd.NewCreateECPackage(userID, bucketID, name, objIter, redundancy, nodeAffinity), | |||
| } | |||
| } | |||
| func (t *CreateECPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| log := logger.WithType[CreateECPackage]("Task") | |||
| log.Debugf("begin") | |||
| defer log.Debugf("end") | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -0,0 +1,40 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type CreatePackageResult = cmd.CreatePackageResult | |||
| type CreatePackage struct { | |||
| cmd cmd.CreatePackage | |||
| Result *CreatePackageResult | |||
| } | |||
| func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage { | |||
| return &CreatePackage{ | |||
| cmd: *cmd.NewCreatePackage(userID, bucketID, name, objIter, nodeAffinity), | |||
| } | |||
| } | |||
| func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| log := logger.WithType[CreatePackage]("Task") | |||
| log.Debugf("begin") | |||
| defer log.Debugf("end") | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -1,40 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type CreateRepPackageResult = cmd.CreateRepPackageResult | |||
| type CreateRepPackage struct { | |||
| cmd cmd.CreateRepPackage | |||
| Result *CreateRepPackageResult | |||
| } | |||
| func NewCreateRepPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy cdssdk.RepRedundancyInfo, nodeAffinity *int64) *CreateRepPackage { | |||
| return &CreateRepPackage{ | |||
| cmd: *cmd.NewCreateRepPackage(userID, bucketID, name, objIter, redundancy, nodeAffinity), | |||
| } | |||
| } | |||
| func (t *CreateRepPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| log := logger.WithType[CreateRepPackage]("Task") | |||
| log.Debugf("begin") | |||
| defer log.Debugf("end") | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -4,6 +4,7 @@ import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| ) | |||
| @@ -12,7 +13,7 @@ type StorageLoadPackage struct { | |||
| FullPath string | |||
| } | |||
| func NewStorageLoadPackage(userID int64, packageID int64, outputPath string) *StorageLoadPackage { | |||
| func NewStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, outputPath string) *StorageLoadPackage { | |||
| return &StorageLoadPackage{ | |||
| cmd: cmd.NewDownloadPackage(userID, packageID, outputPath), | |||
| FullPath: outputPath, | |||
| @@ -4,10 +4,11 @@ import ( | |||
| "fmt" | |||
| "github.com/jedib0t/go-pretty/v6/table" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| ) | |||
| func BucketListUserBuckets(ctx CommandContext) error { | |||
| userID := int64(0) | |||
| userID := cdssdk.UserID(0) | |||
| buckets, err := ctx.Cmdline.Svc.BucketSvc().GetUserBuckets(userID) | |||
| if err != nil { | |||
| @@ -28,7 +29,7 @@ func BucketListUserBuckets(ctx CommandContext) error { | |||
| } | |||
| func BucketCreateBucket(ctx CommandContext, bucketName string) error { | |||
| userID := int64(0) | |||
| userID := cdssdk.UserID(0) | |||
| bucketID, err := ctx.Cmdline.Svc.BucketSvc().CreateBucket(userID, bucketName) | |||
| if err != nil { | |||
| @@ -39,8 +40,8 @@ func BucketCreateBucket(ctx CommandContext, bucketName string) error { | |||
| return nil | |||
| } | |||
| func BucketDeleteBucket(ctx CommandContext, bucketID int64) error { | |||
| userID := int64(0) | |||
| func BucketDeleteBucket(ctx CommandContext, bucketID cdssdk.BucketID) error { | |||
| userID := cdssdk.UserID(0) | |||
| err := ctx.Cmdline.Svc.BucketSvc().DeleteBucket(userID, bucketID) | |||
| if err != nil { | |||
| @@ -3,16 +3,18 @@ package cmdline | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| ) | |||
| func CacheMovePackage(ctx CommandContext, packageID int64, nodeID int64) error { | |||
| func CacheMovePackage(ctx CommandContext, packageID cdssdk.PackageID, nodeID cdssdk.NodeID) error { | |||
| taskID, err := ctx.Cmdline.Svc.CacheSvc().StartCacheMovePackage(0, packageID, nodeID) | |||
| if err != nil { | |||
| return fmt.Errorf("start cache moving package: %w", err) | |||
| } | |||
| for { | |||
| complete, _, err := ctx.Cmdline.Svc.CacheSvc().WaitCacheMovePackage(nodeID, taskID, time.Second*10) | |||
| complete, err := ctx.Cmdline.Svc.CacheSvc().WaitCacheMovePackage(nodeID, taskID, time.Second*10) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("moving complete with: %w", err) | |||
| @@ -12,8 +12,8 @@ import ( | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| func PackageListBucketPackages(ctx CommandContext, bucketID int64) error { | |||
| userID := int64(0) | |||
| func PackageListBucketPackages(ctx CommandContext, bucketID cdssdk.BucketID) error { | |||
| userID := cdssdk.UserID(0) | |||
| packages, err := ctx.Cmdline.Svc.BucketSvc().GetBucketPackages(userID, bucketID) | |||
| if err != nil { | |||
| @@ -23,17 +23,17 @@ func PackageListBucketPackages(ctx CommandContext, bucketID int64) error { | |||
| fmt.Printf("Find %d packages in bucket %d for user %d:\n", len(packages), bucketID, userID) | |||
| tb := table.NewWriter() | |||
| tb.AppendHeader(table.Row{"ID", "Name", "BucketID", "State", "Redundancy"}) | |||
| tb.AppendHeader(table.Row{"ID", "Name", "BucketID", "State"}) | |||
| for _, obj := range packages { | |||
| tb.AppendRow(table.Row{obj.PackageID, obj.Name, obj.BucketID, obj.State, obj.Redundancy}) | |||
| tb.AppendRow(table.Row{obj.PackageID, obj.Name, obj.BucketID, obj.State}) | |||
| } | |||
| fmt.Print(tb.Render()) | |||
| fmt.Println(tb.Render()) | |||
| return nil | |||
| } | |||
| func PackageDownloadPackage(ctx CommandContext, outputDir string, packageID int64) error { | |||
| func PackageDownloadPackage(ctx CommandContext, packageID cdssdk.PackageID, outputDir string) error { | |||
| err := os.MkdirAll(outputDir, os.ModePerm) | |||
| if err != nil { | |||
| return fmt.Errorf("create output directory %s failed, err: %w", outputDir, err) | |||
| @@ -86,7 +86,7 @@ func PackageDownloadPackage(ctx CommandContext, outputDir string, packageID int6 | |||
| return nil | |||
| } | |||
| func PackageUploadRepPackage(ctx CommandContext, rootPath string, bucketID int64, name string, repCount int, nodeAffinity []int64) error { | |||
| func PackageCreatePackage(ctx CommandContext, name string, rootPath string, bucketID cdssdk.BucketID, nodeAffinity []cdssdk.NodeID) error { | |||
| rootPath = filepath.Clean(rootPath) | |||
| var uploadFilePathes []string | |||
| @@ -105,122 +105,24 @@ func PackageUploadRepPackage(ctx CommandContext, rootPath string, bucketID int64 | |||
| return fmt.Errorf("open directory %s failed, err: %w", rootPath, err) | |||
| } | |||
| var nodeAff *int64 | |||
| var nodeAff *cdssdk.NodeID | |||
| if len(nodeAffinity) > 0 { | |||
| nodeAff = &nodeAffinity[0] | |||
| n := cdssdk.NodeID(nodeAffinity[0]) | |||
| nodeAff = &n | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes) | |||
| taskID, err := ctx.Cmdline.Svc.PackageSvc().StartCreatingRepPackage(0, bucketID, name, objIter, cdssdk.NewRepRedundancyInfo(repCount), nodeAff) | |||
| taskID, err := ctx.Cmdline.Svc.PackageSvc().StartCreatingPackage(0, bucketID, name, objIter, nodeAff) | |||
| if err != nil { | |||
| return fmt.Errorf("upload file data failed, err: %w", err) | |||
| } | |||
| for { | |||
| complete, uploadObjectResult, err := ctx.Cmdline.Svc.PackageSvc().WaitCreatingRepPackage(taskID, time.Second*5) | |||
| complete, uploadObjectResult, err := ctx.Cmdline.Svc.PackageSvc().WaitCreatingPackage(taskID, time.Second*5) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("uploading rep object: %w", err) | |||
| } | |||
| tb := table.NewWriter() | |||
| tb.AppendHeader(table.Row{"Path", "ObjectID", "FileHash"}) | |||
| for i := 0; i < len(uploadObjectResult.ObjectResults); i++ { | |||
| tb.AppendRow(table.Row{ | |||
| uploadObjectResult.ObjectResults[i].Info.Path, | |||
| uploadObjectResult.ObjectResults[i].ObjectID, | |||
| uploadObjectResult.ObjectResults[i].FileHash, | |||
| }) | |||
| } | |||
| fmt.Print(tb.Render()) | |||
| return nil | |||
| } | |||
| if err != nil { | |||
| return fmt.Errorf("wait uploading: %w", err) | |||
| } | |||
| } | |||
| } | |||
| func PackageUpdateRepPackage(ctx CommandContext, packageID int64, rootPath string) error { | |||
| //userID := int64(0) | |||
| var uploadFilePathes []string | |||
| err := filepath.WalkDir(rootPath, func(fname string, fi os.DirEntry, err error) error { | |||
| if err != nil { | |||
| return nil | |||
| } | |||
| if !fi.IsDir() { | |||
| uploadFilePathes = append(uploadFilePathes, fname) | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("open directory %s failed, err: %w", rootPath, err) | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes) | |||
| taskID, err := ctx.Cmdline.Svc.PackageSvc().StartUpdatingRepPackage(0, packageID, objIter) | |||
| if err != nil { | |||
| return fmt.Errorf("update object %d failed, err: %w", packageID, err) | |||
| } | |||
| for { | |||
| complete, _, err := ctx.Cmdline.Svc.PackageSvc().WaitUpdatingRepPackage(taskID, time.Second*5) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("updating rep object: %w", err) | |||
| } | |||
| return nil | |||
| } | |||
| if err != nil { | |||
| return fmt.Errorf("wait updating: %w", err) | |||
| } | |||
| } | |||
| } | |||
| func PackageUploadECPackage(ctx CommandContext, rootPath string, bucketID int64, name string, ecName string, chunkSize int, nodeAffinity []int64) error { | |||
| rootPath = filepath.Clean(rootPath) | |||
| var uploadFilePathes []string | |||
| err := filepath.WalkDir(rootPath, func(fname string, fi os.DirEntry, err error) error { | |||
| if err != nil { | |||
| return nil | |||
| } | |||
| if !fi.IsDir() { | |||
| uploadFilePathes = append(uploadFilePathes, fname) | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("open directory %s failed, err: %w", rootPath, err) | |||
| } | |||
| var nodeAff *int64 | |||
| if len(nodeAffinity) > 0 { | |||
| nodeAff = &nodeAffinity[0] | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes) | |||
| taskID, err := ctx.Cmdline.Svc.PackageSvc().StartCreatingECPackage(0, bucketID, name, objIter, cdssdk.NewECRedundancyInfo(ecName, chunkSize), nodeAff) | |||
| if err != nil { | |||
| return fmt.Errorf("upload file data failed, err: %w", err) | |||
| } | |||
| for { | |||
| complete, uploadObjectResult, err := ctx.Cmdline.Svc.PackageSvc().WaitCreatingECPackage(taskID, time.Second*5) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("uploading ec package: %w", err) | |||
| return fmt.Errorf("uploading package: %w", err) | |||
| } | |||
| tb := table.NewWriter() | |||
| @@ -242,7 +144,7 @@ func PackageUploadECPackage(ctx CommandContext, rootPath string, bucketID int64, | |||
| } | |||
| } | |||
| func PackageUpdateECPackage(ctx CommandContext, packageID int64, rootPath string) error { | |||
| func PackageUpdatePackage(ctx CommandContext, packageID cdssdk.PackageID, rootPath string) error { | |||
| //userID := int64(0) | |||
| var uploadFilePathes []string | |||
| @@ -262,16 +164,16 @@ func PackageUpdateECPackage(ctx CommandContext, packageID int64, rootPath string | |||
| } | |||
| objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes) | |||
| taskID, err := ctx.Cmdline.Svc.PackageSvc().StartUpdatingECPackage(0, packageID, objIter) | |||
| taskID, err := ctx.Cmdline.Svc.PackageSvc().StartUpdatingPackage(0, packageID, objIter) | |||
| if err != nil { | |||
| return fmt.Errorf("update package %d failed, err: %w", packageID, err) | |||
| } | |||
| for { | |||
| complete, _, err := ctx.Cmdline.Svc.PackageSvc().WaitUpdatingECPackage(taskID, time.Second*5) | |||
| complete, _, err := ctx.Cmdline.Svc.PackageSvc().WaitUpdatingPackage(taskID, time.Second*5) | |||
| if complete { | |||
| if err != nil { | |||
| return fmt.Errorf("updating ec package: %w", err) | |||
| return fmt.Errorf("updating package: %w", err) | |||
| } | |||
| return nil | |||
| @@ -283,8 +185,8 @@ func PackageUpdateECPackage(ctx CommandContext, packageID int64, rootPath string | |||
| } | |||
| } | |||
| func PackageDeletePackage(ctx CommandContext, packageID int64) error { | |||
| userID := int64(0) | |||
| func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error { | |||
| userID := cdssdk.UserID(0) | |||
| err := ctx.Cmdline.Svc.PackageSvc().DeletePackage(userID, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("delete package %d failed, err: %w", packageID, err) | |||
| @@ -292,7 +194,8 @@ func PackageDeletePackage(ctx CommandContext, packageID int64) error { | |||
| return nil | |||
| } | |||
| func PackageGetCachedNodes(ctx CommandContext, packageID int64, userID int64) error { | |||
| func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error { | |||
| userID := cdssdk.UserID(0) | |||
| resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedNodes(userID, packageID) | |||
| fmt.Printf("resp: %v\n", resp) | |||
| if err != nil { | |||
| @@ -301,7 +204,8 @@ func PackageGetCachedNodes(ctx CommandContext, packageID int64, userID int64) er | |||
| return nil | |||
| } | |||
| func PackageGetLoadedNodes(ctx CommandContext, packageID int64, userID int64) error { | |||
| func PackageGetLoadedNodes(ctx CommandContext, packageID cdssdk.PackageID) error { | |||
| userID := cdssdk.UserID(0) | |||
| nodeIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedNodes(userID, packageID) | |||
| fmt.Printf("nodeIDs: %v\n", nodeIDs) | |||
| if err != nil { | |||
| @@ -315,13 +219,9 @@ func init() { | |||
| commands.MustAdd(PackageDownloadPackage, "pkg", "get") | |||
| commands.MustAdd(PackageUploadRepPackage, "pkg", "new", "rep") | |||
| commands.MustAdd(PackageUpdateRepPackage, "pkg", "update", "rep") | |||
| commands.MustAdd(PackageUploadECPackage, "pkg", "new", "ec") | |||
| commands.MustAdd(PackageCreatePackage, "pkg", "new") | |||
| commands.MustAdd(PackageUpdateECPackage, "pkg", "update", "ec") | |||
| commands.MustAdd(PackageUpdatePackage, "pkg", "update") | |||
| commands.MustAdd(PackageDeletePackage, "pkg", "delete") | |||
| @@ -7,7 +7,7 @@ import ( | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| ) | |||
| func StorageLoadPackage(ctx CommandContext, packageID int64, storageID int64) error { | |||
| func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageID cdssdk.StorageID) error { | |||
| taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(0, packageID, storageID) | |||
| if err != nil { | |||
| return fmt.Errorf("start loading package to storage: %w", err) | |||
| @@ -30,11 +30,10 @@ func StorageLoadPackage(ctx CommandContext, packageID int64, storageID int64) er | |||
| } | |||
| } | |||
| func StorageCreateRepPackage(ctx CommandContext, bucketID int64, name string, storageID int64, path string, repCount int) error { | |||
| nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageCreatePackage(0, bucketID, name, storageID, path, | |||
| cdssdk.NewTypedRepRedundancyInfo(repCount), nil) | |||
| func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string) error { | |||
| nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageCreatePackage(0, bucketID, name, storageID, path, nil) | |||
| if err != nil { | |||
| return fmt.Errorf("start storage uploading rep package: %w", err) | |||
| return fmt.Errorf("start storage uploading package: %w", err) | |||
| } | |||
| for { | |||
| @@ -55,7 +54,7 @@ func StorageCreateRepPackage(ctx CommandContext, bucketID int64, name string, st | |||
| } | |||
| func init() { | |||
| commands.MustAdd(StorageLoadPackage, "stg", "load", "pkg") | |||
| commands.MustAdd(StorageLoadPackage, "stg", "pkg", "load") | |||
| commands.MustAdd(StorageCreateRepPackage, "stg", "upload", "rep") | |||
| commands.MustAdd(StorageCreatePackage, "stg", "pkg", "new") | |||
| } | |||
| @@ -21,13 +21,11 @@ func (s *Server) CacheSvc() *CacheService { | |||
| } | |||
| type CacheMovePackageReq struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| PackageID *int64 `json:"packageID" binding:"required"` | |||
| NodeID *int64 `json:"nodeID" binding:"required"` | |||
| } | |||
| type CacheMovePackageResp struct { | |||
| CacheInfos []cdssdk.ObjectCacheInfo `json:"cacheInfos"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `json:"packageID" binding:"required"` | |||
| NodeID *cdssdk.NodeID `json:"nodeID" binding:"required"` | |||
| } | |||
| type CacheMovePackageResp = cdssdk.CacheMovePackageResp | |||
| func (s *CacheService) MovePackage(ctx *gin.Context) { | |||
| log := logger.WithField("HTTP", "Cache.LoadPackage") | |||
| @@ -47,7 +45,7 @@ func (s *CacheService) MovePackage(ctx *gin.Context) { | |||
| } | |||
| for { | |||
| complete, cacheInfos, err := s.svc.CacheSvc().WaitCacheMovePackage(*req.NodeID, taskID, time.Second*10) | |||
| complete, err := s.svc.CacheSvc().WaitCacheMovePackage(*req.NodeID, taskID, time.Second*10) | |||
| if complete { | |||
| if err != nil { | |||
| log.Warnf("moving complete with: %s", err.Error()) | |||
| @@ -55,9 +53,7 @@ func (s *CacheService) MovePackage(ctx *gin.Context) { | |||
| return | |||
| } | |||
| ctx.JSON(http.StatusOK, OK(CacheMovePackageResp{ | |||
| CacheInfos: cacheInfos, | |||
| })) | |||
| ctx.JSON(http.StatusOK, OK(CacheMovePackageResp{})) | |||
| return | |||
| } | |||
| @@ -68,30 +64,3 @@ func (s *CacheService) MovePackage(ctx *gin.Context) { | |||
| } | |||
| } | |||
| } | |||
| type CacheGetPackageObjectCacheInfosReq struct { | |||
| UserID *int64 `form:"userID" binding:"required"` | |||
| PackageID *int64 `form:"packageID" binding:"required"` | |||
| } | |||
| type CacheGetPackageObjectCacheInfosResp = cdssdk.CacheGetPackageObjectCacheInfosResp | |||
| func (s *CacheService) GetPackageObjectCacheInfos(ctx *gin.Context) { | |||
| log := logger.WithField("HTTP", "Cache.GetPackageObjectCacheInfos") | |||
| var req CacheGetPackageObjectCacheInfosReq | |||
| if err := ctx.ShouldBindQuery(&req); err != nil { | |||
| log.Warnf("binding body: %s", err.Error()) | |||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | |||
| return | |||
| } | |||
| infos, err := s.svc.CacheSvc().GetPackageObjectCacheInfos(*req.UserID, *req.PackageID) | |||
| if err != nil { | |||
| log.Warnf("getting package object cache infos: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package object cache infos failed")) | |||
| return | |||
| } | |||
| ctx.JSON(http.StatusOK, OK(CacheGetPackageObjectCacheInfosResp{Infos: infos})) | |||
| } | |||
| @@ -7,6 +7,7 @@ import ( | |||
| "github.com/gin-gonic/gin" | |||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| ) | |||
| @@ -21,8 +22,8 @@ func (s *Server) ObjectSvc() *ObjectService { | |||
| } | |||
| type ObjectDownloadReq struct { | |||
| UserID *int64 `form:"userID" binding:"required"` | |||
| ObjectID *int64 `form:"objectID" binding:"required"` | |||
| UserID *cdssdk.UserID `form:"userID" binding:"required"` | |||
| ObjectID *cdssdk.ObjectID `form:"objectID" binding:"required"` | |||
| } | |||
| func (s *ObjectService) Download(ctx *gin.Context) { | |||
| @@ -72,3 +73,29 @@ func (s *ObjectService) Download(ctx *gin.Context) { | |||
| return true | |||
| }) | |||
| } | |||
| type GetPackageObjectsReq struct { | |||
| UserID *cdssdk.UserID `form:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `form:"packageID" binding:"required"` | |||
| } | |||
| type GetPackageObjectsResp = cdssdk.ObjectGetPackageObjectsResp | |||
| func (s *ObjectService) GetPackageObjects(ctx *gin.Context) { | |||
| log := logger.WithField("HTTP", "Object.GetPackageObjects") | |||
| var req GetPackageObjectsReq | |||
| if err := ctx.ShouldBindQuery(&req); err != nil { | |||
| log.Warnf("binding body: %s", err.Error()) | |||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) | |||
| return | |||
| } | |||
| objs, err := s.svc.ObjectSvc().GetPackageObjects(*req.UserID, *req.PackageID) | |||
| if err != nil { | |||
| log.Warnf("getting package objects: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package object failed")) | |||
| return | |||
| } | |||
| ctx.JSON(http.StatusOK, OK(GetPackageObjectsResp{Objects: objs})) | |||
| } | |||
| @@ -26,8 +26,8 @@ func (s *Server) PackageSvc() *PackageService { | |||
| } | |||
| type PackageGetReq struct { | |||
| UserID *int64 `form:"userID" binding:"required"` | |||
| PackageID *int64 `form:"packageID" binding:"required"` | |||
| UserID *cdssdk.UserID `form:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `form:"packageID" binding:"required"` | |||
| } | |||
| type PackageGetResp struct { | |||
| model.Package | |||
| @@ -59,15 +59,14 @@ type PackageUploadReq struct { | |||
| } | |||
| type PackageUploadInfo struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| BucketID *int64 `json:"bucketID" binding:"required"` | |||
| Name string `json:"name" binding:"required"` | |||
| Redundancy cdssdk.TypedRedundancyInfo `json:"redundancy" binding:"required"` | |||
| NodeAffinity *int64 `json:"nodeAffinity"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| BucketID *cdssdk.BucketID `json:"bucketID" binding:"required"` | |||
| Name string `json:"name" binding:"required"` | |||
| NodeAffinity *cdssdk.NodeID `json:"nodeAffinity"` | |||
| } | |||
| type PackageUploadResp struct { | |||
| PackageID int64 `json:"packageID,string"` | |||
| PackageID cdssdk.PackageID `json:"packageID,string"` | |||
| } | |||
| func (s *PackageService) Upload(ctx *gin.Context) { | |||
| @@ -80,77 +79,17 @@ func (s *PackageService) Upload(ctx *gin.Context) { | |||
| return | |||
| } | |||
| if req.Info.Redundancy.IsRepInfo() { | |||
| s.uploadRep(ctx, &req) | |||
| return | |||
| } | |||
| if req.Info.Redundancy.IsECInfo() { | |||
| s.uploadEC(ctx, &req) | |||
| return | |||
| } | |||
| ctx.JSON(http.StatusForbidden, Failed(errorcode.OperationFailed, "not supported yet")) | |||
| } | |||
| func (s *PackageService) uploadRep(ctx *gin.Context, req *PackageUploadReq) { | |||
| log := logger.WithField("HTTP", "Package.Upload") | |||
| var err error | |||
| var repInfo cdssdk.RepRedundancyInfo | |||
| if repInfo, err = req.Info.Redundancy.ToRepInfo(); err != nil { | |||
| log.Warnf("parsing rep redundancy config: %s", err.Error()) | |||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "invalid rep redundancy config")) | |||
| return | |||
| } | |||
| objIter := mapMultiPartFileToUploadingObject(req.Files) | |||
| taskID, err := s.svc.PackageSvc().StartCreatingRepPackage(*req.Info.UserID, *req.Info.BucketID, req.Info.Name, objIter, repInfo, req.Info.NodeAffinity) | |||
| if err != nil { | |||
| log.Warnf("start uploading rep package task: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "start uploading task failed")) | |||
| return | |||
| } | |||
| for { | |||
| complete, createResult, err := s.svc.PackageSvc().WaitCreatingRepPackage(taskID, time.Second*5) | |||
| if complete { | |||
| if err != nil { | |||
| log.Warnf("uploading rep package: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "uploading rep package failed")) | |||
| return | |||
| } | |||
| ctx.JSON(http.StatusOK, OK(PackageUploadResp{ | |||
| PackageID: createResult.PackageID, | |||
| })) | |||
| return | |||
| } | |||
| if err != nil { | |||
| log.Warnf("waiting task: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "wait uploading task failed")) | |||
| return | |||
| } | |||
| } | |||
| s.uploadEC(ctx, &req) | |||
| } | |||
| func (s *PackageService) uploadEC(ctx *gin.Context, req *PackageUploadReq) { | |||
| log := logger.WithField("HTTP", "Package.Upload") | |||
| var err error | |||
| var ecInfo cdssdk.ECRedundancyInfo | |||
| if ecInfo, err = req.Info.Redundancy.ToECInfo(); err != nil { | |||
| log.Warnf("parsing ec redundancy config: %s", err.Error()) | |||
| ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "invalid rep redundancy config")) | |||
| return | |||
| } | |||
| objIter := mapMultiPartFileToUploadingObject(req.Files) | |||
| taskID, err := s.svc.PackageSvc().StartCreatingECPackage(*req.Info.UserID, *req.Info.BucketID, req.Info.Name, objIter, ecInfo, req.Info.NodeAffinity) | |||
| taskID, err := s.svc.PackageSvc().StartCreatingPackage(*req.Info.UserID, *req.Info.BucketID, req.Info.Name, objIter, req.Info.NodeAffinity) | |||
| if err != nil { | |||
| log.Warnf("start uploading ec package task: %s", err.Error()) | |||
| @@ -159,7 +98,7 @@ func (s *PackageService) uploadEC(ctx *gin.Context, req *PackageUploadReq) { | |||
| } | |||
| for { | |||
| complete, createResult, err := s.svc.PackageSvc().WaitCreatingECPackage(taskID, time.Second*5) | |||
| complete, createResult, err := s.svc.PackageSvc().WaitCreatingPackage(taskID, time.Second*5) | |||
| if complete { | |||
| if err != nil { | |||
| log.Warnf("uploading ec package: %s", err.Error()) | |||
| @@ -182,8 +121,8 @@ func (s *PackageService) uploadEC(ctx *gin.Context, req *PackageUploadReq) { | |||
| } | |||
| type PackageDeleteReq struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| PackageID *int64 `json:"packageID" binding:"required"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `json:"packageID" binding:"required"` | |||
| } | |||
| func (s *PackageService) Delete(ctx *gin.Context) { | |||
| @@ -207,8 +146,8 @@ func (s *PackageService) Delete(ctx *gin.Context) { | |||
| } | |||
| type GetCachedNodesReq struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| PackageID *int64 `json:"packageID" binding:"required"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `json:"packageID" binding:"required"` | |||
| } | |||
| type GetCachedNodesResp struct { | |||
| cdssdk.PackageCachingInfo | |||
| @@ -235,12 +174,12 @@ func (s *PackageService) GetCachedNodes(ctx *gin.Context) { | |||
| } | |||
| type GetLoadedNodesReq struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| PackageID *int64 `json:"packageID" binding:"required"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `json:"packageID" binding:"required"` | |||
| } | |||
| type GetLoadedNodesResp struct { | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| NodeIDs []cdssdk.NodeID `json:"nodeIDs"` | |||
| } | |||
| func (s *PackageService) GetLoadedNodes(ctx *gin.Context) { | |||
| @@ -3,6 +3,7 @@ package http | |||
| import ( | |||
| "github.com/gin-gonic/gin" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/client/internal/services" | |||
| ) | |||
| @@ -39,6 +40,7 @@ func (s *Server) Serve() error { | |||
| func (s *Server) initRouters() { | |||
| s.engine.GET("/object/download", s.ObjectSvc().Download) | |||
| s.engine.GET(cdssdk.ObjectGetPackageObjectsPath, s.ObjectSvc().GetPackageObjects) | |||
| s.engine.GET("/package/get", s.PackageSvc().Get) | |||
| s.engine.POST("/package/upload", s.PackageSvc().Upload) | |||
| @@ -50,6 +52,5 @@ func (s *Server) initRouters() { | |||
| s.engine.POST("/storage/createPackage", s.StorageSvc().CreatePackage) | |||
| s.engine.GET("/storage/getInfo", s.StorageSvc().GetInfo) | |||
| s.engine.POST("/cache/movePackage", s.CacheSvc().MovePackage) | |||
| s.engine.GET("/cache/getPackageObjectCacheInfos", s.CacheSvc().GetPackageObjectCacheInfos) | |||
| s.engine.POST(cdssdk.CacheMovePackagePath, s.CacheSvc().MovePackage) | |||
| } | |||
| @@ -21,9 +21,9 @@ func (s *Server) StorageSvc() *StorageService { | |||
| } | |||
| type StorageLoadPackageReq struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| PackageID *int64 `json:"packageID" binding:"required"` | |||
| StorageID *int64 `json:"storageID" binding:"required"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| PackageID *cdssdk.PackageID `json:"packageID" binding:"required"` | |||
| StorageID *cdssdk.StorageID `json:"storageID" binding:"required"` | |||
| } | |||
| type StorageLoadPackageResp struct { | |||
| @@ -73,17 +73,16 @@ func (s *StorageService) LoadPackage(ctx *gin.Context) { | |||
| } | |||
| type StorageCreatePackageReq struct { | |||
| UserID *int64 `json:"userID" binding:"required"` | |||
| StorageID *int64 `json:"storageID" binding:"required"` | |||
| Path string `json:"path" binding:"required"` | |||
| BucketID *int64 `json:"bucketID" binding:"required"` | |||
| Name string `json:"name" binding:"required"` | |||
| Redundancy cdssdk.TypedRedundancyInfo `json:"redundancy" binding:"required"` | |||
| NodeAffinity *int64 `json:"nodeAffinity"` | |||
| UserID *cdssdk.UserID `json:"userID" binding:"required"` | |||
| StorageID *cdssdk.StorageID `json:"storageID" binding:"required"` | |||
| Path string `json:"path" binding:"required"` | |||
| BucketID *cdssdk.BucketID `json:"bucketID" binding:"required"` | |||
| Name string `json:"name" binding:"required"` | |||
| NodeAffinity *cdssdk.NodeID `json:"nodeAffinity"` | |||
| } | |||
| type StorageCreatePackageResp struct { | |||
| PackageID int64 `json:"packageID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| func (s *StorageService) CreatePackage(ctx *gin.Context) { | |||
| @@ -97,7 +96,7 @@ func (s *StorageService) CreatePackage(ctx *gin.Context) { | |||
| } | |||
| nodeID, taskID, err := s.svc.StorageSvc().StartStorageCreatePackage( | |||
| *req.UserID, *req.BucketID, req.Name, *req.StorageID, req.Path, req.Redundancy, req.NodeAffinity) | |||
| *req.UserID, *req.BucketID, req.Name, *req.StorageID, req.Path, req.NodeAffinity) | |||
| if err != nil { | |||
| log.Warnf("start storage create package: %s", err.Error()) | |||
| ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "storage create package failed")) | |||
| @@ -128,8 +127,8 @@ func (s *StorageService) CreatePackage(ctx *gin.Context) { | |||
| } | |||
| type StorageGetInfoReq struct { | |||
| UserID *int64 `form:"userID" binding:"required"` | |||
| StorageID *int64 `form:"storageID" binding:"required"` | |||
| UserID *cdssdk.UserID `form:"userID" binding:"required"` | |||
| StorageID *cdssdk.StorageID `form:"storageID" binding:"required"` | |||
| } | |||
| type StorageGetInfoResp struct { | |||
| @@ -3,6 +3,7 @@ package services | |||
| import ( | |||
| "fmt" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| @@ -17,12 +18,12 @@ func (svc *Service) BucketSvc() *BucketService { | |||
| return &BucketService{Service: svc} | |||
| } | |||
| func (svc *BucketService) GetBucket(userID int64, bucketID int64) (model.Bucket, error) { | |||
| func (svc *BucketService) GetBucket(userID cdssdk.UserID, bucketID cdssdk.BucketID) (model.Bucket, error) { | |||
| // TODO | |||
| panic("not implement yet") | |||
| } | |||
| func (svc *BucketService) GetUserBuckets(userID int64) ([]model.Bucket, error) { | |||
| func (svc *BucketService) GetUserBuckets(userID cdssdk.UserID) ([]model.Bucket, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -37,7 +38,7 @@ func (svc *BucketService) GetUserBuckets(userID int64) ([]model.Bucket, error) { | |||
| return resp.Buckets, nil | |||
| } | |||
| func (svc *BucketService) GetBucketPackages(userID int64, bucketID int64) ([]model.Package, error) { | |||
| func (svc *BucketService) GetBucketPackages(userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -52,7 +53,7 @@ func (svc *BucketService) GetBucketPackages(userID int64, bucketID int64) ([]mod | |||
| return resp.Packages, nil | |||
| } | |||
| func (svc *BucketService) CreateBucket(userID int64, bucketName string) (int64, error) { | |||
| func (svc *BucketService) CreateBucket(userID cdssdk.UserID, bucketName string) (cdssdk.BucketID, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -80,7 +81,7 @@ func (svc *BucketService) CreateBucket(userID int64, bucketName string) (int64, | |||
| return resp.BucketID, nil | |||
| } | |||
| func (svc *BucketService) DeleteBucket(userID int64, bucketID int64) error { | |||
| func (svc *BucketService) DeleteBucket(userID cdssdk.UserID, bucketID cdssdk.BucketID) error { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -8,7 +8,6 @@ import ( | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type CacheService struct { | |||
| @@ -19,7 +18,7 @@ func (svc *Service) CacheSvc() *CacheService { | |||
| return &CacheService{Service: svc} | |||
| } | |||
| func (svc *CacheService) StartCacheMovePackage(userID int64, packageID int64, nodeID int64) (string, error) { | |||
| func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, nodeID cdssdk.NodeID) (string, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| if err != nil { | |||
| return "", fmt.Errorf("new agent client: %w", err) | |||
| @@ -34,40 +33,25 @@ func (svc *CacheService) StartCacheMovePackage(userID int64, packageID int64, no | |||
| return startResp.TaskID, nil | |||
| } | |||
| func (svc *CacheService) WaitCacheMovePackage(nodeID int64, taskID string, waitTimeout time.Duration) (bool, []cdssdk.ObjectCacheInfo, error) { | |||
| func (svc *CacheService) WaitCacheMovePackage(nodeID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| if err != nil { | |||
| return true, nil, fmt.Errorf("new agent client: %w", err) | |||
| return true, fmt.Errorf("new agent client: %w", err) | |||
| } | |||
| defer stgglb.AgentMQPool.Release(agentCli) | |||
| waitResp, err := agentCli.WaitCacheMovePackage(agtmq.NewWaitCacheMovePackage(taskID, waitTimeout.Milliseconds())) | |||
| if err != nil { | |||
| return true, nil, fmt.Errorf("wait cache move package: %w", err) | |||
| return true, fmt.Errorf("wait cache move package: %w", err) | |||
| } | |||
| if !waitResp.IsComplete { | |||
| return false, nil, nil | |||
| return false, nil | |||
| } | |||
| if waitResp.Error != "" { | |||
| return true, nil, fmt.Errorf("%s", waitResp.Error) | |||
| return true, fmt.Errorf("%s", waitResp.Error) | |||
| } | |||
| return true, waitResp.CacheInfos, nil | |||
| } | |||
| func (svc *CacheService) GetPackageObjectCacheInfos(userID int64, packageID int64) ([]cdssdk.ObjectCacheInfo, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| getResp, err := coorCli.GetPackageObjectCacheInfos(coormq.NewGetPackageObjectCacheInfos(userID, packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("requesting to coodinator: %w", err) | |||
| } | |||
| return getResp.Infos, nil | |||
| return true, nil | |||
| } | |||
| @@ -4,6 +4,7 @@ import ( | |||
| "fmt" | |||
| "io" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| @@ -17,11 +18,11 @@ func (svc *Service) ObjectSvc() *ObjectService { | |||
| return &ObjectService{Service: svc} | |||
| } | |||
| func (svc *ObjectService) Download(userID int64, objectID int64) (io.ReadCloser, error) { | |||
| func (svc *ObjectService) Download(userID cdssdk.UserID, objectID cdssdk.ObjectID) (io.ReadCloser, error) { | |||
| panic("not implement yet!") | |||
| } | |||
| func (svc *ObjectService) GetPackageObjects(userID int64, packageID int64) ([]model.Object, error) { | |||
| func (svc *ObjectService) GetPackageObjects(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]model.Object, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -23,7 +23,7 @@ func (svc *Service) PackageSvc() *PackageService { | |||
| return &PackageService{Service: svc} | |||
| } | |||
| func (svc *PackageService) Get(userID int64, packageID int64) (*model.Package, error) { | |||
| func (svc *PackageService) Get(userID cdssdk.UserID, packageID cdssdk.PackageID) (*model.Package, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -38,7 +38,7 @@ func (svc *PackageService) Get(userID int64, packageID int64) (*model.Package, e | |||
| return &getResp.Package, nil | |||
| } | |||
| func (svc *PackageService) DownloadPackage(userID int64, packageID int64) (iterator.DownloadingObjectIterator, error) { | |||
| func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID) (iterator.DownloadingObjectIterator, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -65,137 +65,50 @@ func (svc *PackageService) DownloadPackage(userID int64, packageID int64) (itera | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| getPkgResp, err := coorCli.GetPackage(coormq.NewGetPackage(userID, packageID)) | |||
| getObjsResp, err := coorCli.GetPackageObjectDetails(coormq.NewGetPackageObjectDetails(packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package: %w", err) | |||
| return nil, fmt.Errorf("getting package object details: %w", err) | |||
| } | |||
| getObjsResp, err := coorCli.GetPackageObjects(coormq.NewGetPackageObjects(userID, packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package objects: %w", err) | |||
| } | |||
| if getPkgResp.Redundancy.IsRepInfo() { | |||
| iter, err := svc.downloadRepPackage(packageID, getObjsResp.Objects, coorCli) | |||
| if err != nil { | |||
| mutex.Unlock() | |||
| return nil, err | |||
| } | |||
| iter.OnClosing = func() { | |||
| mutex.Unlock() | |||
| } | |||
| return iter, nil | |||
| } else { | |||
| iter, err := svc.downloadECPackage(getPkgResp.Package, getObjsResp.Objects, coorCli) | |||
| if err != nil { | |||
| mutex.Unlock() | |||
| return nil, err | |||
| } | |||
| iter.OnClosing = func() { | |||
| mutex.Unlock() | |||
| } | |||
| return iter, nil | |||
| } | |||
| } | |||
| func (svc *PackageService) downloadRepPackage(packageID int64, objects []model.Object, coorCli *coormq.Client) (*iterator.RepObjectIterator, error) { | |||
| getObjRepDataResp, err := coorCli.GetPackageObjectRepData(coormq.NewGetPackageObjectRepData(packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package object rep data: %w", err) | |||
| } | |||
| iter := iterator.NewRepObjectIterator(objects, getObjRepDataResp.Data, &iterator.DownloadContext{ | |||
| iter := iterator.NewDownloadObjectIterator(getObjsResp.Objects, &iterator.DownloadContext{ | |||
| Distlock: svc.DistLock, | |||
| }) | |||
| return iter, nil | |||
| } | |||
| func (svc *PackageService) downloadECPackage(pkg model.Package, objects []model.Object, coorCli *coormq.Client) (*iterator.ECObjectIterator, error) { | |||
| getObjECDataResp, err := coorCli.GetPackageObjectECData(coormq.NewGetPackageObjectECData(pkg.PackageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package object ec data: %w", err) | |||
| } | |||
| var ecInfo cdssdk.ECRedundancyInfo | |||
| if ecInfo, err = pkg.Redundancy.ToECInfo(); err != nil { | |||
| return nil, fmt.Errorf("get ec redundancy info: %w", err) | |||
| } | |||
| getECResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(ecInfo.ECName)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting ec: %w", err) | |||
| iter.OnClosing = func() { | |||
| mutex.Unlock() | |||
| } | |||
| iter := iterator.NewECObjectIterator(objects, getObjECDataResp.Data, ecInfo, getECResp.Config, &iterator.DownloadContext{ | |||
| Distlock: svc.DistLock, | |||
| }) | |||
| return iter, nil | |||
| } | |||
| func (svc *PackageService) StartCreatingRepPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, repInfo cdssdk.RepRedundancyInfo, nodeAffinity *int64) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewCreateRepPackage(userID, bucketID, name, objIter, repInfo, nodeAffinity)) | |||
| return tsk.ID(), nil | |||
| } | |||
| func (svc *PackageService) WaitCreatingRepPackage(taskID string, waitTimeout time.Duration) (bool, *mytask.CreateRepPackageResult, error) { | |||
| tsk := svc.TaskMgr.FindByID(taskID) | |||
| if tsk.WaitTimeout(waitTimeout) { | |||
| cteatePkgTask := tsk.Body().(*mytask.CreateRepPackage) | |||
| return true, cteatePkgTask.Result, tsk.Error() | |||
| } | |||
| return false, nil, nil | |||
| } | |||
| func (svc *PackageService) StartUpdatingRepPackage(userID int64, packageID int64, objIter iterator.UploadingObjectIterator) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewUpdateRepPackage(userID, packageID, objIter)) | |||
| return tsk.ID(), nil | |||
| } | |||
| func (svc *PackageService) WaitUpdatingRepPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.UpdateRepPackageResult, error) { | |||
| tsk := svc.TaskMgr.FindByID(taskID) | |||
| if tsk.WaitTimeout(waitTimeout) { | |||
| updatePkgTask := tsk.Body().(*mytask.UpdateRepPackage) | |||
| return true, updatePkgTask.Result, tsk.Error() | |||
| } | |||
| return false, nil, nil | |||
| } | |||
| func (svc *PackageService) StartCreatingECPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, ecInfo cdssdk.ECRedundancyInfo, nodeAffinity *int64) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewCreateECPackage(userID, bucketID, name, objIter, ecInfo, nodeAffinity)) | |||
| func (svc *PackageService) StartCreatingPackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewCreatePackage(userID, bucketID, name, objIter, nodeAffinity)) | |||
| return tsk.ID(), nil | |||
| } | |||
| func (svc *PackageService) WaitCreatingECPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.CreateECPackageResult, error) { | |||
| func (svc *PackageService) WaitCreatingPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.CreatePackageResult, error) { | |||
| tsk := svc.TaskMgr.FindByID(taskID) | |||
| if tsk.WaitTimeout(waitTimeout) { | |||
| cteatePkgTask := tsk.Body().(*mytask.CreateECPackage) | |||
| cteatePkgTask := tsk.Body().(*mytask.CreatePackage) | |||
| return true, cteatePkgTask.Result, tsk.Error() | |||
| } | |||
| return false, nil, nil | |||
| } | |||
| func (svc *PackageService) StartUpdatingECPackage(userID int64, packageID int64, objIter iterator.UploadingObjectIterator) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewUpdateECPackage(userID, packageID, objIter)) | |||
| func (svc *PackageService) StartUpdatingPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(mytask.NewUpdatePackage(userID, packageID, objIter)) | |||
| return tsk.ID(), nil | |||
| } | |||
| func (svc *PackageService) WaitUpdatingECPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.UpdateECPackageResult, error) { | |||
| func (svc *PackageService) WaitUpdatingPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.UpdatePackageResult, error) { | |||
| tsk := svc.TaskMgr.FindByID(taskID) | |||
| if tsk.WaitTimeout(waitTimeout) { | |||
| updatePkgTask := tsk.Body().(*mytask.UpdateECPackage) | |||
| updatePkgTask := tsk.Body().(*mytask.UpdatePackage) | |||
| return true, updatePkgTask.Result, tsk.Error() | |||
| } | |||
| return false, nil, nil | |||
| } | |||
| func (svc *PackageService) DeletePackage(userID int64, packageID int64) error { | |||
| func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) error { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -230,7 +143,7 @@ func (svc *PackageService) DeletePackage(userID int64, packageID int64) error { | |||
| return nil | |||
| } | |||
| func (svc *PackageService) GetCachedNodes(userID int64, packageID int64) (cdssdk.PackageCachingInfo, error) { | |||
| func (svc *PackageService) GetCachedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) (cdssdk.PackageCachingInfo, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return cdssdk.PackageCachingInfo{}, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -243,14 +156,13 @@ func (svc *PackageService) GetCachedNodes(userID int64, packageID int64) (cdssdk | |||
| } | |||
| tmp := cdssdk.PackageCachingInfo{ | |||
| NodeInfos: resp.NodeInfos, | |||
| PackageSize: resp.PackageSize, | |||
| RedunancyType: resp.RedunancyType, | |||
| NodeInfos: resp.NodeInfos, | |||
| PackageSize: resp.PackageSize, | |||
| } | |||
| return tmp, nil | |||
| } | |||
| func (svc *PackageService) GetLoadedNodes(userID int64, packageID int64) ([]int64, error) { | |||
| func (svc *PackageService) GetLoadedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]cdssdk.NodeID, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -21,7 +21,7 @@ func (svc *Service) StorageSvc() *StorageService { | |||
| return &StorageService{Service: svc} | |||
| } | |||
| func (svc *StorageService) StartStorageLoadPackage(userID int64, packageID int64, storageID int64) (string, error) { | |||
| func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) (string, error) { | |||
| tsk := svc.TaskMgr.StartNew(task.NewStorageLoadPackage(userID, packageID, storageID)) | |||
| return tsk.ID(), nil | |||
| } | |||
| @@ -41,7 +41,7 @@ func (svc *StorageService) DeleteStoragePackage(userID int64, packageID int64, s | |||
| } | |||
| // 请求节点启动从Storage中上传文件的任务。会返回节点ID和任务ID | |||
| func (svc *StorageService) StartStorageCreatePackage(userID int64, bucketID int64, name string, storageID int64, path string, redundancy cdssdk.TypedRedundancyInfo, nodeAffinity *int64) (int64, string, error) { | |||
| func (svc *StorageService) StartStorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, nodeAffinity *cdssdk.NodeID) (cdssdk.NodeID, string, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -59,7 +59,7 @@ func (svc *StorageService) StartStorageCreatePackage(userID int64, bucketID int6 | |||
| } | |||
| defer stgglb.AgentMQPool.Release(agentCli) | |||
| startResp, err := agentCli.StartStorageCreatePackage(agtmq.NewStartStorageCreatePackage(userID, bucketID, name, storageID, path, redundancy, nodeAffinity)) | |||
| startResp, err := agentCli.StartStorageCreatePackage(agtmq.NewStartStorageCreatePackage(userID, bucketID, name, storageID, path, nodeAffinity)) | |||
| if err != nil { | |||
| return 0, "", fmt.Errorf("start storage upload package: %w", err) | |||
| } | |||
| @@ -67,7 +67,7 @@ func (svc *StorageService) StartStorageCreatePackage(userID int64, bucketID int6 | |||
| return stgResp.NodeID, startResp.TaskID, nil | |||
| } | |||
| func (svc *StorageService) WaitStorageCreatePackage(nodeID int64, taskID string, waitTimeout time.Duration) (bool, int64, error) { | |||
| func (svc *StorageService) WaitStorageCreatePackage(nodeID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, cdssdk.PackageID, error) { | |||
| agentCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| if err != nil { | |||
| // TODO 失败是否要当做任务已经结束? | |||
| @@ -92,7 +92,7 @@ func (svc *StorageService) WaitStorageCreatePackage(nodeID int64, taskID string, | |||
| return true, waitResp.PackageID, nil | |||
| } | |||
| func (svc *StorageService) GetInfo(userID int64, storageID int64) (*model.Storage, error) { | |||
| func (svc *StorageService) GetInfo(userID cdssdk.UserID, storageID cdssdk.StorageID) (*model.Storage, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| @@ -1,35 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type CreateECPackageResult = cmd.CreateECPackageResult | |||
| type CreateECPackage struct { | |||
| cmd cmd.CreateECPackage | |||
| Result *CreateECPackageResult | |||
| } | |||
| func NewCreateECPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy cdssdk.ECRedundancyInfo, nodeAffinity *int64) *CreateECPackage { | |||
| return &CreateECPackage{ | |||
| cmd: *cmd.NewCreateECPackage(userID, bucketID, name, objIter, redundancy, nodeAffinity), | |||
| } | |||
| } | |||
| func (t *CreateECPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -0,0 +1,35 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
// CreatePackageResult aliases the command-layer result type so callers of the
// task package do not need to import the cmd package directly.
type CreatePackageResult = cmd.CreatePackageResult

// CreatePackage is a task body wrapping cmd.CreatePackage. Its outcome is
// published through the Result field once Execute finishes.
type CreatePackage struct {
	cmd cmd.CreatePackage

	// Result holds the command's outcome; it is nil until Execute completes.
	Result *CreatePackageResult
}

// NewCreatePackage builds the task body for creating package `name` in bucket
// `bucketID` on behalf of `userID`, consuming objects from objIter.
// nodeAffinity may be nil — presumably meaning "no preferred node"; confirm
// against cmd.NewCreatePackage.
func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage {
	return &CreatePackage{
		cmd: *cmd.NewCreatePackage(userID, bucketID, name, objIter, nodeAffinity),
	}
}

// Execute runs the wrapped command, stores its result, and completes the task.
// The finished task is retained for one minute (RemovingDelay) — presumably so
// callers polling for completion can still fetch the result afterwards.
func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
	ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{
		Distlock: ctx.distlock,
	})
	t.Result = ret
	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}
| @@ -1,35 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type CreateRepPackageResult = cmd.CreateRepPackageResult | |||
| type CreateRepPackage struct { | |||
| cmd cmd.CreateRepPackage | |||
| Result *CreateRepPackageResult | |||
| } | |||
| func NewCreateRepPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy cdssdk.RepRedundancyInfo, nodeAffinity *int64) *CreateRepPackage { | |||
| return &CreateRepPackage{ | |||
| cmd: *cmd.NewCreateRepPackage(userID, bucketID, name, objIter, redundancy, nodeAffinity), | |||
| } | |||
| } | |||
| func (t *CreateRepPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -5,6 +5,7 @@ import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" | |||
| @@ -13,14 +14,14 @@ import ( | |||
| // TODO 可以考虑不用Task来实现这些逻辑 | |||
| type StorageLoadPackage struct { | |||
| userID int64 | |||
| packageID int64 | |||
| storageID int64 | |||
| userID cdssdk.UserID | |||
| packageID cdssdk.PackageID | |||
| storageID cdssdk.StorageID | |||
| ResultFullPath string | |||
| } | |||
| func NewStorageLoadPackage(userID int64, packageID int64, storageID int64) *StorageLoadPackage { | |||
| func NewStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StorageLoadPackage { | |||
| return &StorageLoadPackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| @@ -39,7 +40,7 @@ func (t *StorageLoadPackage) do(ctx TaskContext) error { | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| Metadata(). | |||
| // 用于判断用户是否有Storage权限 | |||
| UserStorage().ReadOne(t.packageID, t.storageID). | |||
| UserStorage().ReadOne(t.userID, t.storageID). | |||
| // 用于判断用户是否有对象权限 | |||
| UserBucket().ReadAny(). | |||
| // 用于读取包信息 | |||
| @@ -105,7 +106,7 @@ func (t *StorageLoadPackage) do(ctx TaskContext) error { | |||
| } | |||
| } | |||
| _, err = coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(t.userID, t.packageID, t.storageID)) | |||
| _, err = coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(t.userID, t.storageID, t.packageID)) | |||
| if err != nil { | |||
| return fmt.Errorf("loading package to storage: %w", err) | |||
| } | |||
| @@ -1,35 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type UpdateECPackageResult = cmd.UpdateECPackageResult | |||
| type UpdateECPackage struct { | |||
| cmd cmd.UpdateECPackage | |||
| Result *UpdateECPackageResult | |||
| } | |||
| func NewUpdateECPackage(userID int64, packageID int64, objectIter iterator.UploadingObjectIterator) *UpdateECPackage { | |||
| return &UpdateECPackage{ | |||
| cmd: *cmd.NewUpdateECPackage(userID, packageID, objectIter), | |||
| } | |||
| } | |||
| func (t *UpdateECPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -0,0 +1,36 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type UpdatePackageResult = cmd.UpdatePackageResult | |||
| type UpdatePackage struct { | |||
| cmd cmd.UpdatePackage | |||
| Result *UpdatePackageResult | |||
| } | |||
| func NewUpdatePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator) *UpdatePackage { | |||
| return &UpdatePackage{ | |||
| cmd: *cmd.NewUpdatePackage(userID, packageID, objectIter), | |||
| } | |||
| } | |||
| func (t *UpdatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -1,35 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/cmd" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| ) | |||
| type UpdateRepPackageResult = cmd.UpdateRepPackageResult | |||
| type UpdateRepPackage struct { | |||
| cmd cmd.UpdateRepPackage | |||
| Result *UpdateRepPackageResult | |||
| } | |||
| func NewUpdateRepPackage(userID int64, packageID int64, objectIter iterator.UploadingObjectIterator) *UpdateRepPackage { | |||
| return &UpdateRepPackage{ | |||
| cmd: *cmd.NewUpdateRepPackage(userID, packageID, objectIter), | |||
| } | |||
| } | |||
| func (t *UpdateRepPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{ | |||
| Distlock: ctx.distlock, | |||
| }) | |||
| t.Result = ret | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -37,13 +37,15 @@ values | |||
| 5010, | |||
| 1, | |||
| "alive" | |||
| ) create table Storage ( | |||
| StorageID int not null auto_increment primary key comment '存储服务ID', | |||
| Name varchar(100) not null comment '存储服务名称', | |||
| NodeID int not null comment '存储服务所在节点的ID', | |||
| Directory varchar(4096) not null comment '存储服务所在节点的目录', | |||
| State varchar(100) comment '状态' | |||
| ) comment = "存储服务表"; | |||
| ); | |||
| create table Storage ( | |||
| StorageID int not null auto_increment primary key comment '存储服务ID', | |||
| Name varchar(100) not null comment '存储服务名称', | |||
| NodeID int not null comment '存储服务所在节点的ID', | |||
| Directory varchar(4096) not null comment '存储服务所在节点的目录', | |||
| State varchar(100) comment '状态' | |||
| ) comment = "存储服务表"; | |||
| insert into | |||
| Storage (StorageID, Name, NodeID, Directory, State) | |||
| @@ -110,8 +112,7 @@ create table Package ( | |||
| PackageID int not null auto_increment primary key comment '包ID', | |||
| Name varchar(100) not null comment '对象名', | |||
| BucketID int not null comment '桶ID', | |||
| State varchar(100) not null comment '状态', | |||
| Redundancy JSON not null comment '冗余策略' | |||
| State varchar(100) not null comment '状态' | |||
| ); | |||
| create table Object ( | |||
| @@ -119,36 +120,42 @@ create table Object ( | |||
| PackageID int not null comment '包ID', | |||
| Path varchar(500) not null comment '对象路径', | |||
| Size bigint not null comment '对象大小(Byte)', | |||
| FileHash varchar(100) not null comment '完整对象的FileHash', | |||
| Redundancy JSON not null comment '冗余策略', | |||
| UNIQUE KEY PackagePath (PackageID, Path) | |||
| ) comment = '对象表'; | |||
| create table ObjectRep ( | |||
| ObjectID int not null primary key comment '对象ID', | |||
| FileHash varchar(100) not null comment '副本哈希值' | |||
| ) comment = '对象副本表'; | |||
| create table ObjectBlock ( | |||
| ObjectID int not null comment '对象ID', | |||
| `Index` int not null comment '编码块在条带内的排序', | |||
| NodeID int not null comment '此编码块应该存在的节点', | |||
| FileHash varchar(100) not null comment '编码块哈希值', | |||
| primary key(ObjectID, `Index`) | |||
| primary key(ObjectID, `Index`, NodeID) | |||
| ) comment = '对象编码块表'; | |||
| create table Cache ( | |||
| FileHash varchar(100) not null comment '编码块块ID', | |||
| NodeID int not null comment '节点ID', | |||
| State varchar(100) not null comment '状态', | |||
| CacheTime timestamp not null comment '缓存时间', | |||
| FrozenTime timestamp comment '文件被冻结的时间', | |||
| CreateTime timestamp not null comment '缓存时间', | |||
| Priority int not null comment '编码块优先级', | |||
| primary key(FileHash, NodeID) | |||
| ) comment = '缓存表'; | |||
| create table StoragePackage ( | |||
| PackageID int not null comment '包ID', | |||
| StorageID int not null comment '存储服务ID', | |||
| PackageID int not null comment '包ID', | |||
| UserID int not null comment '调度了此文件的用户ID', | |||
| State varchar(100) not null comment '包状态', | |||
| primary key(PackageID, StorageID, UserID) | |||
| primary key(StorageID, PackageID, UserID) | |||
| ); | |||
| create table StoragePackageLog ( | |||
| PackageID int not null comment '包ID', | |||
| StorageID int not null comment '存储服务ID', | |||
| UserID int not null comment '调度了此文件的用户ID', | |||
| CreateTime timestamp not null comment '加载Package完成的时间' | |||
| ); | |||
| create table Location ( | |||
| @@ -159,21 +166,4 @@ create table Location ( | |||
| insert into | |||
| Location (LocationID, Name) | |||
| values | |||
| (1, "Local"); | |||
| create table Ec ( | |||
| EcID int not null primary key comment '纠删码ID', | |||
| Name varchar(128) not null comment '纠删码名称', | |||
| EcK int not null comment 'ecK', | |||
| EcN int not null comment 'ecN' | |||
| ) comment = '纠删码表'; | |||
| insert into | |||
| Ec (EcID, Name, EcK, EcN) | |||
| values | |||
| (1, "rs_9_6", 6, 9); | |||
| insert into | |||
| Ec (EcID, Name, EcK, EcN) | |||
| values | |||
| (2, "rs_5_3", 3, 5); | |||
| (1, "Local"); | |||
| @@ -10,17 +10,6 @@ const ( | |||
| NodeStateUnavailable = "Unavailable" | |||
| ) | |||
| const ( | |||
| PackageStateNormal = "Normal" | |||
| PackageStateDeleted = "Deleted" | |||
| ) | |||
| const ( | |||
| StoragePackageStateNormal = "Normal" | |||
| StoragePackageStateDeleted = "Deleted" | |||
| StoragePackageStateOutdated = "Outdated" | |||
| ) | |||
| const ( | |||
| CacheStatePinned = "Pinned" | |||
| CacheStateTemp = "Temp" | |||
| @@ -1,67 +1,51 @@ | |||
| package stgmod | |||
| import "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| /// TODO 将分散在各处的公共结构体定义集中到这里来 | |||
| type EC struct { | |||
| ID int64 `json:"id"` | |||
| K int `json:"k"` | |||
| N int `json:"n"` | |||
| ChunkSize int `json:"chunkSize"` | |||
| } | |||
| func NewEc(id int64, k int, n int, chunkSize int) EC { | |||
| return EC{ | |||
| ID: id, | |||
| K: k, | |||
| N: n, | |||
| ChunkSize: chunkSize, | |||
| } | |||
| } | |||
| type ObjectBlockData struct { | |||
| Index int `json:"index"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewObjectBlockData(index int, fileHash string, nodeIDs []int64) ObjectBlockData { | |||
| return ObjectBlockData{ | |||
| Index: index, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| type ObjectRepData struct { | |||
| Object model.Object `json:"object"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewObjectRepData(object model.Object, fileHash string, nodeIDs []int64) ObjectRepData { | |||
| return ObjectRepData{ | |||
| Object: object, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| import ( | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| ) | |||
| type ObjectBlock struct { | |||
| ObjectID cdssdk.ObjectID `db:"ObjectID" json:"objectID"` | |||
| Index int `db:"Index" json:"index"` | |||
| NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"` // 这个块应该在哪个节点上 | |||
| FileHash string `db:"FileHash" json:"fileHash"` | |||
| } | |||
| type ObjectBlockDetail struct { | |||
| ObjectID cdssdk.ObjectID `json:"objectID"` | |||
| Index int `json:"index"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []cdssdk.NodeID `json:"nodeID"` // 这个块应该在哪些节点上 | |||
| CachedNodeIDs []cdssdk.NodeID `json:"cachedNodeIDs"` // 哪些节点实际缓存了这个块 | |||
| } | |||
| func NewObjectBlockDetail(objID cdssdk.ObjectID, index int, fileHash string, nodeIDs []cdssdk.NodeID, cachedNodeIDs []cdssdk.NodeID) ObjectBlockDetail { | |||
| return ObjectBlockDetail{ | |||
| ObjectID: objID, | |||
| Index: index, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| CachedNodeIDs: cachedNodeIDs, | |||
| } | |||
| } | |||
| type ObjectECData struct { | |||
| Object model.Object `json:"object"` | |||
| Blocks []ObjectBlockData `json:"blocks"` | |||
| type ObjectDetail struct { | |||
| Object cdssdk.Object `json:"object"` | |||
| CachedNodeIDs []cdssdk.NodeID `json:"cachedNodeIDs"` // 文件的完整数据在哪些节点上缓存 | |||
| Blocks []ObjectBlockDetail `json:"blocks"` | |||
| } | |||
| func NewObjectECData(object model.Object, blocks []ObjectBlockData) ObjectECData { | |||
| return ObjectECData{ | |||
| Object: object, | |||
| Blocks: blocks, | |||
| func NewObjectDetail(object cdssdk.Object, cachedNodeIDs []cdssdk.NodeID, blocks []ObjectBlockDetail) ObjectDetail { | |||
| return ObjectDetail{ | |||
| Object: object, | |||
| CachedNodeIDs: cachedNodeIDs, | |||
| Blocks: blocks, | |||
| } | |||
| } | |||
| type LocalMachineInfo struct { | |||
| NodeID *int64 `json:"nodeID"` | |||
| ExternalIP string `json:"externalIP"` | |||
| LocalIP string `json:"localIP"` | |||
| NodeID *cdssdk.NodeID `json:"nodeID"` | |||
| ExternalIP string `json:"externalIP"` | |||
| LocalIP string `json:"localIP"` | |||
| LocationID cdssdk.LocationID `json:"locationID"` | |||
| } | |||
| @@ -1,248 +0,0 @@ | |||
| package cmd | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "sync" | |||
| "github.com/samber/lo" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ec" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type CreateECPackage struct { | |||
| userID int64 | |||
| bucketID int64 | |||
| name string | |||
| objectIter iterator.UploadingObjectIterator | |||
| redundancy cdssdk.ECRedundancyInfo | |||
| nodeAffinity *int64 | |||
| } | |||
| type CreateECPackageResult struct { | |||
| PackageID int64 | |||
| ObjectResults []ECObjectUploadResult | |||
| } | |||
| type ECObjectUploadResult struct { | |||
| Info *iterator.IterUploadingObject | |||
| Error error | |||
| ObjectID int64 | |||
| } | |||
| func NewCreateECPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy cdssdk.ECRedundancyInfo, nodeAffinity *int64) *CreateECPackage { | |||
| return &CreateECPackage{ | |||
| userID: userID, | |||
| bucketID: bucketID, | |||
| name: name, | |||
| objectIter: objIter, | |||
| redundancy: redundancy, | |||
| nodeAffinity: nodeAffinity, | |||
| } | |||
| } | |||
| func (t *CreateECPackage) Execute(ctx *UpdatePackageContext) (*CreateECPackageResult, error) { | |||
| defer t.objectIter.Close() | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| Metadata(). | |||
| // 用于判断用户是否有桶的权限 | |||
| UserBucket().ReadOne(t.userID, t.bucketID). | |||
| // 用于查询可用的上传节点 | |||
| Node().ReadAny(). | |||
| // 用于创建包信息 | |||
| Package().CreateOne(t.bucketID, t.name). | |||
| // 用于创建包中的文件的信息 | |||
| Object().CreateAny(). | |||
| // 用于设置EC配置 | |||
| ObjectBlock().CreateAny(). | |||
| // 用于创建Cache记录 | |||
| Cache().CreateAny(). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer mutex.Unlock() | |||
| createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name, | |||
| cdssdk.NewTypedRedundancyInfo(t.redundancy))) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating package: %w", err) | |||
| } | |||
| getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting user nodes: %w", err) | |||
| } | |||
| findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(stgglb.Local.ExternalIP)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("finding client location: %w", err) | |||
| } | |||
| uploadNodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo { | |||
| return UploadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID, | |||
| } | |||
| }) | |||
| getECResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(t.redundancy.ECName)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting ec: %w", err) | |||
| } | |||
| // 给上传节点的IPFS加锁 | |||
| ipfsReqBlder := reqbuilder.NewBuilder() | |||
| // 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁 | |||
| if stgglb.Local.NodeID != nil { | |||
| ipfsReqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID) | |||
| } | |||
| for _, node := range uploadNodeInfos { | |||
| if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID { | |||
| continue | |||
| } | |||
| ipfsReqBlder.IPFS().CreateAnyRep(node.Node.NodeID) | |||
| } | |||
| // 防止上传的副本被清除 | |||
| ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer ipfsMutex.Unlock() | |||
| // TODO 需要支持设置节点亲和性 | |||
| rets, err := uploadAndUpdateECPackage(createPkgResp.PackageID, t.objectIter, uploadNodeInfos, t.redundancy, getECResp.Config) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &CreateECPackageResult{ | |||
| PackageID: createPkgResp.PackageID, | |||
| ObjectResults: rets, | |||
| }, nil | |||
| } | |||
| func uploadAndUpdateECPackage(packageID int64, objectIter iterator.UploadingObjectIterator, uploadNodes []UploadNodeInfo, ecInfo cdssdk.ECRedundancyInfo, ec model.Ec) ([]ECObjectUploadResult, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| var uploadRets []ECObjectUploadResult | |||
| //上传文件夹 | |||
| var adds []coormq.AddECObjectInfo | |||
| for { | |||
| objInfo, err := objectIter.MoveNext() | |||
| if err == iterator.ErrNoMoreItem { | |||
| break | |||
| } | |||
| if err != nil { | |||
| return nil, fmt.Errorf("reading object: %w", err) | |||
| } | |||
| err = func() error { | |||
| defer objInfo.File.Close() | |||
| fileHashes, uploadedNodeIDs, err := uploadECObject(objInfo, uploadNodes, ecInfo, ec) | |||
| uploadRets = append(uploadRets, ECObjectUploadResult{ | |||
| Info: objInfo, | |||
| Error: err, | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("uploading object: %w", err) | |||
| } | |||
| adds = append(adds, coormq.NewAddECObjectInfo(objInfo.Path, objInfo.Size, fileHashes, uploadedNodeIDs)) | |||
| return nil | |||
| }() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| _, err = coorCli.UpdateECPackage(coormq.NewUpdateECPackage(packageID, adds, nil)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("updating package: %w", err) | |||
| } | |||
| return uploadRets, nil | |||
| } | |||
| // 上传文件 | |||
| func uploadECObject(obj *iterator.IterUploadingObject, uploadNodes []UploadNodeInfo, ecInfo cdssdk.ECRedundancyInfo, ecMod model.Ec) ([]string, []int64, error) { | |||
| uploadNodes = shuffleNodes(uploadNodes, ecMod.EcN) | |||
| rs, err := ec.NewRs(ecMod.EcK, ecMod.EcN, ecInfo.ChunkSize) | |||
| if err != nil { | |||
| return nil, nil, err | |||
| } | |||
| outputs := myio.ChunkedSplit(obj.File, ecInfo.ChunkSize, ecMod.EcK, myio.ChunkedSplitOption{ | |||
| PaddingZeros: true, | |||
| }) | |||
| var readers []io.Reader | |||
| for _, o := range outputs { | |||
| readers = append(readers, o) | |||
| } | |||
| defer func() { | |||
| for _, o := range outputs { | |||
| o.Close() | |||
| } | |||
| }() | |||
| encStrs := rs.EncodeAll(readers) | |||
| wg := sync.WaitGroup{} | |||
| nodeIDs := make([]int64, ecMod.EcN) | |||
| fileHashes := make([]string, ecMod.EcN) | |||
| anyErrs := make([]error, ecMod.EcN) | |||
| for i := range encStrs { | |||
| idx := i | |||
| wg.Add(1) | |||
| nodeIDs[idx] = uploadNodes[idx].Node.NodeID | |||
| go func() { | |||
| defer wg.Done() | |||
| fileHashes[idx], anyErrs[idx] = uploadFile(encStrs[idx], uploadNodes[idx]) | |||
| }() | |||
| } | |||
| wg.Wait() | |||
| for i, e := range anyErrs { | |||
| if e != nil { | |||
| return nil, nil, fmt.Errorf("uploading file to node %d: %w", uploadNodes[i].Node.NodeID, e) | |||
| } | |||
| } | |||
| return fileHashes, nodeIDs, nil | |||
| } | |||
| func shuffleNodes(uploadNodes []UploadNodeInfo, extendTo int) []UploadNodeInfo { | |||
| for i := len(uploadNodes); i < extendTo; i++ { | |||
| uploadNodes = append(uploadNodes, uploadNodes[rand.Intn(len(uploadNodes))]) | |||
| } | |||
| // 随机排列上传节点 | |||
| rand.Shuffle(len(uploadNodes), func(i, j int) { | |||
| uploadNodes[i], uploadNodes[j] = uploadNodes[j], uploadNodes[i] | |||
| }) | |||
| return uploadNodes | |||
| } | |||
| @@ -7,6 +7,7 @@ import ( | |||
| "time" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| @@ -19,48 +20,46 @@ import ( | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type UploadNodeInfo struct { | |||
| Node model.Node | |||
| IsSameLocation bool | |||
| } | |||
| type CreateRepPackage struct { | |||
| userID int64 | |||
| bucketID int64 | |||
| type CreatePackage struct { | |||
| userID cdssdk.UserID | |||
| bucketID cdssdk.BucketID | |||
| name string | |||
| objectIter iterator.UploadingObjectIterator | |||
| redundancy cdssdk.RepRedundancyInfo | |||
| nodeAffinity *int64 | |||
| nodeAffinity *cdssdk.NodeID | |||
| } | |||
| type UpdatePackageContext struct { | |||
| Distlock *distlock.Service | |||
| type CreatePackageResult struct { | |||
| PackageID cdssdk.PackageID | |||
| ObjectResults []ObjectUploadResult | |||
| } | |||
| type ObjectUploadResult struct { | |||
| Info *iterator.IterUploadingObject | |||
| Error error | |||
| // TODO 这个字段没有被赋值 | |||
| ObjectID cdssdk.ObjectID | |||
| } | |||
| type CreateRepPackageResult struct { | |||
| PackageID int64 | |||
| ObjectResults []RepObjectUploadResult | |||
| type UploadNodeInfo struct { | |||
| Node model.Node | |||
| IsSameLocation bool | |||
| } | |||
| type RepObjectUploadResult struct { | |||
| Info *iterator.IterUploadingObject | |||
| Error error | |||
| FileHash string | |||
| ObjectID int64 | |||
| type UpdatePackageContext struct { | |||
| Distlock *distlock.Service | |||
| } | |||
| func NewCreateRepPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy cdssdk.RepRedundancyInfo, nodeAffinity *int64) *CreateRepPackage { | |||
| return &CreateRepPackage{ | |||
| func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage { | |||
| return &CreatePackage{ | |||
| userID: userID, | |||
| bucketID: bucketID, | |||
| name: name, | |||
| objectIter: objIter, | |||
| redundancy: redundancy, | |||
| nodeAffinity: nodeAffinity, | |||
| } | |||
| } | |||
| func (t *CreateRepPackage) Execute(ctx *UpdatePackageContext) (*CreateRepPackageResult, error) { | |||
| func (t *CreatePackage) Execute(ctx *UpdatePackageContext) (*CreatePackageResult, error) { | |||
| defer t.objectIter.Close() | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| @@ -68,12 +67,7 @@ func (t *CreateRepPackage) Execute(ctx *UpdatePackageContext) (*CreateRepPackage | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| reqBlder := reqbuilder.NewBuilder() | |||
| // 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁 | |||
| if stgglb.Local.NodeID != nil { | |||
| reqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID) | |||
| } | |||
| mutex, err := reqBlder. | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| Metadata(). | |||
| // 用于判断用户是否有桶的权限 | |||
| UserBucket().ReadOne(t.userID, t.bucketID). | |||
| @@ -93,8 +87,7 @@ func (t *CreateRepPackage) Execute(ctx *UpdatePackageContext) (*CreateRepPackage | |||
| } | |||
| defer mutex.Unlock() | |||
| createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name, | |||
| cdssdk.NewTypedRedundancyInfo(t.redundancy))) | |||
| createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating package: %w", err) | |||
| } | |||
| @@ -104,47 +97,73 @@ func (t *CreateRepPackage) Execute(ctx *UpdatePackageContext) (*CreateRepPackage | |||
| return nil, fmt.Errorf("getting user nodes: %w", err) | |||
| } | |||
| findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(stgglb.Local.ExternalIP)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("finding client location: %w", err) | |||
| } | |||
| nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo { | |||
| userNodes := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo { | |||
| return UploadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID, | |||
| IsSameLocation: node.LocationID == stgglb.Local.LocationID, | |||
| } | |||
| }) | |||
| uploadNode := t.chooseUploadNode(nodeInfos, t.nodeAffinity) | |||
| // 给上传节点的IPFS加锁 | |||
| ipfsReqBlder := reqbuilder.NewBuilder() | |||
| // 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁 | |||
| if stgglb.Local.NodeID != nil { | |||
| ipfsReqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID) | |||
| } | |||
| for _, node := range userNodes { | |||
| if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID { | |||
| continue | |||
| } | |||
| ipfsReqBlder.IPFS().CreateAnyRep(node.Node.NodeID) | |||
| } | |||
| // 防止上传的副本被清除 | |||
| ipfsMutex, err := reqbuilder.NewBuilder(). | |||
| IPFS().CreateAnyRep(uploadNode.Node.NodeID). | |||
| MutexLock(ctx.Distlock) | |||
| ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer ipfsMutex.Unlock() | |||
| rets, err := uploadAndUpdateRepPackage(createPkgResp.PackageID, t.objectIter, uploadNode) | |||
| rets, err := uploadAndUpdatePackage(createPkgResp.PackageID, t.objectIter, userNodes, t.nodeAffinity) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &CreateRepPackageResult{ | |||
| return &CreatePackageResult{ | |||
| PackageID: createPkgResp.PackageID, | |||
| ObjectResults: rets, | |||
| }, nil | |||
| } | |||
| func uploadAndUpdateRepPackage(packageID int64, objectIter iterator.UploadingObjectIterator, uploadNode UploadNodeInfo) ([]RepObjectUploadResult, error) { | |||
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 选择设置了亲和性的节点 | |||
| // 2. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 3. 没有用的话从所有节点中随机选一个 | |||
| func chooseUploadNode(nodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) UploadNodeInfo { | |||
| if nodeAffinity != nil { | |||
| aff, ok := lo.Find(nodes, func(node UploadNodeInfo) bool { return node.Node.NodeID == *nodeAffinity }) | |||
| if ok { | |||
| return aff | |||
| } | |||
| } | |||
| sameLocationNodes := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationNodes) > 0 { | |||
| return sameLocationNodes[rand.Intn(len(sameLocationNodes))] | |||
| } | |||
| return nodes[rand.Intn(len(nodes))] | |||
| } | |||
| func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userNodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) ([]ObjectUploadResult, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| var uploadRets []RepObjectUploadResult | |||
| var adds []coormq.AddRepObjectInfo | |||
| var uploadRets []ObjectUploadResult | |||
| //上传文件夹 | |||
| var adds []coormq.AddObjectInfo | |||
| for { | |||
| objInfo, err := objectIter.MoveNext() | |||
| if err == iterator.ErrNoMoreItem { | |||
| @@ -153,20 +172,25 @@ func uploadAndUpdateRepPackage(packageID int64, objectIter iterator.UploadingObj | |||
| if err != nil { | |||
| return nil, fmt.Errorf("reading object: %w", err) | |||
| } | |||
| err = func() error { | |||
| defer objInfo.File.Close() | |||
| uploadNode := chooseUploadNode(userNodes, nodeAffinity) | |||
| fileHash, err := uploadFile(objInfo.File, uploadNode) | |||
| uploadRets = append(uploadRets, RepObjectUploadResult{ | |||
| Info: objInfo, | |||
| Error: err, | |||
| FileHash: fileHash, | |||
| if err != nil { | |||
| return fmt.Errorf("uploading file: %w", err) | |||
| } | |||
| uploadRets = append(uploadRets, ObjectUploadResult{ | |||
| Info: objInfo, | |||
| Error: err, | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("uploading object: %w", err) | |||
| } | |||
| adds = append(adds, coormq.NewAddRepObjectInfo(objInfo.Path, objInfo.Size, fileHash, []int64{uploadNode.Node.NodeID})) | |||
| adds = append(adds, coormq.NewAddObjectInfo(objInfo.Path, objInfo.Size, fileHash, uploadNode.Node.NodeID)) | |||
| return nil | |||
| }() | |||
| if err != nil { | |||
| @@ -174,7 +198,7 @@ func uploadAndUpdateRepPackage(packageID int64, objectIter iterator.UploadingObj | |||
| } | |||
| } | |||
| _, err = coorCli.UpdateRepPackage(coormq.NewUpdateRepPackage(packageID, adds, nil)) | |||
| _, err = coorCli.UpdateECPackage(coormq.NewUpdatePackage(packageID, adds, nil)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("updating package: %w", err) | |||
| } | |||
| @@ -182,7 +206,6 @@ func uploadAndUpdateRepPackage(packageID int64, objectIter iterator.UploadingObj | |||
| return uploadRets, nil | |||
| } | |||
| // 上传文件 | |||
| func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) { | |||
| // 本地有IPFS,则直接从本地IPFS上传 | |||
| if stgglb.IPFSPool != nil { | |||
| @@ -217,26 +240,6 @@ func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) { | |||
| return fileHash, nil | |||
| } | |||
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 选择设置了亲和性的节点 | |||
| // 2. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 3. 没有用的话从所有节点中随机选一个 | |||
| func (t *CreateRepPackage) chooseUploadNode(nodes []UploadNodeInfo, nodeAffinity *int64) UploadNodeInfo { | |||
| if nodeAffinity != nil { | |||
| aff, ok := lo.Find(nodes, func(node UploadNodeInfo) bool { return node.Node.NodeID == *nodeAffinity }) | |||
| if ok { | |||
| return aff | |||
| } | |||
| } | |||
| sameLocationNodes := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationNodes) > 0 { | |||
| return sameLocationNodes[rand.Intn(len(sameLocationNodes))] | |||
| } | |||
| return nodes[rand.Intn(len(nodes))] | |||
| } | |||
| func uploadToNode(file io.Reader, nodeIP string, grpcPort int) (string, error) { | |||
| rpcCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort) | |||
| if err != nil { | |||
| @@ -247,7 +250,7 @@ func uploadToNode(file io.Reader, nodeIP string, grpcPort int) (string, error) { | |||
| return rpcCli.SendIPFSFile(file) | |||
| } | |||
| func uploadToLocalIPFS(file io.Reader, nodeID int64, shouldPin bool) (string, error) { | |||
| func uploadToLocalIPFS(file io.Reader, nodeID cdssdk.NodeID, shouldPin bool) (string, error) { | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| return "", fmt.Errorf("new ipfs client: %w", err) | |||
| @@ -272,7 +275,7 @@ func uploadToLocalIPFS(file io.Reader, nodeID int64, shouldPin bool) (string, er | |||
| return fileHash, nil | |||
| } | |||
| func pinIPFSFile(nodeID int64, fileHash string) error { | |||
| func pinIPFSFile(nodeID cdssdk.NodeID, fileHash string) error { | |||
| agtCli, err := stgglb.AgentMQPool.Acquire(nodeID) | |||
| if err != nil { | |||
| return fmt.Errorf("new agent client: %w", err) | |||
| @@ -10,14 +10,13 @@ import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type DownloadPackage struct { | |||
| userID int64 | |||
| packageID int64 | |||
| userID cdssdk.UserID | |||
| packageID cdssdk.PackageID | |||
| outputPath string | |||
| } | |||
| @@ -25,7 +24,7 @@ type DownloadPackageContext struct { | |||
| Distlock *distlock.Service | |||
| } | |||
| func NewDownloadPackage(userID int64, packageID int64, outputPath string) *DownloadPackage { | |||
| func NewDownloadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, outputPath string) *DownloadPackage { | |||
| return &DownloadPackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| @@ -40,85 +39,20 @@ func (t *DownloadPackage) Execute(ctx *DownloadPackageContext) error { | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| getPkgResp, err := coorCli.GetPackage(coormq.NewGetPackage(t.userID, t.packageID)) | |||
| getObjectDetails, err := coorCli.GetPackageObjectDetails(coormq.NewGetPackageObjectDetails(t.packageID)) | |||
| if err != nil { | |||
| return fmt.Errorf("getting package: %w", err) | |||
| } | |||
| var objIter iterator.DownloadingObjectIterator | |||
| if getPkgResp.Redundancy.IsRepInfo() { | |||
| objIter, err = t.downloadRep(ctx) | |||
| } else { | |||
| objIter, err = t.downloadEC(ctx, getPkgResp.Package) | |||
| return fmt.Errorf("getting package object details: %w", err) | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer objIter.Close() | |||
| return t.writeObject(objIter) | |||
| } | |||
| func (t *DownloadPackage) downloadRep(ctx *DownloadPackageContext) (iterator.DownloadingObjectIterator, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| getObjsResp, err := coorCli.GetPackageObjects(coormq.NewGetPackageObjects(t.userID, t.packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package objects: %w", err) | |||
| } | |||
| getObjRepDataResp, err := coorCli.GetPackageObjectRepData(coormq.NewGetPackageObjectRepData(t.packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package object rep data: %w", err) | |||
| } | |||
| iter := iterator.NewRepObjectIterator(getObjsResp.Objects, getObjRepDataResp.Data, &iterator.DownloadContext{ | |||
| Distlock: ctx.Distlock, | |||
| }) | |||
| return iter, nil | |||
| } | |||
| func (t *DownloadPackage) downloadEC(ctx *DownloadPackageContext, pkg model.Package) (iterator.DownloadingObjectIterator, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| getObjsResp, err := coorCli.GetPackageObjects(coormq.NewGetPackageObjects(t.userID, t.packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package objects: %w", err) | |||
| } | |||
| getObjECDataResp, err := coorCli.GetPackageObjectECData(coormq.NewGetPackageObjectECData(t.packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package object ec data: %w", err) | |||
| } | |||
| var ecInfo cdssdk.ECRedundancyInfo | |||
| if ecInfo, err = pkg.Redundancy.ToECInfo(); err != nil { | |||
| return nil, fmt.Errorf("get ec redundancy info: %w", err) | |||
| } | |||
| getECResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(ecInfo.ECName)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting ec: %w", err) | |||
| } | |||
| iter := iterator.NewECObjectIterator(getObjsResp.Objects, getObjECDataResp.Data, ecInfo, getECResp.Config, &iterator.DownloadContext{ | |||
| objIter := iterator.NewDownloadObjectIterator(getObjectDetails.Objects, &iterator.DownloadContext{ | |||
| Distlock: ctx.Distlock, | |||
| }) | |||
| defer objIter.Close() | |||
| return iter, nil | |||
| return t.writeObjects(objIter) | |||
| } | |||
| func (t *DownloadPackage) writeObject(objIter iterator.DownloadingObjectIterator) error { | |||
| func (t *DownloadPackage) writeObjects(objIter iterator.DownloadingObjectIterator) error { | |||
| for { | |||
| objInfo, err := objIter.MoveNext() | |||
| if err == iterator.ErrNoMoreItem { | |||
| @@ -14,25 +14,30 @@ import ( | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type UpdateECPackage struct { | |||
| userID int64 | |||
| packageID int64 | |||
| type UpdatePackage struct { | |||
| userID cdssdk.UserID | |||
| packageID cdssdk.PackageID | |||
| objectIter iterator.UploadingObjectIterator | |||
| } | |||
| type UpdateECPackageResult struct { | |||
| ObjectResults []ECObjectUploadResult | |||
| type UpdatePackageResult struct { | |||
| ObjectResults []ObjectUploadResult | |||
| } | |||
| func NewUpdateECPackage(userID int64, packageID int64, objIter iterator.UploadingObjectIterator) *UpdateECPackage { | |||
| return &UpdateECPackage{ | |||
| type UpdateNodeInfo struct { | |||
| UploadNodeInfo | |||
| HasOldObject bool | |||
| } | |||
| func NewUpdatePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator) *UpdatePackage { | |||
| return &UpdatePackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| objectIter: objIter, | |||
| } | |||
| } | |||
| func (t *UpdateECPackage) Execute(ctx *UpdatePackageContext) (*UpdateECPackageResult, error) { | |||
| func (t *UpdatePackage) Execute(ctx *UpdatePackageContext) (*UpdatePackageResult, error) { | |||
| defer t.objectIter.Close() | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| @@ -44,7 +49,7 @@ func (t *UpdateECPackage) Execute(ctx *UpdatePackageContext) (*UpdateECPackageRe | |||
| Metadata(). | |||
| // 用于查询可用的上传节点 | |||
| Node().ReadAny(). | |||
| // 用于创建包信息 | |||
| // 用于修改包信息 | |||
| Package().WriteOne(t.packageID). | |||
| // 用于创建包中的文件的信息 | |||
| Object().CreateAny(). | |||
| @@ -58,45 +63,25 @@ func (t *UpdateECPackage) Execute(ctx *UpdatePackageContext) (*UpdateECPackageRe | |||
| } | |||
| defer mutex.Unlock() | |||
| getPkgResp, err := coorCli.GetPackage(coormq.NewGetPackage(t.userID, t.packageID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting package: %w", err) | |||
| } | |||
| getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting user nodes: %w", err) | |||
| } | |||
| findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(stgglb.Local.ExternalIP)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("finding client location: %w", err) | |||
| } | |||
| nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo { | |||
| userNodes := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo { | |||
| return UploadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID, | |||
| IsSameLocation: node.LocationID == stgglb.Local.LocationID, | |||
| } | |||
| }) | |||
| var ecInfo cdssdk.ECRedundancyInfo | |||
| if ecInfo, err = getPkgResp.Package.Redundancy.ToECInfo(); err != nil { | |||
| return nil, fmt.Errorf("get ec redundancy info: %w", err) | |||
| } | |||
| getECResp, err := coorCli.GetECConfig(coormq.NewGetECConfig(ecInfo.ECName)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting ec: %w", err) | |||
| } | |||
| // 给上传节点的IPFS加锁 | |||
| ipfsReqBlder := reqbuilder.NewBuilder() | |||
| // 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁 | |||
| if stgglb.Local.NodeID != nil { | |||
| ipfsReqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID) | |||
| } | |||
| for _, node := range nodeInfos { | |||
| for _, node := range userNodes { | |||
| if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID { | |||
| continue | |||
| } | |||
| @@ -110,12 +95,12 @@ func (t *UpdateECPackage) Execute(ctx *UpdatePackageContext) (*UpdateECPackageRe | |||
| } | |||
| defer ipfsMutex.Unlock() | |||
| rets, err := uploadAndUpdateECPackage(t.packageID, t.objectIter, nodeInfos, ecInfo, getECResp.Config) | |||
| rets, err := uploadAndUpdatePackage(t.packageID, t.objectIter, userNodes, nil) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &UpdateECPackageResult{ | |||
| return &UpdatePackageResult{ | |||
| ObjectResults: rets, | |||
| }, nil | |||
| } | |||
| @@ -1,128 +0,0 @@ | |||
| package cmd | |||
| import ( | |||
| "fmt" | |||
| "github.com/samber/lo" | |||
| mysort "gitlink.org.cn/cloudream/common/utils/sort" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type UpdateRepPackage struct { | |||
| userID int64 | |||
| packageID int64 | |||
| objectIter iterator.UploadingObjectIterator | |||
| } | |||
| type UpdateNodeInfo struct { | |||
| UploadNodeInfo | |||
| HasOldObject bool | |||
| } | |||
| type UpdateRepPackageResult struct { | |||
| ObjectResults []RepObjectUploadResult | |||
| } | |||
| func NewUpdateRepPackage(userID int64, packageID int64, objectIter iterator.UploadingObjectIterator) *UpdateRepPackage { | |||
| return &UpdateRepPackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| objectIter: objectIter, | |||
| } | |||
| } | |||
| func (t *UpdateRepPackage) Execute(ctx *UpdatePackageContext) (*UpdateRepPackageResult, error) { | |||
| defer t.objectIter.Close() | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| reqBlder := reqbuilder.NewBuilder() | |||
| // 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁 | |||
| if stgglb.Local.NodeID != nil { | |||
| reqBlder.IPFS().CreateAnyRep(*stgglb.Local.NodeID) | |||
| } | |||
| mutex, err := reqBlder. | |||
| Metadata(). | |||
| // 用于查询可用的上传节点 | |||
| Node().ReadAny(). | |||
| // 用于创建包信息 | |||
| Package().WriteOne(t.packageID). | |||
| // 用于创建包中的文件的信息 | |||
| Object().CreateAny(). | |||
| // 用于设置EC配置 | |||
| ObjectBlock().CreateAny(). | |||
| // 用于创建Cache记录 | |||
| Cache().CreateAny(). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer mutex.Unlock() | |||
| getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting user nodes: %w", err) | |||
| } | |||
| findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(stgglb.Local.ExternalIP)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("finding client location: %w", err) | |||
| } | |||
| nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UpdateNodeInfo { | |||
| return UpdateNodeInfo{ | |||
| UploadNodeInfo: UploadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID, | |||
| }, | |||
| } | |||
| }) | |||
| // 上传文件的方式优先级: | |||
| // 1. 本地IPFS | |||
| // 2. 包含了旧文件,且与客户端在同地域的节点 | |||
| // 3. 不在同地域,但包含了旧文件的节点 | |||
| // 4. 同地域节点 | |||
| // TODO 需要考虑在多文件的情况下的规则 | |||
| uploadNode := t.chooseUploadNode(nodeInfos) | |||
| // 防止上传的副本被清除 | |||
| ipfsMutex, err := reqbuilder.NewBuilder(). | |||
| IPFS().CreateAnyRep(uploadNode.Node.NodeID). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer ipfsMutex.Unlock() | |||
| rets, err := uploadAndUpdateRepPackage(t.packageID, t.objectIter, uploadNode.UploadNodeInfo) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &UpdateRepPackageResult{ | |||
| ObjectResults: rets, | |||
| }, nil | |||
| } | |||
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (t *UpdateRepPackage) chooseUploadNode(nodes []UpdateNodeInfo) UpdateNodeInfo { | |||
| mysort.Sort(nodes, func(left, right UpdateNodeInfo) int { | |||
| v := -mysort.CmpBool(left.HasOldObject, right.HasOldObject) | |||
| if v != 0 { | |||
| return v | |||
| } | |||
| return -mysort.CmpBool(left.IsSameLocation, right.IsSameLocation) | |||
| }) | |||
| return nodes[0] | |||
| } | |||
| @@ -6,6 +6,7 @@ import ( | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -34,7 +35,7 @@ func (db *BucketDB) GetIDByName(bucketName string) (int64, error) { | |||
| } | |||
| // IsAvailable 判断用户是否有指定Bucekt的权限 | |||
| func (db *BucketDB) IsAvailable(ctx SQLContext, bucketID int64, userID int64) (bool, error) { | |||
| func (db *BucketDB) IsAvailable(ctx SQLContext, bucketID cdssdk.BucketID, userID cdssdk.UserID) (bool, error) { | |||
| _, err := db.GetUserBucket(ctx, userID, bucketID) | |||
| if errors.Is(err, sql.ErrNoRows) { | |||
| return false, nil | |||
| @@ -47,7 +48,7 @@ func (db *BucketDB) IsAvailable(ctx SQLContext, bucketID int64, userID int64) (b | |||
| return true, nil | |||
| } | |||
| func (*BucketDB) GetUserBucket(ctx SQLContext, userID int64, bucketID int64) (model.Bucket, error) { | |||
| func (*BucketDB) GetUserBucket(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) (model.Bucket, error) { | |||
| var ret model.Bucket | |||
| err := sqlx.Get(ctx, &ret, | |||
| "select Bucket.* from UserBucket, Bucket where UserID = ? and"+ | |||
| @@ -56,13 +57,13 @@ func (*BucketDB) GetUserBucket(ctx SQLContext, userID int64, bucketID int64) (mo | |||
| return ret, err | |||
| } | |||
| func (*BucketDB) GetUserBuckets(ctx SQLContext, userID int64) ([]model.Bucket, error) { | |||
| func (*BucketDB) GetUserBuckets(ctx SQLContext, userID cdssdk.UserID) ([]model.Bucket, error) { | |||
| var ret []model.Bucket | |||
| err := sqlx.Select(ctx, &ret, "select Bucket.* from UserBucket, Bucket where UserID = ? and UserBucket.BucketID = Bucket.BucketID", userID) | |||
| return ret, err | |||
| } | |||
| func (db *BucketDB) Create(ctx SQLContext, userID int64, bucketName string) (int64, error) { | |||
| func (db *BucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketName string) (cdssdk.BucketID, error) { | |||
| var bucketID int64 | |||
| err := sqlx.Get(ctx, &bucketID, "select Bucket.BucketID from UserBucket, Bucket where UserBucket.UserID = ? and UserBucket.BucketID = Bucket.BucketID and Bucket.Name = ?", userID, bucketName) | |||
| if err == nil { | |||
| @@ -88,10 +89,10 @@ func (db *BucketDB) Create(ctx SQLContext, userID int64, bucketName string) (int | |||
| return 0, fmt.Errorf("insert into user bucket failed, err: %w", err) | |||
| } | |||
| return bucketID, err | |||
| return cdssdk.BucketID(bucketID), err | |||
| } | |||
| func (db *BucketDB) Delete(ctx SQLContext, bucketID int64) error { | |||
| func (db *BucketDB) Delete(ctx SQLContext, bucketID cdssdk.BucketID) error { | |||
| _, err := ctx.Exec("delete from UserBucket where BucketID = ?", bucketID) | |||
| if err != nil { | |||
| return fmt.Errorf("delete user bucket failed, err: %w", err) | |||
| @@ -103,15 +104,15 @@ func (db *BucketDB) Delete(ctx SQLContext, bucketID int64) error { | |||
| } | |||
| // 删除Bucket内的Package | |||
| var objIDs []int64 | |||
| err = sqlx.Select(ctx, &objIDs, "select PackageID from Package where BucketID = ?", bucketID) | |||
| var pkgIDs []cdssdk.PackageID | |||
| err = sqlx.Select(ctx, &pkgIDs, "select PackageID from Package where BucketID = ?", bucketID) | |||
| if err != nil { | |||
| return fmt.Errorf("query package failed, err: %w", err) | |||
| } | |||
| for _, objID := range objIDs { | |||
| for _, pkgID := range pkgIDs { | |||
| // TODO 不一定所有的错误都要中断后续过程 | |||
| err = db.Package().SoftDelete(ctx, objID) | |||
| err = db.Package().SoftDelete(ctx, pkgID) | |||
| if err != nil { | |||
| return fmt.Errorf("set package seleted failed, err: %w", err) | |||
| } | |||
| @@ -4,6 +4,7 @@ import ( | |||
| "time" | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -16,7 +17,7 @@ func (db *DB) Cache() *CacheDB { | |||
| return &CacheDB{DB: db} | |||
| } | |||
| func (*CacheDB) Get(ctx SQLContext, fileHash string, nodeID int64) (model.Cache, error) { | |||
| func (*CacheDB) Get(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) (model.Cache, error) { | |||
| var ret model.Cache | |||
| err := sqlx.Get(ctx, &ret, "select * from Cache where FileHash = ? and NodeID = ?", fileHash, nodeID) | |||
| return ret, err | |||
| @@ -28,15 +29,15 @@ func (*CacheDB) BatchGetAllFileHashes(ctx SQLContext, start int, count int) ([]s | |||
| return ret, err | |||
| } | |||
| func (*CacheDB) GetNodeCaches(ctx SQLContext, nodeID int64) ([]model.Cache, error) { | |||
| func (*CacheDB) GetNodeCaches(ctx SQLContext, nodeID cdssdk.NodeID) ([]model.Cache, error) { | |||
| var ret []model.Cache | |||
| err := sqlx.Select(ctx, &ret, "select * from Cache where NodeID = ?", nodeID) | |||
| return ret, err | |||
| } | |||
| // CreateNew 创建一条新的缓存记录 | |||
| func (*CacheDB) CreateNew(ctx SQLContext, fileHash string, nodeID int64) error { | |||
| _, err := ctx.Exec("insert into Cache values(?,?,?,?)", fileHash, nodeID, consts.CacheStatePinned, time.Now()) | |||
| func (*CacheDB) CreateNew(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error { | |||
| _, err := ctx.Exec("insert into Cache values(?,?,?,?,?,?)", fileHash, nodeID, consts.CacheStatePinned, nil, time.Now(), 0) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| @@ -44,35 +45,53 @@ func (*CacheDB) CreateNew(ctx SQLContext, fileHash string, nodeID int64) error { | |||
| return nil | |||
| } | |||
| func (*CacheDB) SetPackageObjectFrozen(ctx SQLContext, pkgID cdssdk.PackageID, nodeID cdssdk.NodeID) error { | |||
| var nowTime = time.Now() | |||
| _, err := ctx.Exec( | |||
| "insert into Cache(FileHash,NodeID,State,FrozenTime,CreateTime,Priority)"+ | |||
| " select FileHash, ?, ?, ?, ?, ? from Object where PackageID = ?"+ | |||
| " on duplicate key update State = ?, FrozenTime = ?", | |||
| nodeID, consts.CacheStatePinned, &nowTime, &nowTime, 0, | |||
| pkgID, | |||
| consts.CacheStatePinned, &nowTime, | |||
| ) | |||
| return err | |||
| } | |||
| // CreatePinned 创建一条缓存记录,如果已存在,但不是pinned状态,则将其设置为pin状态 | |||
| func (*CacheDB) CreatePinned(ctx SQLContext, fileHash string, nodeID int64, priority int) error { | |||
| _, err := ctx.Exec("replace into Cache values(?,?,?,?,?)", fileHash, nodeID, consts.CacheStatePinned, time.Now(), priority) | |||
| func (*CacheDB) CreatePinned(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID, priority int) error { | |||
| _, err := ctx.Exec("insert into Cache values(?,?,?,?,?,?) on duplicate key update State = ?, CreateTime = ?, Priority = ?", | |||
| fileHash, nodeID, consts.CacheStatePinned, nil, time.Now(), priority, | |||
| consts.CacheStatePinned, time.Now(), priority, | |||
| ) | |||
| return err | |||
| } | |||
| func (*CacheDB) BatchCreatePinned(ctx SQLContext, fileHashes []string, nodeID int64, priority int) error { | |||
| func (*CacheDB) BatchCreatePinned(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error { | |||
| var caches []model.Cache | |||
| var nowTime = time.Now() | |||
| for _, hash := range fileHashes { | |||
| caches = append(caches, model.Cache{ | |||
| FileHash: hash, | |||
| NodeID: nodeID, | |||
| State: consts.CacheStatePinned, | |||
| CacheTime: nowTime, | |||
| Priority: priority, | |||
| FileHash: hash, | |||
| NodeID: nodeID, | |||
| State: consts.CacheStatePinned, | |||
| FrozenTime: nil, | |||
| CreateTime: nowTime, | |||
| Priority: priority, | |||
| }) | |||
| } | |||
| _, err := sqlx.NamedExec(ctx, "insert into Cache(FileHash,NodeID,State,CacheTime,Priority) values(:FileHash,:NodeID,:State,:CacheTime,:Priority)"+ | |||
| " on duplicate key update State=values(State), CacheTime=values(CacheTime), Priority=values(Priority)", | |||
| _, err := sqlx.NamedExec(ctx, "insert into Cache(FileHash,NodeID,State,FrozenTime,CreateTime,Priority) values(:FileHash,:NodeID,:State,:FrozenTime,:CreateTime,:Priority)"+ | |||
| " on duplicate key update State=values(State), CreateTime=values(CreateTime), Priority=values(Priority)", | |||
| caches, | |||
| ) | |||
| return err | |||
| } | |||
| // Create 创建一条Temp状态的缓存记录,如果已存在则不产生效果 | |||
| func (*CacheDB) CreateTemp(ctx SQLContext, fileHash string, nodeID int64) error { | |||
| _, err := ctx.Exec("insert ignore into Cache values(?,?,?,?)", fileHash, nodeID, consts.CacheStateTemp, time.Now()) | |||
| func (*CacheDB) CreateTemp(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error { | |||
| _, err := ctx.Exec("insert ignore into Cache values(?,?,?,?,?,?)", fileHash, nodeID, consts.CacheStateTemp, nil, time.Now(), 0) | |||
| return err | |||
| } | |||
| @@ -85,19 +104,19 @@ func (*CacheDB) GetCachingFileNodes(ctx SQLContext, fileHash string) ([]model.No | |||
| } | |||
| // DeleteTemp 删除一条Temp状态的记录 | |||
| func (*CacheDB) DeleteTemp(ctx SQLContext, fileHash string, nodeID int64) error { | |||
| func (*CacheDB) DeleteTemp(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error { | |||
| _, err := ctx.Exec("delete from Cache where FileHash = ? and NodeID = ? and State = ?", fileHash, nodeID, consts.CacheStateTemp) | |||
| return err | |||
| } | |||
| // DeleteNodeAll 删除一个节点所有的记录 | |||
| func (*CacheDB) DeleteNodeAll(ctx SQLContext, nodeID int64) error { | |||
| func (*CacheDB) DeleteNodeAll(ctx SQLContext, nodeID cdssdk.NodeID) error { | |||
| _, err := ctx.Exec("delete from Cache where NodeID = ?", nodeID) | |||
| return err | |||
| } | |||
| // FindCachingFileUserNodes 在缓存表中查询指定数据所在的节点 | |||
| func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID int64, fileHash string) ([]model.Node, error) { | |||
| func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID cdssdk.NodeID, fileHash string) ([]model.Node, error) { | |||
| var x []model.Node | |||
| err := sqlx.Select(ctx, &x, | |||
| "select Node.* from Cache, UserNode, Node where"+ | |||
| @@ -106,8 +125,9 @@ func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID int64, fileHash | |||
| return x, err | |||
| } | |||
| func (*CacheDB) SetTemp(ctx SQLContext, fileHash string, nodeID int64) error { | |||
| _, err := ctx.Exec("update Cache set State = ?, CacheTime = ? where FileHash = ? and NodeID = ?", | |||
| // 设置一条记录为Temp,对Frozen的记录无效 | |||
| func (*CacheDB) SetTemp(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) error { | |||
| _, err := ctx.Exec("update Cache set State = ?, CreateTime = ? where FileHash = ? and NodeID = ? and FrozenTime = null", | |||
| consts.CacheStateTemp, | |||
| time.Now(), | |||
| fileHash, | |||
| @@ -1,30 +0,0 @@ | |||
| package db | |||
| import ( | |||
| //"database/sql" | |||
| "github.com/jmoiron/sqlx" | |||
| //"gitlink.org.cn/cloudream/common/consts" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| type EcDB struct { | |||
| *DB | |||
| } | |||
| func (db *DB) Ec() *EcDB { | |||
| return &EcDB{DB: db} | |||
| } | |||
| // GetEc 查询纠删码参数 | |||
| func (db *EcDB) GetEc(ctx SQLContext, ecName string) (model.Ec, error) { | |||
| var ret model.Ec | |||
| err := sqlx.Get(ctx, &ret, "select * from Ec where Name = ?", ecName) | |||
| return ret, err | |||
| } | |||
| func (db *EcDB) GetEcName(ctx SQLContext, objectID int) (string, error) { | |||
| var ret string | |||
| err := sqlx.Get(ctx, &ret, "select Redundancy from Object where ObjectID = ?") | |||
| return ret, err | |||
| } | |||
| @@ -1,31 +1,35 @@ | |||
| package model | |||
| import ( | |||
| "fmt" | |||
| "reflect" | |||
| "time" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| ) | |||
| // TODO 可以考虑逐步迁移到cdssdk中。迁移思路:数据对象应该包含的字段都迁移到cdssdk中,内部使用的一些特殊字段则留在这里 | |||
| type Node struct { | |||
| NodeID int64 `db:"NodeID" json:"nodeID"` | |||
| Name string `db:"Name" json:"name"` | |||
| LocalIP string `db:"LocalIP" json:"localIP"` | |||
| ExternalIP string `db:"ExternalIP" json:"externalIP"` | |||
| LocalGRPCPort int `db:"LocalGRPCPort" json:"localGRPCPort"` | |||
| ExternalGRPCPort int `db:"ExternalGRPCPort" json:"externalGRPCPort"` | |||
| LocationID int64 `db:"LocationID" json:"locationID"` | |||
| State string `db:"State" json:"state"` | |||
| LastReportTime *time.Time `db:"LastReportTime" json:"lastReportTime"` | |||
| NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"` | |||
| Name string `db:"Name" json:"name"` | |||
| LocalIP string `db:"LocalIP" json:"localIP"` | |||
| ExternalIP string `db:"ExternalIP" json:"externalIP"` | |||
| LocalGRPCPort int `db:"LocalGRPCPort" json:"localGRPCPort"` | |||
| ExternalGRPCPort int `db:"ExternalGRPCPort" json:"externalGRPCPort"` | |||
| LocationID cdssdk.LocationID `db:"LocationID" json:"locationID"` | |||
| State string `db:"State" json:"state"` | |||
| LastReportTime *time.Time `db:"LastReportTime" json:"lastReportTime"` | |||
| } | |||
| type Storage struct { | |||
| StorageID int64 `db:"StorageID" json:"storageID"` | |||
| Name string `db:"Name" json:"name"` | |||
| NodeID int64 `db:"NodeID" json:"nodeID"` | |||
| Directory string `db:"Directory" json:"directory"` | |||
| State string `db:"State" json:"state"` | |||
| StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"` | |||
| Name string `db:"Name" json:"name"` | |||
| NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"` | |||
| Directory string `db:"Directory" json:"directory"` | |||
| State string `db:"State" json:"state"` | |||
| } | |||
| type NodeDelay struct { | |||
| @@ -35,69 +39,100 @@ type NodeDelay struct { | |||
| } | |||
| type User struct { | |||
| UserID int64 `db:"UserID" json:"userID"` | |||
| Password string `db:"PassWord" json:"password"` | |||
| UserID cdssdk.UserID `db:"UserID" json:"userID"` | |||
| Password string `db:"PassWord" json:"password"` | |||
| } | |||
| type UserBucket struct { | |||
| UserID int64 `db:"UserID" json:"userID"` | |||
| BucketID int64 `db:"BucketID" json:"bucketID"` | |||
| UserID cdssdk.UserID `db:"UserID" json:"userID"` | |||
| BucketID cdssdk.BucketID `db:"BucketID" json:"bucketID"` | |||
| } | |||
| type UserNode struct { | |||
| UserID int64 `db:"UserID" json:"userID"` | |||
| NodeID int64 `db:"NodeID" json:"nodeID"` | |||
| UserID cdssdk.UserID `db:"UserID" json:"userID"` | |||
| NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"` | |||
| } | |||
| type UserStorage struct { | |||
| UserID int64 `db:"UserID" json:"userID"` | |||
| StorageID int64 `db:"StorageID" json:"storageID"` | |||
| UserID cdssdk.UserID `db:"UserID" json:"userID"` | |||
| StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"` | |||
| } | |||
| type Bucket struct { | |||
| BucketID int64 `db:"BucketID" json:"bucketID"` | |||
| Name string `db:"Name" json:"name"` | |||
| CreatorID int64 `db:"CreatorID" json:"creatorID"` | |||
| BucketID cdssdk.BucketID `db:"BucketID" json:"bucketID"` | |||
| Name string `db:"Name" json:"name"` | |||
| CreatorID cdssdk.UserID `db:"CreatorID" json:"creatorID"` | |||
| } | |||
| type Package = cdssdk.Package | |||
| type Object = cdssdk.Object | |||
| type ObjectRep struct { | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| FileHash string `db:"FileHash" json:"fileHash"` | |||
| // 由于Object的Redundancy字段是interface,所以不能直接将查询结果scan成Object,必须先scan成TempObject, | |||
| // 再.ToObject()转成Object | |||
| type TempObject struct { | |||
| cdssdk.Object | |||
| Redundancy RedundancyWarpper `db:"Redundancy"` | |||
| } | |||
| type ObjectBlock struct { | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| Index int `db:"Index" json:"index"` | |||
| FileHash string `db:"FileHash" json:"fileHash"` | |||
| func (o *TempObject) ToObject() cdssdk.Object { | |||
| obj := o.Object | |||
| obj.Redundancy = o.Redundancy.Value | |||
| return obj | |||
| } | |||
| type RedundancyWarpper struct { | |||
| Value cdssdk.Redundancy | |||
| } | |||
| func (o *RedundancyWarpper) Scan(src interface{}) error { | |||
| data, ok := src.([]uint8) | |||
| if !ok { | |||
| return fmt.Errorf("unknow src type: %v", reflect.TypeOf(data)) | |||
| } | |||
| red, err := serder.JSONToObjectEx[cdssdk.Redundancy](data) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| o.Value = red | |||
| return nil | |||
| } | |||
| type ObjectBlock = stgmod.ObjectBlock | |||
| type Cache struct { | |||
| FileHash string `db:"FileHash" json:"fileHash"` | |||
| NodeID int64 `db:"NodeID" json:"nodeID"` | |||
| State string `db:"State" json:"state"` | |||
| CacheTime time.Time `db:"CacheTime" json:"cacheTime"` | |||
| Priority int `db:"Priority" json:"priority"` | |||
| FileHash string `db:"FileHash" json:"fileHash"` | |||
| NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"` | |||
| State string `db:"State" json:"state"` | |||
| FrozenTime *time.Time `db:"FrozenTime" json:"frozenTime"` | |||
| CreateTime time.Time `db:"CreateTime" json:"createTime"` | |||
| Priority int `db:"Priority" json:"priority"` | |||
| } | |||
| const ( | |||
| StoragePackageStateNormal = "Normal" | |||
| StoragePackageStateDeleted = "Deleted" | |||
| StoragePackageStateOutdated = "Outdated" | |||
| ) | |||
| // Storage当前加载的Package | |||
| type StoragePackage struct { | |||
| PackageID int64 `db:"PackageID" json:"packageID"` | |||
| StorageID int64 `db:"StorageID" json:"storageID"` | |||
| UserID int64 `db:"UserID" json:"userID"` | |||
| State string `db:"State" json:"state"` | |||
| StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"` | |||
| PackageID cdssdk.PackageID `db:"PackageID" json:"packageID"` | |||
| UserID cdssdk.UserID `db:"UserID" json:"userID"` | |||
| State string `db:"State" json:"state"` | |||
| } | |||
| type Location struct { | |||
| LocationID int64 `db:"LocationID" json:"locationID"` | |||
| Name string `db:"Name" json:"name"` | |||
| type StoragePackageLog struct { | |||
| StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"` | |||
| PackageID cdssdk.PackageID `db:"PackageID" json:"packageID"` | |||
| UserID cdssdk.UserID `db:"UserID" json:"userID"` | |||
| CreateTime time.Time `db:"CreateTime" json:"createTime"` | |||
| } | |||
| type Ec struct { | |||
| EcID int `db:"EcID" json:"ecID"` | |||
| Name string `db:"Name" json:"name"` | |||
| EcK int `db:"EcK" json:"ecK"` | |||
| EcN int `db:"EcN" json:"ecN"` | |||
| type Location struct { | |||
| LocationID cdssdk.LocationID `db:"LocationID" json:"locationID"` | |||
| Name string `db:"Name" json:"name"` | |||
| } | |||
| @@ -4,6 +4,7 @@ import ( | |||
| "time" | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -15,7 +16,7 @@ func (db *DB) Node() *NodeDB { | |||
| return &NodeDB{DB: db} | |||
| } | |||
| func (db *NodeDB) GetByID(ctx SQLContext, nodeID int64) (model.Node, error) { | |||
| func (db *NodeDB) GetByID(ctx SQLContext, nodeID cdssdk.NodeID) (model.Node, error) { | |||
| var ret model.Node | |||
| err := sqlx.Get(ctx, &ret, "select * from Node where NodeID = ?", nodeID) | |||
| return ret, err | |||
| @@ -28,14 +29,14 @@ func (db *NodeDB) GetAllNodes(ctx SQLContext) ([]model.Node, error) { | |||
| } | |||
| // GetUserNodes 根据用户id查询可用node | |||
| func (db *NodeDB) GetUserNodes(ctx SQLContext, userID int64) ([]model.Node, error) { | |||
| func (db *NodeDB) GetUserNodes(ctx SQLContext, userID cdssdk.UserID) ([]model.Node, error) { | |||
| var nodes []model.Node | |||
| err := sqlx.Select(ctx, &nodes, "select Node.* from UserNode, Node where UserNode.NodeID = Node.NodeID and UserNode.UserID=?", userID) | |||
| return nodes, err | |||
| } | |||
| // UpdateState 更新状态,并且设置上次上报时间为现在 | |||
| func (db *NodeDB) UpdateState(ctx SQLContext, nodeID int64, state string) error { | |||
| func (db *NodeDB) UpdateState(ctx SQLContext, nodeID cdssdk.NodeID, state string) error { | |||
| _, err := ctx.Exec("update Node set State = ?, LastReportTime = ? where NodeID = ?", state, time.Now(), nodeID) | |||
| return err | |||
| } | |||
| @@ -5,6 +5,7 @@ import ( | |||
| "github.com/jmoiron/sqlx" | |||
| "github.com/samber/lo" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| @@ -17,15 +18,16 @@ func (db *DB) Object() *ObjectDB { | |||
| return &ObjectDB{DB: db} | |||
| } | |||
| func (db *ObjectDB) GetByID(ctx SQLContext, objectID int64) (model.Object, error) { | |||
| var ret model.Object | |||
| func (db *ObjectDB) GetByID(ctx SQLContext, objectID cdssdk.ObjectID) (model.Object, error) { | |||
| var ret model.TempObject | |||
| err := sqlx.Get(ctx, &ret, "select * from Object where ObjectID = ?", objectID) | |||
| return ret, err | |||
| return ret.ToObject(), err | |||
| } | |||
| func (db *ObjectDB) Create(ctx SQLContext, packageID int64, path string, size int64) (int64, error) { | |||
| sql := "insert into Object(PackageID, Path, Size) values(?,?,?)" | |||
| ret, err := ctx.Exec(sql, packageID, path, size) | |||
| func (db *ObjectDB) Create(ctx SQLContext, packageID cdssdk.PackageID, path string, size int64, fileHash string, redundancy cdssdk.Redundancy) (int64, error) { | |||
| sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy) values(?,?,?,?,?)" | |||
| ret, err := ctx.Exec(sql, packageID, path, size, redundancy) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert object failed, err: %w", err) | |||
| } | |||
| @@ -39,9 +41,13 @@ func (db *ObjectDB) Create(ctx SQLContext, packageID int64, path string, size in | |||
| } | |||
| // 创建或者更新记录,返回值true代表是创建,false代表是更新 | |||
| func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID int64, path string, size int64) (int64, bool, error) { | |||
| sql := "insert into Object(PackageID, Path, Size) values(?,?,?) on duplicate key update Size = ?" | |||
| ret, err := ctx.Exec(sql, packageID, path, size, size) | |||
| func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID cdssdk.PackageID, path string, size int64, fileHash string) (cdssdk.ObjectID, bool, error) { | |||
| // 首次上传Object时,默认使用Rep模式,即使是在更新一个已有的Object也是如此 | |||
| defRed := cdssdk.NewRepRedundancy() | |||
| sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy) values(?,?,?,?,?) on duplicate key update Size = ?, FileHash = ?, Redundancy = ?" | |||
| ret, err := ctx.Exec(sql, packageID, path, size, fileHash, defRed, size, fileHash, defRed) | |||
| if err != nil { | |||
| return 0, false, fmt.Errorf("insert object failed, err: %w", err) | |||
| } | |||
| @@ -57,10 +63,10 @@ func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID int64, path string, | |||
| if err != nil { | |||
| return 0, false, fmt.Errorf("get id of inserted object failed, err: %w", err) | |||
| } | |||
| return objectID, true, nil | |||
| return cdssdk.ObjectID(objectID), true, nil | |||
| } | |||
| var objID int64 | |||
| var objID cdssdk.ObjectID | |||
| if err = sqlx.Get(ctx, &objID, "select ObjectID from Object where PackageID = ? and Path = ?", packageID, path); err != nil { | |||
| return 0, false, fmt.Errorf("getting object id: %w", err) | |||
| } | |||
| @@ -68,65 +74,7 @@ func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID int64, path string, | |||
| return objID, false, nil | |||
| } | |||
| func (db *ObjectDB) UpdateRepObject(ctx SQLContext, objectID int64, fileSize int64, nodeIDs []int64, fileHash string) error { | |||
| _, err := db.UpdateFileInfo(ctx, objectID, fileSize) | |||
| if err != nil { | |||
| if err != nil { | |||
| return fmt.Errorf("update rep object failed, err: %w", err) | |||
| } | |||
| } | |||
| objRep, err := db.ObjectRep().GetByID(ctx, objectID) | |||
| if err != nil { | |||
| return fmt.Errorf("get object rep failed, err: %w", err) | |||
| } | |||
| // 如果新文件与旧文件的Hash不同,则需要更新关联的FileHash,重新插入Cache记录 | |||
| if objRep.FileHash != fileHash { | |||
| _, err := db.ObjectRep().Update(ctx, objectID, fileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("update rep object file hash failed, err: %w", err) | |||
| } | |||
| for _, nodeID := range nodeIDs { | |||
| err := db.Cache().CreatePinned(ctx, fileHash, nodeID, 0) //priority = 0 | |||
| if err != nil { | |||
| return fmt.Errorf("create cache failed, err: %w", err) | |||
| } | |||
| } | |||
| } else { | |||
| // 如果相同,则只增加Cache中不存在的记录 | |||
| cachedNodes, err := db.Cache().GetCachingFileNodes(ctx, fileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("find caching file nodes failed, err: %w", err) | |||
| } | |||
| // 筛选出不在cachedNodes中的id | |||
| newNodeIDs := lo.Filter(nodeIDs, func(id int64, index int) bool { | |||
| return lo.NoneBy(cachedNodes, func(node model.Node) bool { | |||
| return node.NodeID == id | |||
| }) | |||
| }) | |||
| for _, nodeID := range newNodeIDs { | |||
| err := db.Cache().CreatePinned(ctx, fileHash, nodeID, 0) //priority | |||
| if err != nil { | |||
| return fmt.Errorf("create cache failed, err: %w", err) | |||
| } | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (*ObjectDB) BatchGetAllEcObjectIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| rep := "rep" | |||
| err := sqlx.Select(ctx, &ret, "SELECT ObjectID FROM object where Redundancy != ? limit ?, ?", rep, start, count) | |||
| return ret, err | |||
| } | |||
| func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID int64, fileSize int64) (bool, error) { | |||
| func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID cdssdk.ObjectID, fileSize int64) (bool, error) { | |||
| ret, err := ctx.Exec("update Object set FileSize = ? where ObjectID = ?", fileSize, objectID) | |||
| if err != nil { | |||
| return false, err | |||
| @@ -140,100 +88,17 @@ func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID int64, fileSize int64) | |||
| return cnt > 0, nil | |||
| } | |||
| func (*ObjectDB) GetPackageObjects(ctx SQLContext, packageID int64) ([]model.Object, error) { | |||
| var ret []model.Object | |||
| func (*ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Object, error) { | |||
| var ret []model.TempObject | |||
| err := sqlx.Select(ctx, &ret, "select * from Object where PackageID = ? order by ObjectID asc", packageID) | |||
| return ret, err | |||
| return lo.Map(ret, func(o model.TempObject, idx int) model.Object { return o.ToObject() }), err | |||
| } | |||
| func (db *ObjectDB) BatchAddRep(ctx SQLContext, packageID int64, objs []coormq.AddRepObjectInfo) ([]int64, error) { | |||
| var objIDs []int64 | |||
| func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, objs []coormq.AddObjectInfo) ([]cdssdk.ObjectID, error) { | |||
| objIDs := make([]cdssdk.ObjectID, 0, len(objs)) | |||
| for _, obj := range objs { | |||
| // 创建对象的记录 | |||
| objID, isCreate, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object: %w", err) | |||
| } | |||
| objIDs = append(objIDs, objID) | |||
| if isCreate { | |||
| if err := db.createRep(ctx, objID, obj); err != nil { | |||
| return nil, err | |||
| } | |||
| } else { | |||
| if err := db.updateRep(ctx, objID, obj); err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| } | |||
| return objIDs, nil | |||
| } | |||
| func (db *ObjectDB) createRep(ctx SQLContext, objID int64, obj coormq.AddRepObjectInfo) error { | |||
| // 创建对象副本的记录 | |||
| if err := db.ObjectRep().Create(ctx, objID, obj.FileHash); err != nil { | |||
| return fmt.Errorf("creating object rep: %w", err) | |||
| } | |||
| // 创建缓存记录 | |||
| priority := 0 //优先级暂时设置为0 | |||
| for _, nodeID := range obj.NodeIDs { | |||
| if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, priority); err != nil { | |||
| return fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (db *ObjectDB) updateRep(ctx SQLContext, objID int64, obj coormq.AddRepObjectInfo) error { | |||
| objRep, err := db.ObjectRep().GetByID(ctx, objID) | |||
| if err != nil { | |||
| return fmt.Errorf("getting object rep: %w", err) | |||
| } | |||
| // 如果新文件与旧文件的Hash不同,则需要更新关联的FileHash,重新插入Cache记录 | |||
| if objRep.FileHash != obj.FileHash { | |||
| _, err := db.ObjectRep().Update(ctx, objID, obj.FileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("updating rep object file hash: %w", err) | |||
| } | |||
| for _, nodeID := range obj.NodeIDs { | |||
| if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, 0); err != nil { | |||
| return fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| } else { | |||
| // 如果相同,则只增加Cache中不存在的记录 | |||
| cachedNodes, err := db.Cache().GetCachingFileNodes(ctx, obj.FileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("finding caching file nodes: %w", err) | |||
| } | |||
| // 筛选出不在cachedNodes中的id | |||
| newNodeIDs := lo.Filter(obj.NodeIDs, func(id int64, index int) bool { | |||
| return lo.NoneBy(cachedNodes, func(node model.Node) bool { | |||
| return node.NodeID == id | |||
| }) | |||
| }) | |||
| for _, nodeID := range newNodeIDs { | |||
| if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, 0); err != nil { | |||
| return fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (db *ObjectDB) BatchAddEC(ctx SQLContext, packageID int64, objs []coormq.AddECObjectInfo) ([]int64, error) { | |||
| objIDs := make([]int64, 0, len(objs)) | |||
| for _, obj := range objs { | |||
| // 创建对象的记录 | |||
| objID, isCreate, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size) | |||
| objID, isCreate, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size, obj.FileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object: %w", err) | |||
| } | |||
| @@ -245,36 +110,30 @@ func (db *ObjectDB) BatchAddEC(ctx SQLContext, packageID int64, objs []coormq.Ad | |||
| if err = db.ObjectBlock().DeleteObjectAll(ctx, objID); err != nil { | |||
| return nil, fmt.Errorf("deleting all object block: %w", err) | |||
| } | |||
| } | |||
| // 创建编码块的记录 | |||
| for i := 0; i < len(obj.FileHashes); i++ { | |||
| err := db.ObjectBlock().Create(ctx, objID, i, obj.FileHashes[i]) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object block: %w", err) | |||
| } | |||
| // 首次上传默认使用不分块的rep模式 | |||
| err = db.ObjectBlock().Create(ctx, objID, 0, obj.NodeID, obj.FileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object block: %w", err) | |||
| } | |||
| // 创建缓存记录 | |||
| priority := 0 //优先级暂时设置为0 | |||
| for i, nodeID := range obj.NodeIDs { | |||
| err = db.Cache().CreatePinned(ctx, obj.FileHashes[i], nodeID, priority) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| err = db.Cache().CreatePinned(ctx, obj.FileHash, obj.NodeID, 0) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| return objIDs, nil | |||
| } | |||
| func (*ObjectDB) BatchDelete(ctx SQLContext, ids []int64) error { | |||
| func (*ObjectDB) BatchDelete(ctx SQLContext, ids []cdssdk.ObjectID) error { | |||
| _, err := ctx.Exec("delete from Object where ObjectID in (?)", ids) | |||
| return err | |||
| } | |||
| func (*ObjectDB) DeleteInPackage(ctx SQLContext, packageID int64) error { | |||
| func (*ObjectDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.PackageID) error { | |||
| _, err := ctx.Exec("delete from Object where PackageID = ?", packageID) | |||
| return err | |||
| } | |||
| @@ -3,9 +3,11 @@ package db | |||
| import ( | |||
| "database/sql" | |||
| "fmt" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -18,17 +20,17 @@ func (db *DB) ObjectBlock() *ObjectBlockDB { | |||
| return &ObjectBlockDB{DB: db} | |||
| } | |||
| func (db *ObjectBlockDB) Create(ctx SQLContext, objectID int64, index int, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectBlock values(?,?,?)", objectID, index, fileHash) | |||
| func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, nodeID cdssdk.NodeID, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectBlock values(?,?,?,?)", objectID, index, nodeID, fileHash) | |||
| return err | |||
| } | |||
| func (db *ObjectBlockDB) DeleteObjectAll(ctx SQLContext, objectID int64) error { | |||
| func (db *ObjectBlockDB) DeleteObjectAll(ctx SQLContext, objectID cdssdk.ObjectID) error { | |||
| _, err := ctx.Exec("delete from ObjectBlock where ObjectID = ?", objectID) | |||
| return err | |||
| } | |||
| func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID int64) error { | |||
| func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.PackageID) error { | |||
| _, err := ctx.Exec("delete ObjectBlock from ObjectBlock inner join Object on ObjectBlock.ObjectID = Object.ObjectID where PackageID = ?", packageID) | |||
| return err | |||
| } | |||
| @@ -39,7 +41,7 @@ func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (in | |||
| "select count(FileHash) from ObjectBlock, Object, Package where FileHash = ? and"+ | |||
| " ObjectBlock.ObjectID = Object.ObjectID and"+ | |||
| " Object.PackageID = Package.PackageID and"+ | |||
| " Package.State = ?", fileHash, consts.PackageStateNormal) | |||
| " Package.State = ?", fileHash, cdssdk.PackageStateNormal) | |||
| if err == sql.ErrNoRows { | |||
| return 0, nil | |||
| } | |||
| @@ -47,85 +49,81 @@ func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (in | |||
| return cnt, err | |||
| } | |||
| func (db *ObjectBlockDB) GetBatchObjectBlocks(ctx SQLContext, objectIDs []int64) ([][]string, error) { | |||
| blocks := make([][]string, len(objectIDs)) | |||
| var err error | |||
| for i, objectID := range objectIDs { | |||
| var x []model.ObjectBlock | |||
| sql := "select * from ObjectBlock where ObjectID=?" | |||
| err = db.d.Select(&x, sql, objectID) | |||
| xx := make([]string, len(x)) | |||
| for ii := 0; ii < len(x); ii++ { | |||
| xx[x[ii].Index] = x[ii].FileHash | |||
| } | |||
| blocks[i] = xx | |||
| } | |||
| return blocks, err | |||
| } | |||
| func (db *ObjectBlockDB) GetBatchBlocksNodes(ctx SQLContext, hashs [][]string) ([][][]int64, error) { | |||
| nodes := make([][][]int64, len(hashs)) | |||
| var err error | |||
| for i, hs := range hashs { | |||
| fileNodes := make([][]int64, len(hs)) | |||
| for j, h := range hs { | |||
| var x []model.Node | |||
| err = sqlx.Select(ctx, &x, | |||
| "select Node.* from Cache, Node where"+ | |||
| " Cache.FileHash=? and Cache.NodeID = Node.NodeID and Cache.State=?", h, consts.CacheStatePinned) | |||
| xx := make([]int64, len(x)) | |||
| for ii := 0; ii < len(x); ii++ { | |||
| xx[ii] = x[ii].NodeID | |||
| } | |||
| fileNodes[j] = xx | |||
| } | |||
| nodes[i] = fileNodes | |||
| } | |||
| return nodes, err | |||
| } | |||
| func (db *ObjectBlockDB) GetWithNodeIDInPackage(ctx SQLContext, packageID int64) ([]stgmod.ObjectECData, error) { | |||
| var objs []model.Object | |||
| func (db *ObjectBlockDB) GetPackageBlockDetails(ctx SQLContext, packageID cdssdk.PackageID) ([]stgmod.ObjectDetail, error) { | |||
| var objs []model.TempObject | |||
| err := sqlx.Select(ctx, &objs, "select * from Object where PackageID = ? order by ObjectID asc", packageID) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("query objectIDs: %w", err) | |||
| return nil, fmt.Errorf("getting objects: %w", err) | |||
| } | |||
| rets := make([]stgmod.ObjectECData, 0, len(objs)) | |||
| rets := make([]stgmod.ObjectDetail, 0, len(objs)) | |||
| for _, obj := range objs { | |||
| var tmpRets []struct { | |||
| Index int `db:"Index"` | |||
| FileHash string `db:"FileHash"` | |||
| NodeIDs *string `db:"NodeIDs"` | |||
| var cachedObjectNodeIDs []cdssdk.NodeID | |||
| err := sqlx.Select(ctx, &cachedObjectNodeIDs, | |||
| "select NodeID from Object, Cache where"+ | |||
| " ObjectID = ? and Object.FileHash = Cache.FileHash", | |||
| obj.ObjectID, | |||
| ) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| var blockTmpRets []struct { | |||
| Index int `db:"Index"` | |||
| FileHashes string `db:"FileHashes"` | |||
| NodeIDs string `db:"NodeIDs"` | |||
| CachedNodeIDs *string `db:"CachedNodeIDs"` | |||
| } | |||
| err := sqlx.Select(ctx, | |||
| &tmpRets, | |||
| "select ObjectBlock.Index, ObjectBlock.FileHash, group_concat(NodeID) as NodeIDs from ObjectBlock"+ | |||
| " left join Cache on ObjectBlock.FileHash = Cache.FileHash"+ | |||
| " where ObjectID = ? group by ObjectBlock.Index, ObjectBlock.FileHash", | |||
| err = sqlx.Select(ctx, | |||
| &blockTmpRets, | |||
| "select ObjectBlock.Index, group_concat(distinct ObjectBlock.FileHash) as FileHashes, group_concat(distinct ObjectBlock.NodeID) as NodeIDs, group_concat(distinct Cache.NodeID) as CachedNodeIDs"+ | |||
| " from ObjectBlock left join Cache on ObjectBlock.FileHash = Cache.FileHash"+ | |||
| " where ObjectID = ? group by ObjectBlock.Index", | |||
| obj.ObjectID, | |||
| ) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| blocks := make([]stgmod.ObjectBlockData, 0, len(tmpRets)) | |||
| for _, tmp := range tmpRets { | |||
| var block stgmod.ObjectBlockData | |||
| blocks := make([]stgmod.ObjectBlockDetail, 0, len(blockTmpRets)) | |||
| for _, tmp := range blockTmpRets { | |||
| var block stgmod.ObjectBlockDetail | |||
| block.Index = tmp.Index | |||
| block.FileHash = tmp.FileHash | |||
| if tmp.NodeIDs != nil { | |||
| block.NodeIDs = splitIDStringUnsafe(*tmp.NodeIDs) | |||
| block.FileHash = splitConcatedFileHash(tmp.FileHashes)[0] | |||
| block.NodeIDs = splitConcatedNodeID(tmp.NodeIDs) | |||
| if tmp.CachedNodeIDs != nil { | |||
| block.CachedNodeIDs = splitConcatedNodeID(*tmp.CachedNodeIDs) | |||
| } | |||
| blocks = append(blocks, block) | |||
| } | |||
| rets = append(rets, stgmod.NewObjectECData(obj, blocks)) | |||
| rets = append(rets, stgmod.NewObjectDetail(obj.ToObject(), cachedObjectNodeIDs, blocks)) | |||
| } | |||
| return rets, nil | |||
| } | |||
| // 按逗号切割字符串,并将每一个部分解析为一个int64的ID。 | |||
| // 注:需要外部保证分隔的每一个部分都是正确的10进制数字格式 | |||
| func splitConcatedNodeID(idStr string) []cdssdk.NodeID { | |||
| idStrs := strings.Split(idStr, ",") | |||
| ids := make([]cdssdk.NodeID, 0, len(idStrs)) | |||
| for _, str := range idStrs { | |||
| // 假设传入的ID是正确的数字格式 | |||
| id, _ := strconv.ParseInt(str, 10, 64) | |||
| ids = append(ids, cdssdk.NodeID(id)) | |||
| } | |||
| return ids | |||
| } | |||
| // 按逗号切割字符串 | |||
| func splitConcatedFileHash(idStr string) []string { | |||
| idStrs := strings.Split(idStr, ",") | |||
| return idStrs | |||
| } | |||
| @@ -1,154 +0,0 @@ | |||
| package db | |||
| import ( | |||
| "database/sql" | |||
| "fmt" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| type ObjectRepDB struct { | |||
| *DB | |||
| } | |||
| func (db *DB) ObjectRep() *ObjectRepDB { | |||
| return &ObjectRepDB{DB: db} | |||
| } | |||
| // GetObjectRep 查询对象副本表 | |||
| func (db *ObjectRepDB) GetByID(ctx SQLContext, objectID int64) (model.ObjectRep, error) { | |||
| var ret model.ObjectRep | |||
| err := sqlx.Get(ctx, &ret, "select * from ObjectRep where ObjectID = ?", objectID) | |||
| return ret, err | |||
| } | |||
| func (db *ObjectRepDB) Create(ctx SQLContext, objectID int64, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectRep(ObjectID, FileHash) values(?,?)", objectID, fileHash) | |||
| return err | |||
| } | |||
| func (db *ObjectRepDB) Update(ctx SQLContext, objectID int64, fileHash string) (int64, error) { | |||
| ret, err := ctx.Exec("update ObjectRep set FileHash = ? where ObjectID = ?", fileHash, objectID) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return cnt, nil | |||
| } | |||
| func (db *ObjectRepDB) Delete(ctx SQLContext, objectID int64) error { | |||
| _, err := ctx.Exec("delete from ObjectRep where ObjectID = ?", objectID) | |||
| return err | |||
| } | |||
| func (db *ObjectRepDB) DeleteInPackage(ctx SQLContext, packageID int64) error { | |||
| _, err := ctx.Exec("delete ObjectRep from ObjectRep inner join Object on ObjectRep.ObjectID = Object.ObjectID where PackageID = ?", packageID) | |||
| return err | |||
| } | |||
| func (db *ObjectRepDB) GetFileMaxRepCount(ctx SQLContext, fileHash string) (int, error) { | |||
| var maxRepCnt *int | |||
| err := sqlx.Get(ctx, &maxRepCnt, | |||
| "select json_extract(Redundancy, '$.info.repCount') from ObjectRep, Object, Package where FileHash = ? and"+ | |||
| " ObjectRep.ObjectID = Object.ObjectID and"+ | |||
| " Object.PackageID = Package.PackageID and"+ | |||
| " Package.State = ?", fileHash, consts.PackageStateNormal) | |||
| if err == sql.ErrNoRows { | |||
| return 0, nil | |||
| } | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| if maxRepCnt == nil { | |||
| return 0, nil | |||
| } | |||
| return *maxRepCnt, err | |||
| } | |||
| func (db *ObjectRepDB) GetWithNodeIDInPackage(ctx SQLContext, packageID int64) ([]stgmod.ObjectRepData, error) { | |||
| var tmpRets []struct { | |||
| model.Object | |||
| FileHash *string `db:"FileHash"` | |||
| NodeIDs *string `db:"NodeIDs"` | |||
| } | |||
| err := sqlx.Select(ctx, | |||
| &tmpRets, | |||
| "select Object.*, ObjectRep.FileHash, group_concat(NodeID) as NodeIDs from Object"+ | |||
| " left join ObjectRep on Object.ObjectID = ObjectRep.ObjectID"+ | |||
| " left join Cache on ObjectRep.FileHash = Cache.FileHash"+ | |||
| " where PackageID = ? group by Object.ObjectID order by Object.ObjectID asc", | |||
| packageID, | |||
| ) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| rets := make([]stgmod.ObjectRepData, 0, len(tmpRets)) | |||
| for _, tmp := range tmpRets { | |||
| var repData stgmod.ObjectRepData | |||
| repData.Object = tmp.Object | |||
| if tmp.FileHash != nil { | |||
| repData.FileHash = *tmp.FileHash | |||
| } | |||
| if tmp.NodeIDs != nil { | |||
| repData.NodeIDs = splitIDStringUnsafe(*tmp.NodeIDs) | |||
| } | |||
| rets = append(rets, repData) | |||
| } | |||
| return rets, nil | |||
| } | |||
| func (db *ObjectRepDB) GetPackageObjectCacheInfos(ctx SQLContext, packageID int64) ([]cdssdk.ObjectCacheInfo, error) { | |||
| var tmpRet []struct { | |||
| cdssdk.Object | |||
| FileHash string `db:"FileHash"` | |||
| } | |||
| err := sqlx.Select(ctx, &tmpRet, "select Object.*, ObjectRep.FileHash from Object"+ | |||
| " left join ObjectRep on Object.ObjectID = ObjectRep.ObjectID"+ | |||
| " where Object.PackageID = ? order by Object.ObjectID asc", packageID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| ret := make([]cdssdk.ObjectCacheInfo, len(tmpRet)) | |||
| for i, r := range tmpRet { | |||
| ret[i] = cdssdk.NewObjectCacheInfo(r.Object, r.FileHash) | |||
| } | |||
| return ret, nil | |||
| } | |||
| // 按逗号切割字符串,并将每一个部分解析为一个int64的ID。 | |||
| // 注:需要外部保证分隔的每一个部分都是正确的10进制数字格式 | |||
| func splitIDStringUnsafe(idStr string) []int64 { | |||
| idStrs := strings.Split(idStr, ",") | |||
| ids := make([]int64, 0, len(idStrs)) | |||
| for _, str := range idStrs { | |||
| // 假设传入的ID是正确的数字格式 | |||
| id, _ := strconv.ParseInt(str, 10, 64) | |||
| ids = append(ids, id) | |||
| } | |||
| return ids | |||
| } | |||
| @@ -8,9 +8,6 @@ import ( | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -22,35 +19,35 @@ func (db *DB) Package() *PackageDB { | |||
| return &PackageDB{DB: db} | |||
| } | |||
| func (db *PackageDB) GetByID(ctx SQLContext, packageID int64) (model.Package, error) { | |||
| func (db *PackageDB) GetByID(ctx SQLContext, packageID cdssdk.PackageID) (model.Package, error) { | |||
| var ret model.Package | |||
| err := sqlx.Get(ctx, &ret, "select * from Package where PackageID = ?", packageID) | |||
| return ret, err | |||
| } | |||
| func (db *PackageDB) GetByName(ctx SQLContext, bucketID int64, name string) (model.Package, error) { | |||
| func (db *PackageDB) GetByName(ctx SQLContext, bucketID cdssdk.BucketID, name string) (model.Package, error) { | |||
| var ret model.Package | |||
| err := sqlx.Get(ctx, &ret, "select * from Package where BucketID = ? and Name = ?", bucketID, name) | |||
| return ret, err | |||
| } | |||
| func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([]cdssdk.PackageID, error) { | |||
| var ret []cdssdk.PackageID | |||
| err := sqlx.Select(ctx, &ret, "select PackageID from Package limit ?, ?", start, count) | |||
| return ret, err | |||
| } | |||
| func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID int64, bucketID int64) ([]model.Package, error) { | |||
| func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) { | |||
| var ret []model.Package | |||
| err := sqlx.Select(ctx, &ret, "select Package.* from UserBucket, Package where UserID = ? and UserBucket.BucketID = ? and UserBucket.BucketID = Package.BucketID", userID, bucketID) | |||
| return ret, err | |||
| } | |||
| // IsAvailable 判断一个用户是否拥有指定对象 | |||
| func (db *PackageDB) IsAvailable(ctx SQLContext, userID int64, packageID int64) (bool, error) { | |||
| var objID int64 | |||
| func (db *PackageDB) IsAvailable(ctx SQLContext, userID cdssdk.UserID, packageID cdssdk.PackageID) (bool, error) { | |||
| var pkgID cdssdk.PackageID | |||
| // 先根据PackageID找到Package,然后判断此Package所在的Bucket是不是归此用户所有 | |||
| err := sqlx.Get(ctx, &objID, | |||
| err := sqlx.Get(ctx, &pkgID, | |||
| "select Package.PackageID from Package, UserBucket where "+ | |||
| "Package.PackageID = ? and "+ | |||
| "Package.BucketID = UserBucket.BucketID and "+ | |||
| @@ -69,7 +66,7 @@ func (db *PackageDB) IsAvailable(ctx SQLContext, userID int64, packageID int64) | |||
| } | |||
| // GetUserPackage 获得Package,如果用户没有权限访问,则不会获得结果 | |||
| func (db *PackageDB) GetUserPackage(ctx SQLContext, userID int64, packageID int64) (model.Package, error) { | |||
| func (db *PackageDB) GetUserPackage(ctx SQLContext, userID cdssdk.UserID, packageID cdssdk.PackageID) (model.Package, error) { | |||
| var ret model.Package | |||
| err := sqlx.Get(ctx, &ret, | |||
| "select Package.* from Package, UserBucket where"+ | |||
| @@ -80,7 +77,7 @@ func (db *PackageDB) GetUserPackage(ctx SQLContext, userID int64, packageID int6 | |||
| return ret, err | |||
| } | |||
| func (db *PackageDB) Create(ctx SQLContext, bucketID int64, name string, redundancy cdssdk.TypedRedundancyInfo) (int64, error) { | |||
| func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name string) (cdssdk.PackageID, error) { | |||
| // 根据packagename和bucketid查询,若不存在则插入,若存在则返回错误 | |||
| var packageID int64 | |||
| err := sqlx.Get(ctx, &packageID, "select PackageID from Package where Name = ? AND BucketID = ?", name, bucketID) | |||
| @@ -93,13 +90,8 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID int64, name string, redunda | |||
| return 0, fmt.Errorf("query Package by PackageName and BucketID failed, err: %w", err) | |||
| } | |||
| redundancyJSON, err := serder.ObjectToJSON(redundancy) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("redundancy to json: %w", err) | |||
| } | |||
| sql := "insert into Package(Name, BucketID, State, Redundancy) values(?,?,?,?)" | |||
| r, err := ctx.Exec(sql, name, bucketID, consts.PackageStateNormal, redundancyJSON) | |||
| sql := "insert into Package(Name, BucketID, State) values(?,?,?)" | |||
| r, err := ctx.Exec(sql, name, bucketID, cdssdk.PackageStateNormal) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert package failed, err: %w", err) | |||
| } | |||
| @@ -109,11 +101,11 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID int64, name string, redunda | |||
| return 0, fmt.Errorf("get id of inserted package failed, err: %w", err) | |||
| } | |||
| return packageID, nil | |||
| return cdssdk.PackageID(packageID), nil | |||
| } | |||
| // SoftDelete 设置一个对象被删除,并将相关数据删除 | |||
| func (db *PackageDB) SoftDelete(ctx SQLContext, packageID int64) error { | |||
| func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) error { | |||
| obj, err := db.GetByID(ctx, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("get package failed, err: %w", err) | |||
| @@ -121,25 +113,18 @@ func (db *PackageDB) SoftDelete(ctx SQLContext, packageID int64) error { | |||
| // 不是正常状态的Package,则不删除 | |||
| // TODO 未来可能有其他状态 | |||
| if obj.State != consts.PackageStateNormal { | |||
| if obj.State != cdssdk.PackageStateNormal { | |||
| return nil | |||
| } | |||
| err = db.ChangeState(ctx, packageID, consts.PackageStateDeleted) | |||
| err = db.ChangeState(ctx, packageID, cdssdk.PackageStateDeleted) | |||
| if err != nil { | |||
| return fmt.Errorf("change package state failed, err: %w", err) | |||
| } | |||
| if obj.Redundancy.IsRepInfo() { | |||
| err = db.ObjectRep().DeleteInPackage(ctx, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| } | |||
| } else { | |||
| err = db.ObjectBlock().DeleteInPackage(ctx, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| } | |||
| err = db.ObjectBlock().DeleteInPackage(ctx, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| } | |||
| if err := db.Object().DeleteInPackage(ctx, packageID); err != nil { | |||
| @@ -155,18 +140,18 @@ func (db *PackageDB) SoftDelete(ctx SQLContext, packageID int64) error { | |||
| } | |||
| // DeleteUnused 删除一个已经是Deleted状态,且不再被使用的对象。目前可能被使用的地方只有StoragePackage | |||
| func (PackageDB) DeleteUnused(ctx SQLContext, packageID int64) error { | |||
| func (PackageDB) DeleteUnused(ctx SQLContext, packageID cdssdk.PackageID) error { | |||
| _, err := ctx.Exec("delete from Package where PackageID = ? and State = ? and "+ | |||
| "not exists(select StorageID from StoragePackage where PackageID = ?)", | |||
| packageID, | |||
| consts.PackageStateDeleted, | |||
| cdssdk.PackageStateDeleted, | |||
| packageID, | |||
| ) | |||
| return err | |||
| } | |||
| func (*PackageDB) ChangeState(ctx SQLContext, packageID int64, state string) error { | |||
| func (*PackageDB) ChangeState(ctx SQLContext, packageID cdssdk.PackageID, state string) error { | |||
| _, err := ctx.Exec("update Package set State = ? where PackageID = ?", state, packageID) | |||
| return err | |||
| } | |||
| @@ -5,6 +5,7 @@ import ( | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -16,19 +17,19 @@ func (db *DB) Storage() *StorageDB { | |||
| return &StorageDB{DB: db} | |||
| } | |||
| func (db *StorageDB) GetByID(ctx SQLContext, stgID int64) (model.Storage, error) { | |||
| func (db *StorageDB) GetByID(ctx SQLContext, stgID cdssdk.StorageID) (model.Storage, error) { | |||
| var stg model.Storage | |||
| err := sqlx.Get(ctx, &stg, "select * from Storage where StorageID = ?", stgID) | |||
| return stg, err | |||
| } | |||
| func (db *StorageDB) BatchGetAllStorageIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| func (db *StorageDB) BatchGetAllStorageIDs(ctx SQLContext, start int, count int) ([]cdssdk.StorageID, error) { | |||
| var ret []cdssdk.StorageID | |||
| err := sqlx.Select(ctx, &ret, "select StorageID from Storage limit ?, ?", start, count) | |||
| return ret, err | |||
| } | |||
| func (db *StorageDB) IsAvailable(ctx SQLContext, userID int64, storageID int64) (bool, error) { | |||
| func (db *StorageDB) IsAvailable(ctx SQLContext, userID cdssdk.UserID, storageID cdssdk.StorageID) (bool, error) { | |||
| var stgID int64 | |||
| err := sqlx.Get(ctx, &stgID, | |||
| "select Storage.StorageID from Storage, UserStorage where"+ | |||
| @@ -48,7 +49,7 @@ func (db *StorageDB) IsAvailable(ctx SQLContext, userID int64, storageID int64) | |||
| return true, nil | |||
| } | |||
| func (db *StorageDB) GetUserStorage(ctx SQLContext, userID int64, storageID int64) (model.Storage, error) { | |||
| func (db *StorageDB) GetUserStorage(ctx SQLContext, userID cdssdk.UserID, storageID cdssdk.StorageID) (model.Storage, error) { | |||
| var stg model.Storage | |||
| err := sqlx.Get(ctx, &stg, | |||
| "select Storage.* from UserStorage, Storage where UserID = ? and UserStorage.StorageID = ? and UserStorage.StorageID = Storage.StorageID", | |||
| @@ -58,7 +59,7 @@ func (db *StorageDB) GetUserStorage(ctx SQLContext, userID int64, storageID int6 | |||
| return stg, err | |||
| } | |||
| func (db *StorageDB) ChangeState(ctx SQLContext, storageID int64, state string) error { | |||
| func (db *StorageDB) ChangeState(ctx SQLContext, storageID cdssdk.StorageID, state string) error { | |||
| _, err := ctx.Exec("update Storage set State = ? where StorageID = ?", state, storageID) | |||
| return err | |||
| } | |||
| @@ -4,7 +4,7 @@ import ( | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -16,47 +16,47 @@ func (db *DB) StoragePackage() *StoragePackageDB { | |||
| return &StoragePackageDB{DB: db} | |||
| } | |||
| func (*StoragePackageDB) Get(ctx SQLContext, storageID int64, packageID int64, userID int64) (model.StoragePackage, error) { | |||
| func (*StoragePackageDB) Get(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) (model.StoragePackage, error) { | |||
| var ret model.StoragePackage | |||
| err := sqlx.Get(ctx, &ret, "select * from StoragePackage where StorageID = ? and PackageID = ? and UserID = ?", storageID, packageID, userID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID int64, packageID int64) ([]model.StoragePackage, error) { | |||
| func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID) ([]model.StoragePackage, error) { | |||
| var ret []model.StoragePackage | |||
| err := sqlx.Select(ctx, &ret, "select * from StoragePackage where StorageID = ? and PackageID = ?", storageID, packageID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID int64) ([]model.StoragePackage, error) { | |||
| func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID cdssdk.StorageID) ([]model.StoragePackage, error) { | |||
| var ret []model.StoragePackage | |||
| err := sqlx.Select(ctx, &ret, "select * from StoragePackage where StorageID = ?", storageID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageDB) LoadPackage(ctx SQLContext, packageID int64, storageID int64, userID int64) error { | |||
| _, err := ctx.Exec("insert into StoragePackage values(?,?,?,?)", packageID, storageID, userID, consts.StoragePackageStateNormal) | |||
| func (*StoragePackageDB) Create(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { | |||
| _, err := ctx.Exec("insert into StoragePackage values(?,?,?,?)", storageID, packageID, userID, model.StoragePackageStateNormal) | |||
| return err | |||
| } | |||
| func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID int64, packageID int64, userID int64, state string) error { | |||
| func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID, state string) error { | |||
| _, err := ctx.Exec("update StoragePackage set State = ? where StorageID = ? and PackageID = ? and UserID = ?", state, storageID, packageID, userID) | |||
| return err | |||
| } | |||
| // SetStateNormal 将状态设置为Normal,如果记录状态是Deleted,则不进行操作 | |||
| func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID int64, packageID int64, userID int64) error { | |||
| func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { | |||
| _, err := ctx.Exec("update StoragePackage set State = ? where StorageID = ? and PackageID = ? and UserID = ? and State <> ?", | |||
| consts.StoragePackageStateNormal, | |||
| model.StoragePackageStateNormal, | |||
| storageID, | |||
| packageID, | |||
| userID, | |||
| consts.StoragePackageStateDeleted, | |||
| model.StoragePackageStateDeleted, | |||
| ) | |||
| return err | |||
| } | |||
| func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID int64, state string) (int64, error) { | |||
| func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID cdssdk.PackageID, state string) (int64, error) { | |||
| ret, err := ctx.Exec( | |||
| "update StoragePackage set State = ? where PackageID = ?", | |||
| state, | |||
| @@ -76,11 +76,11 @@ func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID int64, sta | |||
| // SetAllPackageOutdated 将Storage中指定对象设置为已过期。 | |||
| // 注:只会设置Normal状态的对象 | |||
| func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID int64) (int64, error) { | |||
| func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) { | |||
| ret, err := ctx.Exec( | |||
| "update StoragePackage set State = ? where State = ? and PackageID = ?", | |||
| consts.StoragePackageStateOutdated, | |||
| consts.StoragePackageStateNormal, | |||
| model.StoragePackageStateOutdated, | |||
| model.StoragePackageStateNormal, | |||
| packageID, | |||
| ) | |||
| if err != nil { | |||
| @@ -95,17 +95,17 @@ func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID int64) | |||
| return cnt, nil | |||
| } | |||
| func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID int64) (int64, error) { | |||
| return db.SetAllPackageState(ctx, packageID, consts.StoragePackageStateDeleted) | |||
| func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) { | |||
| return db.SetAllPackageState(ctx, packageID, model.StoragePackageStateDeleted) | |||
| } | |||
| func (*StoragePackageDB) Delete(ctx SQLContext, storageID int64, packageID int64, userID int64) error { | |||
| func (*StoragePackageDB) Delete(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { | |||
| _, err := ctx.Exec("delete from StoragePackage where StorageID = ? and PackageID = ? and UserID = ?", storageID, packageID, userID) | |||
| return err | |||
| } | |||
| // FindPackageStorages 查询存储了指定对象的Storage | |||
| func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID int64) ([]model.Storage, error) { | |||
| func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Storage, error) { | |||
| var ret []model.Storage | |||
| err := sqlx.Select(ctx, &ret, | |||
| "select Storage.* from StoragePackage, Storage where PackageID = ? and"+ | |||
| @@ -0,0 +1,33 @@ | |||
| package db | |||
| import ( | |||
| "time" | |||
| "github.com/jmoiron/sqlx" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| type StoragePackageLogDB struct { | |||
| *DB | |||
| } | |||
| func (db *DB) StoragePackageLog() *StoragePackageLogDB { | |||
| return &StoragePackageLogDB{DB: db} | |||
| } | |||
| func (*StoragePackageLogDB) Get(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) (model.StoragePackageLog, error) { | |||
| var ret model.StoragePackageLog | |||
| err := sqlx.Get(ctx, &ret, "select * from StoragePackageLog where StorageID = ? and PackageID = ? and UserID = ?", storageID, packageID, userID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageLogDB) Create(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID, createTime time.Time) error { | |||
| _, err := ctx.Exec("insert into StoragePackageLog values(?,?,?,?)", storageID, packageID, userID, createTime) | |||
| return err | |||
| } | |||
| func (*StoragePackageLogDB) Delete(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error { | |||
| _, err := ctx.Exec("delete from StoragePackageLog where StorageID = ? and PackageID = ? and UserID = ?", storageID, packageID, userID) | |||
| return err | |||
| } | |||
| @@ -4,6 +4,7 @@ import ( | |||
| "strconv" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -14,7 +15,7 @@ type IPFSLockReqBuilder struct { | |||
| func (b *LockRequestBuilder) IPFS() *IPFSLockReqBuilder { | |||
| return &IPFSLockReqBuilder{LockRequestBuilder: b} | |||
| } | |||
| func (b *IPFSLockReqBuilder) ReadOneRep(nodeID int64, fileHash string) *IPFSLockReqBuilder { | |||
| func (b *IPFSLockReqBuilder) ReadOneRep(nodeID cdssdk.NodeID, fileHash string) *IPFSLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(nodeID), | |||
| Name: lockprovider.IPFS_ELEMENT_READ_LOCK, | |||
| @@ -23,7 +24,7 @@ func (b *IPFSLockReqBuilder) ReadOneRep(nodeID int64, fileHash string) *IPFSLock | |||
| return b | |||
| } | |||
| func (b *IPFSLockReqBuilder) WriteOneRep(nodeID int64, fileHash string) *IPFSLockReqBuilder { | |||
| func (b *IPFSLockReqBuilder) WriteOneRep(nodeID cdssdk.NodeID, fileHash string) *IPFSLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(nodeID), | |||
| Name: lockprovider.IPFS_ELEMENT_WRITE_LOCK, | |||
| @@ -32,7 +33,7 @@ func (b *IPFSLockReqBuilder) WriteOneRep(nodeID int64, fileHash string) *IPFSLoc | |||
| return b | |||
| } | |||
| func (b *IPFSLockReqBuilder) ReadAnyRep(nodeID int64) *IPFSLockReqBuilder { | |||
| func (b *IPFSLockReqBuilder) ReadAnyRep(nodeID cdssdk.NodeID) *IPFSLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(nodeID), | |||
| Name: lockprovider.IPFS_SET_READ_LOCK, | |||
| @@ -41,7 +42,7 @@ func (b *IPFSLockReqBuilder) ReadAnyRep(nodeID int64) *IPFSLockReqBuilder { | |||
| return b | |||
| } | |||
| func (b *IPFSLockReqBuilder) WriteAnyRep(nodeID int64) *IPFSLockReqBuilder { | |||
| func (b *IPFSLockReqBuilder) WriteAnyRep(nodeID cdssdk.NodeID) *IPFSLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(nodeID), | |||
| Name: lockprovider.IPFS_SET_WRITE_LOCK, | |||
| @@ -50,7 +51,7 @@ func (b *IPFSLockReqBuilder) WriteAnyRep(nodeID int64) *IPFSLockReqBuilder { | |||
| return b | |||
| } | |||
| func (b *IPFSLockReqBuilder) CreateAnyRep(nodeID int64) *IPFSLockReqBuilder { | |||
| func (b *IPFSLockReqBuilder) CreateAnyRep(nodeID cdssdk.NodeID) *IPFSLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(nodeID), | |||
| Name: lockprovider.IPFS_SET_CREATE_LOCK, | |||
| @@ -59,6 +60,6 @@ func (b *IPFSLockReqBuilder) CreateAnyRep(nodeID int64) *IPFSLockReqBuilder { | |||
| return b | |||
| } | |||
| func (b *IPFSLockReqBuilder) makePath(nodeID int64) []string { | |||
| return []string{lockprovider.IPFSLockPathPrefix, strconv.FormatInt(nodeID, 10)} | |||
| func (b *IPFSLockReqBuilder) makePath(nodeID cdssdk.NodeID) []string { | |||
| return []string{lockprovider.IPFSLockPathPrefix, strconv.FormatInt(int64(nodeID), 10)} | |||
| } | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) Bucket() *MetadataBucketLockReqBuilder { | |||
| return &MetadataBucketLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataBucketLockReqBuilder) ReadOne(bucketID int64) *MetadataBucketLockReqBuilder { | |||
| func (b *MetadataBucketLockReqBuilder) ReadOne(bucketID cdssdk.BucketID) *MetadataBucketLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Bucket"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataBucketLockReqBuilder) ReadOne(bucketID int64) *MetadataBucketLo | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataBucketLockReqBuilder) WriteOne(bucketID int64) *MetadataBucketLockReqBuilder { | |||
| func (b *MetadataBucketLockReqBuilder) WriteOne(bucketID cdssdk.BucketID) *MetadataBucketLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Bucket"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -29,7 +30,7 @@ func (b *MetadataBucketLockReqBuilder) WriteOne(bucketID int64) *MetadataBucketL | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataBucketLockReqBuilder) CreateOne(userID int64, bucketName string) *MetadataBucketLockReqBuilder { | |||
| func (b *MetadataBucketLockReqBuilder) CreateOne(userID cdssdk.UserID, bucketName string) *MetadataBucketLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Bucket"), | |||
| Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK, | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) Cache() *MetadataCacheLockReqBuilder { | |||
| return &MetadataCacheLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataCacheLockReqBuilder) ReadOne(nodeID int64, fileHash string) *MetadataCacheLockReqBuilder { | |||
| func (b *MetadataCacheLockReqBuilder) ReadOne(nodeID cdssdk.NodeID, fileHash string) *MetadataCacheLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Cache"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataCacheLockReqBuilder) ReadOne(nodeID int64, fileHash string) *Me | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataCacheLockReqBuilder) WriteOne(nodeID int64, fileHash string) *MetadataCacheLockReqBuilder { | |||
| func (b *MetadataCacheLockReqBuilder) WriteOne(nodeID cdssdk.NodeID, fileHash string) *MetadataCacheLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Cache"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -29,7 +30,7 @@ func (b *MetadataCacheLockReqBuilder) WriteOne(nodeID int64, fileHash string) *M | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataCacheLockReqBuilder) CreateOne(nodeID int64, fileHash string) *MetadataCacheLockReqBuilder { | |||
| func (b *MetadataCacheLockReqBuilder) CreateOne(nodeID cdssdk.NodeID, fileHash string) *MetadataCacheLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Cache"), | |||
| Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK, | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) Node() *MetadataNodeLockReqBuilder { | |||
| return &MetadataNodeLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataNodeLockReqBuilder) ReadOne(nodeID int64) *MetadataNodeLockReqBuilder { | |||
| func (b *MetadataNodeLockReqBuilder) ReadOne(nodeID cdssdk.NodeID) *MetadataNodeLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Node"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataNodeLockReqBuilder) ReadOne(nodeID int64) *MetadataNodeLockReqB | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataNodeLockReqBuilder) WriteOne(nodeID int64) *MetadataNodeLockReqBuilder { | |||
| func (b *MetadataNodeLockReqBuilder) WriteOne(nodeID cdssdk.NodeID) *MetadataNodeLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Node"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) Package() *MetadataPackageLockReqBuilder { | |||
| return &MetadataPackageLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataPackageLockReqBuilder) ReadOne(packageID int64) *MetadataPackageLockReqBuilder { | |||
| func (b *MetadataPackageLockReqBuilder) ReadOne(packageID cdssdk.PackageID) *MetadataPackageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Package"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataPackageLockReqBuilder) ReadOne(packageID int64) *MetadataPackag | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataPackageLockReqBuilder) WriteOne(packageID int64) *MetadataPackageLockReqBuilder { | |||
| func (b *MetadataPackageLockReqBuilder) WriteOne(packageID cdssdk.PackageID) *MetadataPackageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Package"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -29,7 +30,7 @@ func (b *MetadataPackageLockReqBuilder) WriteOne(packageID int64) *MetadataPacka | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataPackageLockReqBuilder) CreateOne(bucketID int64, packageName string) *MetadataPackageLockReqBuilder { | |||
| func (b *MetadataPackageLockReqBuilder) CreateOne(bucketID cdssdk.BucketID, packageName string) *MetadataPackageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("Package"), | |||
| Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK, | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) StoragePackage() *MetadataStoragePackageLockReq | |||
| return &MetadataStoragePackageLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataStoragePackageLockReqBuilder) ReadOne(storageID int64, userID int64, packageID int64) *MetadataStoragePackageLockReqBuilder { | |||
| func (b *MetadataStoragePackageLockReqBuilder) ReadOne(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("StoragePackage"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataStoragePackageLockReqBuilder) ReadOne(storageID int64, userID i | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataStoragePackageLockReqBuilder) WriteOne(storageID int64, userID int64, packageID int64) *MetadataStoragePackageLockReqBuilder { | |||
| func (b *MetadataStoragePackageLockReqBuilder) WriteOne(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("StoragePackage"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -29,7 +30,7 @@ func (b *MetadataStoragePackageLockReqBuilder) WriteOne(storageID int64, userID | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataStoragePackageLockReqBuilder) CreateOne(storageID int64, userID int64, packageID int64) *MetadataStoragePackageLockReqBuilder { | |||
| func (b *MetadataStoragePackageLockReqBuilder) CreateOne(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("StoragePackage"), | |||
| Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK, | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) UserBucket() *MetadataUserBucketLockReqBuilder | |||
| return &MetadataUserBucketLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataUserBucketLockReqBuilder) ReadOne(userID int64, bucketID int64) *MetadataUserBucketLockReqBuilder { | |||
| func (b *MetadataUserBucketLockReqBuilder) ReadOne(userID cdssdk.UserID, bucketID cdssdk.BucketID) *MetadataUserBucketLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("UserBucket"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataUserBucketLockReqBuilder) ReadOne(userID int64, bucketID int64) | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataUserBucketLockReqBuilder) WriteOne(userID int64, bucketID int64) *MetadataUserBucketLockReqBuilder { | |||
| func (b *MetadataUserBucketLockReqBuilder) WriteOne(userID cdssdk.UserID, bucketID cdssdk.BucketID) *MetadataUserBucketLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("UserBucket"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -29,7 +30,7 @@ func (b *MetadataUserBucketLockReqBuilder) WriteOne(userID int64, bucketID int64 | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataUserBucketLockReqBuilder) CreateOne(userID int64, bucketID int64) *MetadataUserBucketLockReqBuilder { | |||
| func (b *MetadataUserBucketLockReqBuilder) CreateOne(userID cdssdk.UserID, bucketID cdssdk.BucketID) *MetadataUserBucketLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("UserBucket"), | |||
| Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK, | |||
| @@ -2,6 +2,7 @@ package reqbuilder | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -13,7 +14,7 @@ func (b *MetadataLockReqBuilder) UserStorage() *MetadataUserStorageLockReqBuilde | |||
| return &MetadataUserStorageLockReqBuilder{MetadataLockReqBuilder: b} | |||
| } | |||
| func (b *MetadataUserStorageLockReqBuilder) ReadOne(userID int64, storageID int64) *MetadataUserStorageLockReqBuilder { | |||
| func (b *MetadataUserStorageLockReqBuilder) ReadOne(userID cdssdk.UserID, storageID cdssdk.StorageID) *MetadataUserStorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("UserStorage"), | |||
| Name: lockprovider.METADATA_ELEMENT_READ_LOCK, | |||
| @@ -21,7 +22,7 @@ func (b *MetadataUserStorageLockReqBuilder) ReadOne(userID int64, storageID int6 | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataUserStorageLockReqBuilder) WriteOne(userID int64, storageID int64) *MetadataUserStorageLockReqBuilder { | |||
| func (b *MetadataUserStorageLockReqBuilder) WriteOne(userID cdssdk.UserID, storageID cdssdk.StorageID) *MetadataUserStorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("UserStorage"), | |||
| Name: lockprovider.METADATA_ELEMENT_WRITE_LOCK, | |||
| @@ -29,7 +30,7 @@ func (b *MetadataUserStorageLockReqBuilder) WriteOne(userID int64, storageID int | |||
| }) | |||
| return b | |||
| } | |||
| func (b *MetadataUserStorageLockReqBuilder) CreateOne(userID int64, storageID int64) *MetadataUserStorageLockReqBuilder { | |||
| func (b *MetadataUserStorageLockReqBuilder) CreateOne(userID cdssdk.UserID, storageID cdssdk.StorageID) *MetadataUserStorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath("UserStorage"), | |||
| Name: lockprovider.METADATA_ELEMENT_CREATE_LOCK, | |||
| @@ -4,6 +4,7 @@ import ( | |||
| "strconv" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider" | |||
| ) | |||
| @@ -15,7 +16,7 @@ func (b *LockRequestBuilder) Storage() *StorageLockReqBuilder { | |||
| return &StorageLockReqBuilder{LockRequestBuilder: b} | |||
| } | |||
| func (b *StorageLockReqBuilder) ReadOnePackage(storageID int64, userID int64, packageID int64) *StorageLockReqBuilder { | |||
| func (b *StorageLockReqBuilder) ReadOnePackage(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *StorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(storageID), | |||
| Name: lockprovider.STORAGE_ELEMENT_READ_LOCK, | |||
| @@ -24,7 +25,7 @@ func (b *StorageLockReqBuilder) ReadOnePackage(storageID int64, userID int64, pa | |||
| return b | |||
| } | |||
| func (b *StorageLockReqBuilder) WriteOnePackage(storageID int64, userID int64, packageID int64) *StorageLockReqBuilder { | |||
| func (b *StorageLockReqBuilder) WriteOnePackage(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *StorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(storageID), | |||
| Name: lockprovider.STORAGE_ELEMENT_WRITE_LOCK, | |||
| @@ -33,7 +34,7 @@ func (b *StorageLockReqBuilder) WriteOnePackage(storageID int64, userID int64, p | |||
| return b | |||
| } | |||
| func (b *StorageLockReqBuilder) CreateOnePackage(storageID int64, userID int64, packageID int64) *StorageLockReqBuilder { | |||
| func (b *StorageLockReqBuilder) CreateOnePackage(storageID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID) *StorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(storageID), | |||
| Name: lockprovider.STORAGE_ELEMENT_WRITE_LOCK, | |||
| @@ -42,7 +43,7 @@ func (b *StorageLockReqBuilder) CreateOnePackage(storageID int64, userID int64, | |||
| return b | |||
| } | |||
| func (b *StorageLockReqBuilder) ReadAnyPackage(storageID int64) *StorageLockReqBuilder { | |||
| func (b *StorageLockReqBuilder) ReadAnyPackage(storageID cdssdk.StorageID) *StorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(storageID), | |||
| Name: lockprovider.STORAGE_SET_READ_LOCK, | |||
| @@ -51,7 +52,7 @@ func (b *StorageLockReqBuilder) ReadAnyPackage(storageID int64) *StorageLockReqB | |||
| return b | |||
| } | |||
| func (b *StorageLockReqBuilder) WriteAnyPackage(storageID int64) *StorageLockReqBuilder { | |||
| func (b *StorageLockReqBuilder) WriteAnyPackage(storageID cdssdk.StorageID) *StorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(storageID), | |||
| Name: lockprovider.STORAGE_SET_WRITE_LOCK, | |||
| @@ -60,7 +61,7 @@ func (b *StorageLockReqBuilder) WriteAnyPackage(storageID int64) *StorageLockReq | |||
| return b | |||
| } | |||
| func (b *StorageLockReqBuilder) CreateAnyPackage(storageID int64) *StorageLockReqBuilder { | |||
| func (b *StorageLockReqBuilder) CreateAnyPackage(storageID cdssdk.StorageID) *StorageLockReqBuilder { | |||
| b.locks = append(b.locks, distlock.Lock{ | |||
| Path: b.makePath(storageID), | |||
| Name: lockprovider.STORAGE_SET_CREATE_LOCK, | |||
| @@ -69,6 +70,6 @@ func (b *StorageLockReqBuilder) CreateAnyPackage(storageID int64) *StorageLockRe | |||
| return b | |||
| } | |||
| func (b *StorageLockReqBuilder) makePath(storageID int64) []string { | |||
| return []string{lockprovider.StorageLockPathPrefix, strconv.FormatInt(storageID, 10)} | |||
| func (b *StorageLockReqBuilder) makePath(storageID cdssdk.StorageID) []string { | |||
| return []string{lockprovider.StorageLockPathPrefix, strconv.FormatInt(int64(storageID), 10)} | |||
| } | |||
| @@ -5,14 +5,14 @@ import ( | |||
| "io" | |||
| "sync" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ec" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type ECCompute struct { | |||
| EC stgmod.EC `json:"ec"` | |||
| EC cdssdk.ECRedundancy `json:"ec"` | |||
| InputIDs []ioswitch.StreamID `json:"inputIDs"` | |||
| OutputIDs []ioswitch.StreamID `json:"outputIDs"` | |||
| InputBlockIndexes []int `json:"inputBlockIndexes"` | |||
| @@ -55,7 +55,7 @@ func (o *ECCompute) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| } | |||
| type ECReconstruct struct { | |||
| EC stgmod.EC `json:"ec"` | |||
| EC cdssdk.ECRedundancy `json:"ec"` | |||
| InputIDs []ioswitch.StreamID `json:"inputIDs"` | |||
| OutputIDs []ioswitch.StreamID `json:"outputIDs"` | |||
| InputBlockIndexes []int `json:"inputBlockIndexes"` | |||
| @@ -0,0 +1,91 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type IPFSRead struct { | |||
| Output ioswitch.StreamID `json:"output"` | |||
| FileHash string `json:"fileHash"` | |||
| } | |||
| func (o *IPFSRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| logger. | |||
| WithField("FileHash", o.FileHash). | |||
| WithField("Output", o.Output). | |||
| Debugf("ipfs read op") | |||
| defer logger.Debugf("ipfs read op finished") | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| return fmt.Errorf("new ipfs client: %w", err) | |||
| } | |||
| defer stgglb.IPFSPool.Release(ipfsCli) | |||
| file, err := ipfsCli.OpenRead(o.FileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("reading ipfs: %w", err) | |||
| } | |||
| fut := future.NewSetVoid() | |||
| file = myio.AfterReadClosedOnce(file, func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }) | |||
| sw.StreamReady(planID, ioswitch.NewStream(o.Output, file)) | |||
| // TODO context | |||
| fut.Wait(context.TODO()) | |||
| return nil | |||
| } | |||
// IPFSWrite is an op that stores the content of the Input stream into IPFS;
// the resulting file hash is published under ResultKey when it is non-empty.
type IPFSWrite struct {
	Input     ioswitch.StreamID `json:"input"`
	ResultKey string            `json:"resultKey"`
}
| func (o *IPFSWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| logger. | |||
| WithField("ResultKey", o.ResultKey). | |||
| WithField("Input", o.Input). | |||
| Debugf("ipfs write op") | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| return fmt.Errorf("new ipfs client: %w", err) | |||
| } | |||
| defer stgglb.IPFSPool.Release(ipfsCli) | |||
| strs, err := sw.WaitStreams(planID, o.Input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer strs[0].Stream.Close() | |||
| fileHash, err := ipfsCli.CreateFile(strs[0].Stream) | |||
| if err != nil { | |||
| return fmt.Errorf("creating ipfs file: %w", err) | |||
| } | |||
| if o.ResultKey != "" { | |||
| sw.AddResultValue(planID, ioswitch.ResultKV{ | |||
| Key: o.ResultKey, | |||
| Value: fileHash, | |||
| }) | |||
| } | |||
| return nil | |||
| } | |||
// init registers this file's ops with the serialization type union.
func init() {
	OpUnion.AddT((*IPFSRead)(nil))
	OpUnion.AddT((*IPFSWrite)(nil))
}
| @@ -0,0 +1,49 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
// Join is an op that concatenates the InputIDs streams into the OutputID
// stream, limiting the total output to Length bytes.
type Join struct {
	InputIDs []ioswitch.StreamID `json:"inputIDs"`
	OutputID ioswitch.StreamID   `json:"outputID"`
	Length   int64               `json:"length"`
}
| func (o *Join) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| strs, err := sw.WaitStreams(planID, o.InputIDs...) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| var strReaders []io.Reader | |||
| for _, s := range strs { | |||
| strReaders = append(strReaders, s.Stream) | |||
| } | |||
| defer func() { | |||
| for _, str := range strs { | |||
| str.Stream.Close() | |||
| } | |||
| }() | |||
| fut := future.NewSetVoid() | |||
| sw.StreamReady(planID, | |||
| ioswitch.NewStream(o.OutputID, | |||
| myio.AfterReadClosedOnce(myio.Length(myio.Join(strReaders), o.Length), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }), | |||
| ), | |||
| ) | |||
| fut.Wait(context.TODO()) | |||
| return nil | |||
| } | |||
// init registers the Join op with the serialization type union.
func init() {
	OpUnion.AddT((*Join)(nil))
}
| @@ -1,129 +1,9 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/types" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
// OpUnion is the externally-tagged type union used to (de)serialize the
// ioswitch.Op implementations listed below.
var OpUnion = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[ioswitch.Op](
	(*IPFSRead)(nil),
	(*IPFSWrite)(nil),
	(*Join)(nil),
)))
// IPFSRead is an op that reads the IPFS file FileHash and publishes its
// content as the Output stream.
type IPFSRead struct {
	Output   ioswitch.StreamID `json:"output"`
	FileHash string            `json:"fileHash"`
}
// Execute opens the IPFS file, publishes it as the output stream and blocks
// until the consumer closes it.
func (o *IPFSRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
	logger.
		WithField("FileHash", o.FileHash).
		WithField("Output", o.Output).
		Debugf("ipfs read op")
	defer logger.Debugf("ipfs read op finished")

	ipfsCli, err := stgglb.IPFSPool.Acquire()
	if err != nil {
		return fmt.Errorf("new ipfs client: %w", err)
	}
	defer stgglb.IPFSPool.Release(ipfsCli)

	file, err := ipfsCli.OpenRead(o.FileHash)
	if err != nil {
		return fmt.Errorf("reading ipfs: %w", err)
	}

	// The future completes when the consumer closes the stream.
	fut := future.NewSetVoid()
	file = myio.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
		fut.SetVoid()
	})
	sw.StreamReady(planID, ioswitch.NewStream(o.Output, file))

	// TODO context
	fut.Wait(context.TODO())
	return nil
}
// IPFSWrite is an op that stores the Input stream into IPFS; the resulting
// file hash is published under ResultKey when it is non-empty.
type IPFSWrite struct {
	Input     ioswitch.StreamID `json:"input"`
	ResultKey string            `json:"resultKey"`
}
// Execute waits for the input stream, writes it into IPFS and publishes the
// resulting file hash as a plan result when ResultKey is non-empty.
func (o *IPFSWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
	logger.
		WithField("ResultKey", o.ResultKey).
		WithField("Input", o.Input).
		Debugf("ipfs write op")

	ipfsCli, err := stgglb.IPFSPool.Acquire()
	if err != nil {
		return fmt.Errorf("new ipfs client: %w", err)
	}
	defer stgglb.IPFSPool.Release(ipfsCli)

	strs, err := sw.WaitStreams(planID, o.Input)
	if err != nil {
		return err
	}
	defer strs[0].Stream.Close()

	fileHash, err := ipfsCli.CreateFile(strs[0].Stream)
	if err != nil {
		return fmt.Errorf("creating ipfs file: %w", err)
	}

	if o.ResultKey != "" {
		sw.AddResultValue(planID, ioswitch.ResultKV{
			Key:   o.ResultKey,
			Value: fileHash,
		})
	}
	return nil
}
// Join is an op that concatenates the InputIDs streams into the OutputID
// stream, limiting the total output to Length bytes.
type Join struct {
	InputIDs []ioswitch.StreamID `json:"inputIDs"`
	OutputID ioswitch.StreamID   `json:"outputID"`
	Length   int64               `json:"length"`
}
// Execute concatenates all input streams into a single length-limited
// output stream and blocks until the consumer closes it.
func (o *Join) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
	strs, err := sw.WaitStreams(planID, o.InputIDs...)
	if err != nil {
		return err
	}

	var strReaders []io.Reader
	for _, s := range strs {
		strReaders = append(strReaders, s.Stream)
	}
	defer func() {
		for _, str := range strs {
			str.Stream.Close()
		}
	}()

	// The future completes when the consumer closes the joined stream.
	fut := future.NewSetVoid()
	sw.StreamReady(planID,
		ioswitch.NewStream(o.OutputID,
			myio.AfterReadClosedOnce(myio.Length(myio.Join(strReaders), o.Length), func(closer io.ReadCloser) {
				fut.SetVoid()
			}),
		),
	)
	fut.Wait(context.TODO())
	return nil
}
| var OpUnion = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[ioswitch.Op]())) | |||
| @@ -1,12 +1,35 @@ | |||
| package plans | |||
| import ( | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops" | |||
| ) | |||
// AgentPlanBuilder accumulates the ops that one agent node will execute.
type AgentPlanBuilder struct {
	owner *PlanBuilder // the composed-plan builder this agent belongs to
	node  model.Node
	ops   []ioswitch.Op
}
// AgentStream is a handle to a stream produced on a specific agent.
type AgentStream struct {
	owner *AgentPlanBuilder
	info  *StreamInfo
}
| func (b *AgentPlanBuilder) Build(planID ioswitch.PlanID) (AgentPlan, error) { | |||
| plan := ioswitch.Plan{ | |||
| ID: planID, | |||
| Ops: b.ops, | |||
| } | |||
| return AgentPlan{ | |||
| Plan: plan, | |||
| Node: b.node, | |||
| }, nil | |||
| } | |||
| func (b *AgentPlanBuilder) GRCPFetch(node model.Node, str *AgentStream) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| @@ -37,6 +60,27 @@ func (s *AgentStream) GRPCSend(node model.Node) *AgentStream { | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) IPFSRead(fileHash string) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| b.ops = append(b.ops, &ops.IPFSRead{ | |||
| Output: agtStr.info.ID, | |||
| FileHash: fileHash, | |||
| }) | |||
| return agtStr | |||
| } | |||
// IPFSWrite appends an op that stores this stream's content into IPFS and
// publishes the resulting file hash under resultKey (when non-empty).
func (s *AgentStream) IPFSWrite(resultKey string) {
	s.owner.ops = append(s.owner.ops, &ops.IPFSWrite{
		Input:     s.info.ID,
		ResultKey: resultKey,
	})
}
| func (b *AgentPlanBuilder) FileRead(filePath string) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| @@ -58,7 +102,7 @@ func (b *AgentStream) FileWrite(filePath string) { | |||
| }) | |||
| } | |||
| func (b *AgentPlanBuilder) ECCompute(ec stgmod.EC, inBlockIndexes []int, outBlockIndexes []int, streams ...*AgentStream) *MultiStream { | |||
| func (b *AgentPlanBuilder) ECCompute(ec cdssdk.ECRedundancy, inBlockIndexes []int, outBlockIndexes []int, streams ...*AgentStream) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var inputStrIDs []ioswitch.StreamID | |||
| @@ -87,7 +131,7 @@ func (b *AgentPlanBuilder) ECCompute(ec stgmod.EC, inBlockIndexes []int, outBloc | |||
| return mstr | |||
| } | |||
| func (b *AgentPlanBuilder) ECReconstruct(ec stgmod.EC, inBlockIndexes []int, streams ...*AgentStream) *MultiStream { | |||
| func (b *AgentPlanBuilder) ECReconstruct(ec cdssdk.ECRedundancy, inBlockIndexes []int, streams ...*AgentStream) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var inputStrIDs []ioswitch.StreamID | |||
| @@ -114,3 +158,74 @@ func (b *AgentPlanBuilder) ECReconstruct(ec stgmod.EC, inBlockIndexes []int, str | |||
| return mstr | |||
| } | |||
| func (b *AgentStream) ChunkedSplit(chunkSize int, streamCount int, paddingZeros bool) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var outputStrIDs []ioswitch.StreamID | |||
| for i := 0; i < streamCount; i++ { | |||
| info := b.owner.owner.newStream() | |||
| mstr.Streams = append(mstr.Streams, &AgentStream{ | |||
| owner: b.owner, | |||
| info: info, | |||
| }) | |||
| outputStrIDs = append(outputStrIDs, info.ID) | |||
| } | |||
| b.owner.ops = append(b.owner.ops, &ops.ChunkedSplit{ | |||
| InputID: b.info.ID, | |||
| OutputIDs: outputStrIDs, | |||
| ChunkSize: chunkSize, | |||
| StreamCount: streamCount, | |||
| PaddingZeros: paddingZeros, | |||
| }) | |||
| return mstr | |||
| } | |||
// ToExecutor returns a handle addressing this stream to the executor,
// recording the agent node that produces it.
func (s *AgentStream) ToExecutor() *ToExecutorStream {
	return &ToExecutorStream{
		info:     s.info,
		fromNode: &s.owner.node,
	}
}
| func (b *AgentPlanBuilder) Join(length int64, streams ...*AgentStream) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| var inputStrIDs []ioswitch.StreamID | |||
| for _, str := range streams { | |||
| inputStrIDs = append(inputStrIDs, str.info.ID) | |||
| } | |||
| b.ops = append(b.ops, &ops.Join{ | |||
| InputIDs: inputStrIDs, | |||
| OutputID: agtStr.info.ID, | |||
| Length: length, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) ChunkedJoin(chunkSize int, streams ...*AgentStream) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| var inputStrIDs []ioswitch.StreamID | |||
| for _, str := range streams { | |||
| inputStrIDs = append(inputStrIDs, str.info.ID) | |||
| } | |||
| b.ops = append(b.ops, &ops.ChunkedJoin{ | |||
| InputIDs: inputStrIDs, | |||
| OutputID: agtStr.info.ID, | |||
| ChunkSize: chunkSize, | |||
| }) | |||
| return agtStr | |||
| } | |||
| @@ -4,9 +4,9 @@ import ( | |||
| "fmt" | |||
| "github.com/google/uuid" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops" | |||
| ) | |||
| type StreamInfo struct { | |||
| @@ -15,7 +15,7 @@ type StreamInfo struct { | |||
| type PlanBuilder struct { | |||
| streams []*StreamInfo | |||
| agentPlans map[int64]*AgentPlanBuilder | |||
| agentPlans map[cdssdk.NodeID]*AgentPlanBuilder | |||
| } | |||
| func (b *PlanBuilder) Build() (*ComposedPlan, error) { | |||
| @@ -49,7 +49,7 @@ func (b *PlanBuilder) newStream() *StreamInfo { | |||
| func NewPlanBuilder() PlanBuilder { | |||
| return PlanBuilder{ | |||
| agentPlans: make(map[int64]*AgentPlanBuilder), | |||
| agentPlans: make(map[cdssdk.NodeID]*AgentPlanBuilder), | |||
| } | |||
| } | |||
| @@ -92,121 +92,6 @@ type ToExecutorStream struct { | |||
| fromNode *model.Node | |||
| } | |||
// AgentStream is a handle to a stream produced on a specific agent.
type AgentStream struct {
	owner *AgentPlanBuilder
	info  *StreamInfo
}
// IPFSWrite appends an op that stores this stream's content into IPFS and
// publishes the resulting file hash under resultKey (when non-empty).
func (s *AgentStream) IPFSWrite(resultKey string) {
	s.owner.ops = append(s.owner.ops, &ops.IPFSWrite{
		Input:     s.info.ID,
		ResultKey: resultKey,
	})
}
// ChunkSplit appends an op that splits this stream into streamCount output
// streams with the given chunk size; paddingZeros is forwarded to the op.
func (b *AgentStream) ChunkSplit(chunkSize int, streamCount int, paddingZeros bool) *MultiStream {
	mstr := &MultiStream{}

	var outputStrIDs []ioswitch.StreamID
	for i := 0; i < streamCount; i++ {
		info := b.owner.owner.newStream()
		mstr.Streams = append(mstr.Streams, &AgentStream{
			owner: b.owner,
			info:  info,
		})
		outputStrIDs = append(outputStrIDs, info.ID)
	}

	b.owner.ops = append(b.owner.ops, &ops.ChunkedSplit{
		InputID:      b.info.ID,
		OutputIDs:    outputStrIDs,
		ChunkSize:    chunkSize,
		StreamCount:  streamCount,
		PaddingZeros: paddingZeros,
	})
	return mstr
}
// ToExecutor returns a handle addressing this stream to the executor,
// recording the agent node that produces it.
func (s *AgentStream) ToExecutor() *ToExecutorStream {
	return &ToExecutorStream{
		info:     s.info,
		fromNode: &s.owner.node,
	}
}
// AgentPlanBuilder accumulates the ops that one agent node will execute.
type AgentPlanBuilder struct {
	owner *PlanBuilder // the composed-plan builder this agent belongs to
	node  model.Node
	ops   []ioswitch.Op
}
// IPFSRead appends an op that reads fileHash from IPFS on this agent and
// returns a handle to the stream it will produce.
func (b *AgentPlanBuilder) IPFSRead(fileHash string) *AgentStream {
	agtStr := &AgentStream{
		owner: b,
		info:  b.owner.newStream(),
	}

	b.ops = append(b.ops, &ops.IPFSRead{
		Output:   agtStr.info.ID,
		FileHash: fileHash,
	})
	return agtStr
}
// Join appends an op that concatenates the given streams into one stream of
// the given total length, and returns a handle to that stream.
func (b *AgentPlanBuilder) Join(length int64, streams ...*AgentStream) *AgentStream {
	agtStr := &AgentStream{
		owner: b,
		info:  b.owner.newStream(),
	}

	var inputStrIDs []ioswitch.StreamID
	for _, str := range streams {
		inputStrIDs = append(inputStrIDs, str.info.ID)
	}

	b.ops = append(b.ops, &ops.Join{
		InputIDs: inputStrIDs,
		OutputID: agtStr.info.ID,
		Length:   length,
	})
	return agtStr
}
// ChunkJoin appends a ChunkedJoin op over the given streams with the given
// chunk size, and returns a handle to the resulting stream.
func (b *AgentPlanBuilder) ChunkJoin(chunkSize int, streams ...*AgentStream) *AgentStream {
	agtStr := &AgentStream{
		owner: b,
		info:  b.owner.newStream(),
	}

	var inputStrIDs []ioswitch.StreamID
	for _, str := range streams {
		inputStrIDs = append(inputStrIDs, str.info.ID)
	}

	b.ops = append(b.ops, &ops.ChunkedJoin{
		InputIDs:  inputStrIDs,
		OutputID:  agtStr.info.ID,
		ChunkSize: chunkSize,
	})
	return agtStr
}
// Build finalizes this agent's accumulated ops into an AgentPlan under the
// given plan ID.
func (b *AgentPlanBuilder) Build(planID ioswitch.PlanID) (AgentPlan, error) {
	plan := ioswitch.Plan{
		ID:  planID,
		Ops: b.ops,
	}

	return AgentPlan{
		Plan: plan,
		Node: b.node,
	}, nil
}
// MultiStream groups several agent streams produced by one op.
type MultiStream struct {
	Streams []*AgentStream
}
| @@ -0,0 +1,313 @@ | |||
| package iterator | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "reflect" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| stgmodels "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ec" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
// DownloadingObjectIterator iterates over objects as they are downloaded.
type DownloadingObjectIterator = Iterator[*IterDownloadingObject]

// IterDownloadingObject is one downloaded object plus its content stream.
type IterDownloadingObject struct {
	Object model.Object
	File   io.ReadCloser
}

// DownloadNodeInfo describes a candidate node for downloading data.
type DownloadNodeInfo struct {
	Node           model.Node
	IsSameLocation bool // true when the node shares the client's location
}

// DownloadContext carries the services a download needs.
type DownloadContext struct {
	Distlock *distlock.Service
}

// DownloadObjectIterator walks a list of object details and downloads each
// object in turn, dispatching on its redundancy type.
type DownloadObjectIterator struct {
	OnClosing func() // invoked from Close, if set

	objectDetails []stgmodels.ObjectDetail
	currentIndex  int

	downloadCtx *DownloadContext
}
| func NewDownloadObjectIterator(objectDetails []stgmodels.ObjectDetail, downloadCtx *DownloadContext) *DownloadObjectIterator { | |||
| return &DownloadObjectIterator{ | |||
| objectDetails: objectDetails, | |||
| downloadCtx: downloadCtx, | |||
| } | |||
| } | |||
| func (i *DownloadObjectIterator) MoveNext() (*IterDownloadingObject, error) { | |||
| coorCli, err := stgglb.CoordinatorMQPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new coordinator client: %w", err) | |||
| } | |||
| defer stgglb.CoordinatorMQPool.Release(coorCli) | |||
| if i.currentIndex >= len(i.objectDetails) { | |||
| return nil, ErrNoMoreItem | |||
| } | |||
| item, err := i.doMove(coorCli) | |||
| i.currentIndex++ | |||
| return item, err | |||
| } | |||
| func (iter *DownloadObjectIterator) doMove(coorCli *coormq.Client) (*IterDownloadingObject, error) { | |||
| obj := iter.objectDetails[iter.currentIndex] | |||
| switch red := obj.Object.Redundancy.(type) { | |||
| case *cdssdk.RepRedundancy: | |||
| reader, err := iter.downloadRepObject(coorCli, iter.downloadCtx, obj, red) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("downloading rep object: %w", err) | |||
| } | |||
| return &IterDownloadingObject{ | |||
| Object: obj.Object, | |||
| File: reader, | |||
| }, nil | |||
| case *cdssdk.ECRedundancy: | |||
| reader, err := iter.downloadECObject(coorCli, iter.downloadCtx, obj, red) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("downloading ec object: %w", err) | |||
| } | |||
| return &IterDownloadingObject{ | |||
| Object: obj.Object, | |||
| File: reader, | |||
| }, nil | |||
| } | |||
| return nil, fmt.Errorf("unsupported redundancy type: %v", reflect.TypeOf(obj.Object.Redundancy)) | |||
| } | |||
// Close invokes the OnClosing callback, if one was set.
func (i *DownloadObjectIterator) Close() {
	if i.OnClosing != nil {
		i.OnClosing()
	}
}
| // chooseDownloadNode 选择一个下载节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (i *DownloadObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo { | |||
| sameLocationEntries := lo.Filter(entries, func(e DownloadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationEntries) > 0 { | |||
| return sameLocationEntries[rand.Intn(len(sameLocationEntries))] | |||
| } | |||
| return entries[rand.Intn(len(entries))] | |||
| } | |||
| func (iter *DownloadObjectIterator) downloadRepObject(coorCli *coormq.Client, ctx *DownloadContext, obj stgmodels.ObjectDetail, repRed *cdssdk.RepRedundancy) (io.ReadCloser, error) { | |||
| //采取直接读,优先选内网节点 | |||
| var chosenNodes []DownloadNodeInfo | |||
| for i := range obj.Blocks { | |||
| if len(obj.Blocks[i].CachedNodeIDs) == 0 { | |||
| return nil, fmt.Errorf("no node has block %d", obj.Blocks[i].Index) | |||
| } | |||
| getNodesResp, err := coorCli.GetNodes(coormq.NewGetNodes(obj.Blocks[i].CachedNodeIDs)) | |||
| if err != nil { | |||
| continue | |||
| } | |||
| downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo { | |||
| return DownloadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == stgglb.Local.LocationID, | |||
| } | |||
| }) | |||
| chosenNodes = append(chosenNodes, iter.chooseDownloadNode(downloadNodes)) | |||
| } | |||
| var fileStrs []io.ReadCloser | |||
| for i := range obj.Blocks { | |||
| str, err := downloadFile(ctx, chosenNodes[i], obj.Blocks[i].FileHash) | |||
| if err != nil { | |||
| for i -= 1; i >= 0; i-- { | |||
| fileStrs[i].Close() | |||
| } | |||
| return nil, fmt.Errorf("donwloading file: %w", err) | |||
| } | |||
| fileStrs = append(fileStrs, str) | |||
| } | |||
| fileReaders, filesCloser := myio.ToReaders(fileStrs) | |||
| return myio.AfterReadClosed(myio.Length(myio.Join(fileReaders), obj.Object.Size), func(c io.ReadCloser) { | |||
| filesCloser() | |||
| }), nil | |||
| } | |||
// downloadECObject streams an EC-coded object: it gathers k blocks (each
// from a chosen node, preferring same-location nodes), reconstructs the
// original data with Reed-Solomon and returns a length-limited reader.
func (iter *DownloadObjectIterator) downloadECObject(coorCli *coormq.Client, ctx *DownloadContext, obj stgmodels.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, error) {
	// Read blocks directly, preferring nodes on the internal network.
	var chosenNodes []DownloadNodeInfo
	var chosenBlocks []stgmodels.ObjectBlockDetail
	for i := range obj.Blocks {
		if len(chosenBlocks) == ecRed.K {
			break
		}

		// It is fine if a block is cached nowhere or the node query fails,
		// as long as we can gather info for k blocks in total.
		if len(obj.Blocks[i].CachedNodeIDs) == 0 {
			continue
		}

		getNodesResp, err := coorCli.GetNodes(coormq.NewGetNodes(obj.Blocks[i].CachedNodeIDs))
		if err != nil {
			continue
		}

		downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo {
			return DownloadNodeInfo{
				Node:           node,
				IsSameLocation: node.LocationID == stgglb.Local.LocationID,
			}
		})

		// chosenBlocks and chosenNodes are appended in lockstep.
		chosenBlocks = append(chosenBlocks, obj.Blocks[i])
		chosenNodes = append(chosenNodes, iter.chooseDownloadNode(downloadNodes))
	}

	if len(chosenBlocks) < ecRed.K {
		return nil, fmt.Errorf("no enough blocks to reconstruct the file, want %d, get only %d", ecRed.K, len(chosenBlocks))
	}

	var fileStrs []io.ReadCloser

	rs, err := ec.NewRs(ecRed.K, ecRed.N, ecRed.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("new rs: %w", err)
	}

	for i := range chosenBlocks {
		str, err := downloadFile(ctx, chosenNodes[i], chosenBlocks[i].FileHash)
		if err != nil {
			// Close the streams opened so far before bailing out.
			for i -= 1; i >= 0; i-- {
				fileStrs[i].Close()
			}
			return nil, fmt.Errorf("donwloading file: %w", err)
		}

		fileStrs = append(fileStrs, str)
	}

	fileReaders, filesCloser := myio.ToReaders(fileStrs)

	var indexes []int
	for _, b := range chosenBlocks {
		indexes = append(indexes, b.Index)
	}

	// Reconstruct the data streams and join them chunk-wise; both closers
	// run when the caller closes the returned reader.
	outputs, outputsCloser := myio.ToReaders(rs.ReconstructData(fileReaders, indexes))
	return myio.AfterReadClosed(myio.Length(myio.ChunkedJoin(outputs, int(ecRed.ChunkSize)), obj.Object.Size), func(c io.ReadCloser) {
		filesCloser()
		outputsCloser()
	}), nil
}
// downloadFile fetches the file with the given hash, trying the local IPFS
// daemon first (when a pool is configured) and falling back to the chosen
// node over gRPC.
func downloadFile(ctx *DownloadContext, node DownloadNodeInfo, fileHash string) (io.ReadCloser, error) {
	// If the client and the node share a location, use the internal address.
	nodeIP := node.Node.ExternalIP
	grpcPort := node.Node.ExternalGRPCPort
	if node.IsSameLocation {
		nodeIP = node.Node.LocalIP
		grpcPort = node.Node.LocalGRPCPort
		logger.Infof("client and node %d are at the same location, use local ip", node.Node.NodeID)
	}

	if stgglb.IPFSPool != nil {
		logger.Infof("try to use local IPFS to download file")

		reader, err := downloadFromLocalIPFS(ctx, fileHash)
		if err == nil {
			return reader, nil
		}

		logger.Warnf("download from local IPFS failed, so try to download from node %s, err: %s", nodeIP, err.Error())
	}

	return downloadFromNode(ctx, node.Node.NodeID, nodeIP, grpcPort, fileHash)
}
| func downloadFromNode(ctx *DownloadContext, nodeID cdssdk.NodeID, nodeIP string, grpcPort int, fileHash string) (io.ReadCloser, error) { | |||
| // 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(nodeID, fileHash). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| // 连接grpc | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new agent grpc client: %w", err) | |||
| } | |||
| reader, err := agtCli.GetIPFSFile(fileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting ipfs file: %w", err) | |||
| } | |||
| reader = myio.AfterReadClosed(reader, func(io.ReadCloser) { | |||
| mutex.Unlock() | |||
| }) | |||
| return reader, nil | |||
| } | |||
| func downloadFromLocalIPFS(ctx *DownloadContext, fileHash string) (io.ReadCloser, error) { | |||
| onClosed := func() {} | |||
| if stgglb.Local.NodeID != nil { | |||
| // 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(*stgglb.Local.NodeID, fileHash). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| onClosed = func() { | |||
| mutex.Unlock() | |||
| } | |||
| } | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new ipfs client: %w", err) | |||
| } | |||
| reader, err := ipfsCli.OpenRead(fileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("read ipfs file failed, err: %w", err) | |||
| } | |||
| reader = myio.AfterReadClosed(reader, func(io.ReadCloser) { | |||
| onClosed() | |||
| }) | |||
| return reader, nil | |||
| } | |||
| @@ -1,171 +0,0 @@ | |||
| package iterator | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "github.com/samber/lo" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| stgmodels "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ec" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
// ECObjectIterator walks a list of EC-coded objects and downloads each one
// by reconstructing it from k blocks.
type ECObjectIterator struct {
	OnClosing func() // invoked from Close, if set

	objects      []model.Object
	objectECData []stgmodels.ObjectECData
	currentIndex int
	inited       bool // true once the client location has been resolved

	ecInfo cdssdk.ECRedundancyInfo
	ec     model.Ec

	downloadCtx *DownloadContext
	cliLocation model.Location
}
// NewECObjectIterator creates an iterator over the given EC-coded objects.
func NewECObjectIterator(objects []model.Object, objectECData []stgmodels.ObjectECData, ecInfo cdssdk.ECRedundancyInfo, ec model.Ec, downloadCtx *DownloadContext) *ECObjectIterator {
	return &ECObjectIterator{
		objects:      objects,
		objectECData: objectECData,
		ecInfo:       ecInfo,
		ec:           ec,
		downloadCtx:  downloadCtx,
	}
}
// MoveNext advances the iterator, lazily resolving the client's location on
// first use, and returns the next downloaded object or ErrNoMoreItem.
func (i *ECObjectIterator) MoveNext() (*IterDownloadingObject, error) {
	// TODO locking
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	if !i.inited {
		i.inited = true

		findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(stgglb.Local.ExternalIP))
		if err != nil {
			return nil, fmt.Errorf("finding client location: %w", err)
		}
		i.cliLocation = findCliLocResp.Location
	}

	if i.currentIndex >= len(i.objects) {
		return nil, ErrNoMoreItem
	}

	item, err := i.doMove(coorCli)
	i.currentIndex++
	return item, err
}
// doMove downloads the object at the current index by collecting k EC
// blocks and reconstructing the original data.
func (iter *ECObjectIterator) doMove(coorCli *coormq.Client) (*IterDownloadingObject, error) {
	obj := iter.objects[iter.currentIndex]
	ecData := iter.objectECData[iter.currentIndex]

	// Read blocks directly, preferring nodes on the internal network.
	var chosenNodes []DownloadNodeInfo
	var chosenBlocks []stgmodels.ObjectBlockData
	for i := range ecData.Blocks {
		if len(chosenBlocks) == iter.ec.EcK {
			break
		}

		// It is fine if a block is cached nowhere or the node query fails,
		// as long as we can gather info for k blocks in total.
		if len(ecData.Blocks[i].NodeIDs) == 0 {
			continue
		}

		getNodesResp, err := coorCli.GetNodes(coormq.NewGetNodes(ecData.Blocks[i].NodeIDs))
		if err != nil {
			continue
		}

		downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo {
			return DownloadNodeInfo{
				Node:           node,
				IsSameLocation: node.LocationID == iter.cliLocation.LocationID,
			}
		})

		// chosenBlocks and chosenNodes are appended in lockstep.
		chosenBlocks = append(chosenBlocks, ecData.Blocks[i])
		chosenNodes = append(chosenNodes, iter.chooseDownloadNode(downloadNodes))
	}

	if len(chosenBlocks) < iter.ec.EcK {
		return nil, fmt.Errorf("no enough blocks to reconstruct the file, want %d, get only %d", iter.ec.EcK, len(chosenBlocks))
	}

	reader, err := iter.downloadEcObject(iter.downloadCtx, obj.Size, chosenNodes, chosenBlocks)
	if err != nil {
		return nil, fmt.Errorf("ec read failed, err: %w", err)
	}

	return &IterDownloadingObject{
		Object: obj,
		File:   reader,
	}, nil
}
// Close invokes the OnClosing callback, if one was set.
func (i *ECObjectIterator) Close() {
	if i.OnClosing != nil {
		i.OnClosing()
	}
}
// chooseDownloadNode picks a download node:
//  1. prefer a random node located in the same region as the client;
//  2. otherwise pick a random node from all candidates.
func (i *ECObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo {
	sameLocationEntries := lo.Filter(entries, func(e DownloadNodeInfo, i int) bool { return e.IsSameLocation })
	if len(sameLocationEntries) > 0 {
		return sameLocationEntries[rand.Intn(len(sameLocationEntries))]
	}

	return entries[rand.Intn(len(entries))]
}
// downloadEcObject downloads the chosen blocks (nodes and blocks are
// index-aligned), reconstructs the original data with Reed-Solomon and
// returns a reader limited to fileSize bytes.
func (iter *ECObjectIterator) downloadEcObject(ctx *DownloadContext, fileSize int64, nodes []DownloadNodeInfo, blocks []stgmodels.ObjectBlockData) (io.ReadCloser, error) {
	var fileStrs []io.ReadCloser

	rs, err := ec.NewRs(iter.ec.EcK, iter.ec.EcN, iter.ecInfo.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("new rs: %w", err)
	}

	for i := range blocks {
		str, err := downloadFile(ctx, nodes[i], blocks[i].FileHash)
		if err != nil {
			// Close the streams opened so far before bailing out.
			for i -= 1; i >= 0; i-- {
				fileStrs[i].Close()
			}
			return nil, fmt.Errorf("donwloading file: %w", err)
		}

		fileStrs = append(fileStrs, str)
	}

	fileReaders, filesCloser := myio.ToReaders(fileStrs)

	var indexes []int
	for _, b := range blocks {
		indexes = append(indexes, b.Index)
	}

	// Reconstruct the data streams and join them chunk-wise; both closers
	// run when the caller closes the returned reader.
	outputs, outputsCloser := myio.ToReaders(rs.ReconstructData(fileReaders, indexes))
	return myio.AfterReadClosed(myio.Length(myio.ChunkedJoin(outputs, int(iter.ecInfo.ChunkSize)), fileSize), func(c io.ReadCloser) {
		filesCloser()
		outputsCloser()
	}), nil
}
| @@ -1,212 +0,0 @@ | |||
| package iterator | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| type DownloadingObjectIterator = Iterator[*IterDownloadingObject] | |||
// RepObjectIterator downloads the objects of a rep-redundancy package one by
// one, picking a download node for each object's file.
type RepObjectIterator struct {
	OnClosing func() // optional hook invoked when the iterator is closed

	objects       []model.Object
	objectRepData []stgmod.ObjectRepData // rep data aligned with objects by index
	currentIndex  int
	inited        bool // whether the client location has been resolved
	downloadCtx   *DownloadContext
	cliLocation   model.Location // client's location, used to prefer nearby nodes
}
// IterDownloadingObject is one item produced by a downloading iterator.
type IterDownloadingObject struct {
	Object model.Object
	File   io.ReadCloser // opened stream of the object's file; the caller must close it
}

// DownloadNodeInfo describes a candidate node to download a file from.
type DownloadNodeInfo struct {
	Node           model.Node
	IsSameLocation bool // true if the node is in the same region as the client
}

// DownloadContext carries the shared dependencies needed while downloading.
type DownloadContext struct {
	Distlock *distlock.Service // distributed lock service guarding IPFS reads
}
| func NewRepObjectIterator(objects []model.Object, objectRepData []stgmod.ObjectRepData, downloadCtx *DownloadContext) *RepObjectIterator { | |||
| return &RepObjectIterator{ | |||
| objects: objects, | |||
| objectRepData: objectRepData, | |||
| downloadCtx: downloadCtx, | |||
| } | |||
| } | |||
// MoveNext advances the iterator and returns the next object together with an
// opened reader of its file content. It returns ErrNoMoreItem after the last
// object has been yielded.
func (i *RepObjectIterator) MoveNext() (*IterDownloadingObject, error) {
	// TODO: add locking
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	if !i.inited {
		// NOTE(review): inited is set before the lookup succeeds, so a failed
		// FindClientLocation is never retried on later calls — confirm intended.
		i.inited = true

		findCliLocResp, err := coorCli.FindClientLocation(coormq.NewFindClientLocation(stgglb.Local.ExternalIP))
		if err != nil {
			return nil, fmt.Errorf("finding client location: %w", err)
		}
		i.cliLocation = findCliLocResp.Location
	}

	if i.currentIndex >= len(i.objects) {
		return nil, ErrNoMoreItem
	}

	item, err := i.doMove(coorCli)
	i.currentIndex++
	return item, err
}
// doMove downloads the object at currentIndex: it asks the coordinator for the
// nodes holding the object's rep data, picks one (preferring nodes in the same
// region as the client), and opens a reader on the file.
func (i *RepObjectIterator) doMove(coorCli *coormq.Client) (*IterDownloadingObject, error) {
	repData := i.objectRepData[i.currentIndex]
	if len(repData.NodeIDs) == 0 {
		return nil, fmt.Errorf("no node has this file %s", repData.FileHash)
	}

	getNodesResp, err := coorCli.GetNodes(coormq.NewGetNodes(repData.NodeIDs))
	if err != nil {
		return nil, fmt.Errorf("getting nodes: %w", err)
	}

	// Mark which candidate nodes share the client's location so the chooser
	// can prefer them.
	downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo {
		return DownloadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == i.cliLocation.LocationID,
		}
	})

	reader, err := downloadFile(i.downloadCtx, i.chooseDownloadNode(downloadNodes), repData.FileHash)
	if err != nil {
		return nil, fmt.Errorf("rep read failed, err: %w", err)
	}

	return &IterDownloadingObject{
		Object: i.objects[i.currentIndex],
		File:   reader,
	}, nil
}
| func (i *RepObjectIterator) Close() { | |||
| if i.OnClosing != nil { | |||
| i.OnClosing() | |||
| } | |||
| } | |||
| // chooseDownloadNode 选择一个下载节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (i *RepObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo { | |||
| sameLocationEntries := lo.Filter(entries, func(e DownloadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationEntries) > 0 { | |||
| return sameLocationEntries[rand.Intn(len(sameLocationEntries))] | |||
| } | |||
| return entries[rand.Intn(len(entries))] | |||
| } | |||
// downloadFile opens a reader on the file identified by fileHash. If a local
// IPFS daemon is configured it is tried first; on failure the function falls
// back to fetching through the given node's agent over gRPC.
func downloadFile(ctx *DownloadContext, node DownloadNodeInfo, fileHash string) (io.ReadCloser, error) {
	// If the client and the node are in the same region, connect to the node
	// via its intranet address.
	nodeIP := node.Node.ExternalIP
	grpcPort := node.Node.ExternalGRPCPort
	if node.IsSameLocation {
		nodeIP = node.Node.LocalIP
		grpcPort = node.Node.LocalGRPCPort
		logger.Infof("client and node %d are at the same location, use local ip", node.Node.NodeID)
	}

	if stgglb.IPFSPool != nil {
		logger.Infof("try to use local IPFS to download file")

		reader, err := downloadFromLocalIPFS(ctx, fileHash)
		if err == nil {
			return reader, nil
		}

		logger.Warnf("download from local IPFS failed, so try to download from node %s, err: %s", nodeIP, err.Error())
	}

	return downloadFromNode(ctx, node.Node.NodeID, nodeIP, grpcPort, fileHash)
}
| func downloadFromNode(ctx *DownloadContext, nodeID int64, nodeIP string, grpcPort int, fileHash string) (io.ReadCloser, error) { | |||
| // 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(nodeID, fileHash). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| // 连接grpc | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new agent grpc client: %w", err) | |||
| } | |||
| reader, err := agtCli.GetIPFSFile(fileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting ipfs file: %w", err) | |||
| } | |||
| reader = myio.AfterReadClosed(reader, func(io.ReadCloser) { | |||
| mutex.Unlock() | |||
| }) | |||
| return reader, nil | |||
| } | |||
| func downloadFromLocalIPFS(ctx *DownloadContext, fileHash string) (io.ReadCloser, error) { | |||
| onClosed := func() {} | |||
| if stgglb.Local.NodeID != nil { | |||
| // 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(*stgglb.Local.NodeID, fileHash). | |||
| MutexLock(ctx.Distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| onClosed = func() { | |||
| mutex.Unlock() | |||
| } | |||
| } | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new ipfs client: %w", err) | |||
| } | |||
| reader, err := ipfsCli.OpenRead(fileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("read ipfs file failed, err: %w", err) | |||
| } | |||
| reader = myio.AfterReadClosed(reader, func(io.ReadCloser) { | |||
| onClosed() | |||
| }) | |||
| return reader, nil | |||
| } | |||
| @@ -61,15 +61,15 @@ var _ = Register(Service.StartCacheMovePackage) | |||
| type StartCacheMovePackage struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type StartCacheMovePackageResp struct { | |||
| mq.MessageBodyBase | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartCacheMovePackage(userID int64, packageID int64) *StartCacheMovePackage { | |||
| func NewStartCacheMovePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) *StartCacheMovePackage { | |||
| return &StartCacheMovePackage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| @@ -94,9 +94,8 @@ type WaitCacheMovePackage struct { | |||
| } | |||
| type WaitCacheMovePackageResp struct { | |||
| mq.MessageBodyBase | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| CacheInfos []cdssdk.ObjectCacheInfo `json:"cacheInfos"` | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| } | |||
| func NewWaitCacheMovePackage(taskID string, waitTimeoutMs int64) *WaitCacheMovePackage { | |||
| @@ -105,11 +104,10 @@ func NewWaitCacheMovePackage(taskID string, waitTimeoutMs int64) *WaitCacheMoveP | |||
| WaitTimeoutMs: waitTimeoutMs, | |||
| } | |||
| } | |||
| func NewWaitCacheMovePackageResp(isComplete bool, err string, cacheInfos []cdssdk.ObjectCacheInfo) *WaitCacheMovePackageResp { | |||
| func NewWaitCacheMovePackageResp(isComplete bool, err string) *WaitCacheMovePackageResp { | |||
| return &WaitCacheMovePackageResp{ | |||
| IsComplete: isComplete, | |||
| Error: err, | |||
| CacheInfos: cacheInfos, | |||
| } | |||
| } | |||
| func (client *Client) WaitCacheMovePackage(msg *WaitCacheMovePackage, opts ...mq.RequestOption) (*WaitCacheMovePackageResp, error) { | |||
| @@ -2,16 +2,17 @@ package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" | |||
| ) | |||
| type Client struct { | |||
| rabbitCli *mq.RabbitMQTransport | |||
| id int64 | |||
| id cdssdk.NodeID | |||
| } | |||
| func NewClient(id int64, cfg *stgmq.Config) (*Client, error) { | |||
| rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.MakeAgentQueueName(id), "") | |||
| func NewClient(id cdssdk.NodeID, cfg *stgmq.Config) (*Client, error) { | |||
| rabbitCli, err := mq.NewRabbitMQTransport(cfg.MakeConnectingURL(), stgmq.MakeAgentQueueName(int64(id)), "") | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -27,7 +28,7 @@ func (c *Client) Close() { | |||
| } | |||
| type Pool interface { | |||
| Acquire(id int64) (*Client, error) | |||
| Acquire(id cdssdk.NodeID) (*Client, error) | |||
| Release(cli *Client) | |||
| } | |||
| @@ -40,7 +41,7 @@ func NewPool(mqcfg *stgmq.Config) Pool { | |||
| mqcfg: mqcfg, | |||
| } | |||
| } | |||
| func (p *pool) Acquire(id int64) (*Client, error) { | |||
| func (p *pool) Acquire(id cdssdk.NodeID) (*Client, error) { | |||
| return NewClient(id, p.mqcfg) | |||
| } | |||
| @@ -24,16 +24,16 @@ var _ = Register(Service.StartStorageLoadPackage) | |||
| type StartStorageLoadPackage struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| StorageID int64 `json:"storageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| } | |||
| type StartStorageLoadPackageResp struct { | |||
| mq.MessageBodyBase | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartStorageLoadPackage(userID int64, packageID int64, storageID int64) *StartStorageLoadPackage { | |||
| func NewStartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StartStorageLoadPackage { | |||
| return &StartStorageLoadPackage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| @@ -91,7 +91,7 @@ const ( | |||
| type StorageCheck struct { | |||
| mq.MessageBodyBase | |||
| StorageID int64 `json:"storageID"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| Directory string `json:"directory"` | |||
| IsComplete bool `json:"isComplete"` | |||
| Packages []model.StoragePackage `json:"packages"` | |||
| @@ -102,12 +102,12 @@ type StorageCheckResp struct { | |||
| Entries []StorageCheckRespEntry `json:"entries"` | |||
| } | |||
| type StorageCheckRespEntry struct { | |||
| PackageID int64 `json:"packageID"` | |||
| UserID int64 `json:"userID"` | |||
| Operation string `json:"operation"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| Operation string `json:"operation"` | |||
| } | |||
| func NewStorageCheck(storageID int64, directory string, isComplete bool, packages []model.StoragePackage) *StorageCheck { | |||
| func NewStorageCheck(storageID cdssdk.StorageID, directory string, isComplete bool, packages []model.StoragePackage) *StorageCheck { | |||
| return &StorageCheck{ | |||
| StorageID: storageID, | |||
| Directory: directory, | |||
| @@ -121,7 +121,7 @@ func NewStorageCheckResp(dirState string, entries []StorageCheckRespEntry) *Stor | |||
| Entries: entries, | |||
| } | |||
| } | |||
| func NewStorageCheckRespEntry(packageID int64, userID int64, op string) StorageCheckRespEntry { | |||
| func NewStorageCheckRespEntry(packageID cdssdk.PackageID, userID cdssdk.UserID, op string) StorageCheckRespEntry { | |||
| return StorageCheckRespEntry{ | |||
| PackageID: packageID, | |||
| UserID: userID, | |||
| @@ -137,27 +137,25 @@ var _ = Register(Service.StartStorageCreatePackage) | |||
| type StartStorageCreatePackage struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| StorageID int64 `json:"storageID"` | |||
| Path string `json:"path"` | |||
| Redundancy cdssdk.TypedRedundancyInfo `json:"redundancy"` | |||
| NodeAffinity *int64 `json:"nodeAffinity"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| Path string `json:"path"` | |||
| NodeAffinity *cdssdk.NodeID `json:"nodeAffinity"` | |||
| } | |||
| type StartStorageCreatePackageResp struct { | |||
| mq.MessageBodyBase | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartStorageCreatePackage(userID int64, bucketID int64, name string, storageID int64, path string, redundancy cdssdk.TypedRedundancyInfo, nodeAffinity *int64) *StartStorageCreatePackage { | |||
| func NewStartStorageCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string, nodeAffinity *cdssdk.NodeID) *StartStorageCreatePackage { | |||
| return &StartStorageCreatePackage{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| Name: name, | |||
| StorageID: storageID, | |||
| Path: path, | |||
| Redundancy: redundancy, | |||
| NodeAffinity: nodeAffinity, | |||
| } | |||
| } | |||
| @@ -180,9 +178,9 @@ type WaitStorageCreatePackage struct { | |||
| } | |||
| type WaitStorageCreatePackageResp struct { | |||
| mq.MessageBodyBase | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| PackageID int64 `json:"packageID"` | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| func NewWaitStorageCreatePackage(taskID string, waitTimeoutMs int64) *WaitStorageCreatePackage { | |||
| @@ -191,7 +189,7 @@ func NewWaitStorageCreatePackage(taskID string, waitTimeoutMs int64) *WaitStorag | |||
| WaitTimeoutMs: waitTimeoutMs, | |||
| } | |||
| } | |||
| func NewWaitStorageCreatePackageResp(isComplete bool, err string, packageID int64) *WaitStorageCreatePackageResp { | |||
| func NewWaitStorageCreatePackageResp(isComplete bool, err string, packageID cdssdk.PackageID) *WaitStorageCreatePackageResp { | |||
| return &WaitStorageCreatePackageResp{ | |||
| IsComplete: isComplete, | |||
| Error: err, | |||
| @@ -2,6 +2,7 @@ package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -20,14 +21,14 @@ var _ = Register(Service.GetUserBuckets) | |||
| type GetUserBuckets struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| } | |||
| type GetUserBucketsResp struct { | |||
| mq.MessageBodyBase | |||
| Buckets []model.Bucket `json:"buckets"` | |||
| } | |||
| func NewGetUserBuckets(userID int64) *GetUserBuckets { | |||
| func NewGetUserBuckets(userID cdssdk.UserID) *GetUserBuckets { | |||
| return &GetUserBuckets{ | |||
| UserID: userID, | |||
| } | |||
| @@ -46,15 +47,15 @@ var _ = Register(Service.GetBucketPackages) | |||
| type GetBucketPackages struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| } | |||
| type GetBucketPackagesResp struct { | |||
| mq.MessageBodyBase | |||
| Packages []model.Package `json:"packages"` | |||
| } | |||
| func NewGetBucketPackages(userID int64, bucketID int64) *GetBucketPackages { | |||
| func NewGetBucketPackages(userID cdssdk.UserID, bucketID cdssdk.BucketID) *GetBucketPackages { | |||
| return &GetBucketPackages{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| @@ -74,21 +75,21 @@ var _ = Register(Service.CreateBucket) | |||
| type CreateBucket struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| BucketName string `json:"bucketName"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketName string `json:"bucketName"` | |||
| } | |||
| type CreateBucketResp struct { | |||
| mq.MessageBodyBase | |||
| BucketID int64 `json:"bucketID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| } | |||
| func NewCreateBucket(userID int64, bucketName string) *CreateBucket { | |||
| func NewCreateBucket(userID cdssdk.UserID, bucketName string) *CreateBucket { | |||
| return &CreateBucket{ | |||
| UserID: userID, | |||
| BucketName: bucketName, | |||
| } | |||
| } | |||
| func NewCreateBucketResp(bucketID int64) *CreateBucketResp { | |||
| func NewCreateBucketResp(bucketID cdssdk.BucketID) *CreateBucketResp { | |||
| return &CreateBucketResp{ | |||
| BucketID: bucketID, | |||
| } | |||
| @@ -102,14 +103,14 @@ var _ = Register(Service.DeleteBucket) | |||
| type DeleteBucket struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| } | |||
| type DeleteBucketResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| func NewDeleteBucket(userID int64, bucketID int64) *DeleteBucket { | |||
| func NewDeleteBucket(userID cdssdk.UserID, bucketID cdssdk.BucketID) *DeleteBucket { | |||
| return &DeleteBucket{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| @@ -7,8 +7,6 @@ import ( | |||
| type CacheService interface { | |||
| CachePackageMoved(msg *CachePackageMoved) (*CachePackageMovedResp, *mq.CodeMessage) | |||
| GetPackageObjectCacheInfos(msg *GetPackageObjectCacheInfos) (*GetPackageObjectCacheInfosResp, *mq.CodeMessage) | |||
| } | |||
| // Package的Object移动到了节点的Cache中 | |||
| @@ -16,19 +14,17 @@ var _ = Register(Service.CachePackageMoved) | |||
| type CachePackageMoved struct { | |||
| mq.MessageBodyBase | |||
| PackageID int64 `json:"packageID"` | |||
| NodeID int64 `json:"nodeID"` | |||
| FileHashes []string `json:"fileHashes"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| NodeID cdssdk.NodeID `json:"nodeID"` | |||
| } | |||
| type CachePackageMovedResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| func NewCachePackageMoved(packageID int64, nodeID int64, fileHashes []string) *CachePackageMoved { | |||
| func NewCachePackageMoved(packageID cdssdk.PackageID, nodeID cdssdk.NodeID) *CachePackageMoved { | |||
| return &CachePackageMoved{ | |||
| PackageID: packageID, | |||
| NodeID: nodeID, | |||
| FileHashes: fileHashes, | |||
| PackageID: packageID, | |||
| NodeID: nodeID, | |||
| } | |||
| } | |||
| func NewCachePackageMovedResp() *CachePackageMovedResp { | |||
| @@ -37,31 +33,3 @@ func NewCachePackageMovedResp() *CachePackageMovedResp { | |||
| func (client *Client) CachePackageMoved(msg *CachePackageMoved) (*CachePackageMovedResp, error) { | |||
| return mq.Request(Service.CachePackageMoved, client.rabbitCli, msg) | |||
| } | |||
| // 获取Package中所有Object的FileHash | |||
| var _ = Register(Service.GetPackageObjectCacheInfos) | |||
| type GetPackageObjectCacheInfos struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| type GetPackageObjectCacheInfosResp struct { | |||
| mq.MessageBodyBase | |||
| Infos []cdssdk.ObjectCacheInfo | |||
| } | |||
| func NewGetPackageObjectCacheInfos(userID int64, packageID int64) *GetPackageObjectCacheInfos { | |||
| return &GetPackageObjectCacheInfos{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageObjectCacheInfosResp(infos []cdssdk.ObjectCacheInfo) *GetPackageObjectCacheInfosResp { | |||
| return &GetPackageObjectCacheInfosResp{ | |||
| Infos: infos, | |||
| } | |||
| } | |||
| func (client *Client) GetPackageObjectCacheInfos(msg *GetPackageObjectCacheInfos) (*GetPackageObjectCacheInfosResp, error) { | |||
| return mq.Request(Service.GetPackageObjectCacheInfos, client.rabbitCli, msg) | |||
| } | |||
| @@ -1,64 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| type CommonService interface { | |||
| FindClientLocation(msg *FindClientLocation) (*FindClientLocationResp, *mq.CodeMessage) | |||
| GetECConfig(msg *GetECConfig) (*GetECConfigResp, *mq.CodeMessage) | |||
| } | |||
| // 查询指定IP所属的地域 | |||
| var _ = Register(Service.FindClientLocation) | |||
| type FindClientLocation struct { | |||
| mq.MessageBodyBase | |||
| IP string `json:"ip"` | |||
| } | |||
| type FindClientLocationResp struct { | |||
| mq.MessageBodyBase | |||
| Location model.Location `json:"location"` | |||
| } | |||
| func NewFindClientLocation(ip string) *FindClientLocation { | |||
| return &FindClientLocation{ | |||
| IP: ip, | |||
| } | |||
| } | |||
| func NewFindClientLocationResp(location model.Location) *FindClientLocationResp { | |||
| return &FindClientLocationResp{ | |||
| Location: location, | |||
| } | |||
| } | |||
| func (client *Client) FindClientLocation(msg *FindClientLocation) (*FindClientLocationResp, error) { | |||
| return mq.Request(Service.FindClientLocation, client.rabbitCli, msg) | |||
| } | |||
| // 获取EC具体配置 | |||
| var _ = Register(Service.GetECConfig) | |||
| type GetECConfig struct { | |||
| mq.MessageBodyBase | |||
| ECName string `json:"ecName"` | |||
| } | |||
| type GetECConfigResp struct { | |||
| mq.MessageBodyBase | |||
| Config model.Ec `json:"config"` | |||
| } | |||
| func NewGetECConfig(ecName string) *GetECConfig { | |||
| return &GetECConfig{ | |||
| ECName: ecName, | |||
| } | |||
| } | |||
| func NewGetECConfigResp(config model.Ec) *GetECConfigResp { | |||
| return &GetECConfigResp{ | |||
| Config: config, | |||
| } | |||
| } | |||
| func (client *Client) GetECConfig(msg *GetECConfig) (*GetECConfigResp, error) { | |||
| return mq.Request(Service.GetECConfig, client.rabbitCli, msg) | |||
| } | |||
| @@ -2,6 +2,7 @@ package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -16,14 +17,14 @@ var _ = Register(Service.GetUserNodes) | |||
| type GetUserNodes struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| } | |||
| type GetUserNodesResp struct { | |||
| mq.MessageBodyBase | |||
| Nodes []model.Node `json:"nodes"` | |||
| } | |||
| func NewGetUserNodes(userID int64) *GetUserNodes { | |||
| func NewGetUserNodes(userID cdssdk.UserID) *GetUserNodes { | |||
| return &GetUserNodes{ | |||
| UserID: userID, | |||
| } | |||
| @@ -42,14 +43,14 @@ var _ = Register(Service.GetNodes) | |||
| type GetNodes struct { | |||
| mq.MessageBodyBase | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| NodeIDs []cdssdk.NodeID `json:"nodeIDs"` | |||
| } | |||
| type GetNodesResp struct { | |||
| mq.MessageBodyBase | |||
| Nodes []model.Node `json:"nodes"` | |||
| } | |||
| func NewGetNodes(nodeIDs []int64) *GetNodes { | |||
| func NewGetNodes(nodeIDs []cdssdk.NodeID) *GetNodes { | |||
| return &GetNodes{ | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| @@ -2,6 +2,7 @@ package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgmod "gitlink.org.cn/cloudream/storage/common/models" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| @@ -10,9 +11,7 @@ import ( | |||
| type ObjectService interface { | |||
| GetPackageObjects(msg *GetPackageObjects) (*GetPackageObjectsResp, *mq.CodeMessage) | |||
| GetPackageObjectRepData(msg *GetPackageObjectRepData) (*GetPackageObjectRepDataResp, *mq.CodeMessage) | |||
| GetPackageObjectECData(msg *GetPackageObjectECData) (*GetPackageObjectECDataResp, *mq.CodeMessage) | |||
| GetPackageObjectDetails(msg *GetPackageObjectDetails) (*GetPackageObjectDetailsResp, *mq.CodeMessage) | |||
| } | |||
| // 查询Package中的所有Object,返回的Objects会按照ObjectID升序 | |||
| @@ -20,15 +19,15 @@ var _ = Register(Service.GetPackageObjects) | |||
| type GetPackageObjects struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type GetPackageObjectsResp struct { | |||
| mq.MessageBodyBase | |||
| Objects []model.Object `json:"objects"` | |||
| } | |||
| func NewGetPackageObjects(userID int64, packageID int64) *GetPackageObjects { | |||
| func NewGetPackageObjects(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageObjects { | |||
| return &GetPackageObjects{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| @@ -43,54 +42,28 @@ func (client *Client) GetPackageObjects(msg *GetPackageObjects) (*GetPackageObje | |||
| return mq.Request(Service.GetPackageObjects, client.rabbitCli, msg) | |||
| } | |||
| // 获取指定Object的Rep数据,返回的Objects会按照ObjectID升序 | |||
| var _ = Register(Service.GetPackageObjectRepData) | |||
| // 获取Package中所有Object以及它们的分块详细信息,返回的Objects会按照ObjectID升序 | |||
| var _ = Register(Service.GetPackageObjectDetails) | |||
| type GetPackageObjectRepData struct { | |||
| type GetPackageObjectDetails struct { | |||
| mq.MessageBodyBase | |||
| PackageID int64 `json:"packageID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type GetPackageObjectRepDataResp struct { | |||
| type GetPackageObjectDetailsResp struct { | |||
| mq.MessageBodyBase | |||
| Data []stgmod.ObjectRepData `json:"data"` | |||
| Objects []stgmod.ObjectDetail `json:"objects"` | |||
| } | |||
| func NewGetPackageObjectRepData(packageID int64) *GetPackageObjectRepData { | |||
| return &GetPackageObjectRepData{ | |||
| func NewGetPackageObjectDetails(packageID cdssdk.PackageID) *GetPackageObjectDetails { | |||
| return &GetPackageObjectDetails{ | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageObjectRepDataResp(data []stgmod.ObjectRepData) *GetPackageObjectRepDataResp { | |||
| return &GetPackageObjectRepDataResp{ | |||
| Data: data, | |||
| } | |||
| } | |||
| func (client *Client) GetPackageObjectRepData(msg *GetPackageObjectRepData) (*GetPackageObjectRepDataResp, error) { | |||
| return mq.Request(Service.GetPackageObjectRepData, client.rabbitCli, msg) | |||
| } | |||
| // 获取指定Object的EC数据,返回的Objects会按照ObjectID升序 | |||
| var _ = Register(Service.GetPackageObjectECData) | |||
| type GetPackageObjectECData struct { | |||
| mq.MessageBodyBase | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| type GetPackageObjectECDataResp struct { | |||
| mq.MessageBodyBase | |||
| Data []stgmod.ObjectECData `json:"data"` | |||
| } | |||
| func NewGetPackageObjectECData(packageID int64) *GetPackageObjectECData { | |||
| return &GetPackageObjectECData{ | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageObjectECDataResp(data []stgmod.ObjectECData) *GetPackageObjectECDataResp { | |||
| return &GetPackageObjectECDataResp{ | |||
| Data: data, | |||
| func NewGetPackageObjectDetailsResp(objects []stgmod.ObjectDetail) *GetPackageObjectDetailsResp { | |||
| return &GetPackageObjectDetailsResp{ | |||
| Objects: objects, | |||
| } | |||
| } | |||
| func (client *Client) GetPackageObjectECData(msg *GetPackageObjectECData) (*GetPackageObjectECDataResp, error) { | |||
| return mq.Request(Service.GetPackageObjectECData, client.rabbitCli, msg) | |||
| func (client *Client) GetPackageObjectDetails(msg *GetPackageObjectDetails) (*GetPackageObjectDetailsResp, error) { | |||
| return mq.Request(Service.GetPackageObjectDetails, client.rabbitCli, msg) | |||
| } | |||
| @@ -12,9 +12,7 @@ type PackageService interface { | |||
| CreatePackage(msg *CreatePackage) (*CreatePackageResp, *mq.CodeMessage) | |||
| UpdateRepPackage(msg *UpdateRepPackage) (*UpdateRepPackageResp, *mq.CodeMessage) | |||
| UpdateECPackage(msg *UpdateECPackage) (*UpdateECPackageResp, *mq.CodeMessage) | |||
| UpdateECPackage(msg *UpdatePackage) (*UpdatePackageResp, *mq.CodeMessage) | |||
| DeletePackage(msg *DeletePackage) (*DeletePackageResp, *mq.CodeMessage) | |||
| @@ -28,15 +26,15 @@ var _ = Register(Service.GetPackage) | |||
| type GetPackage struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type GetPackageResp struct { | |||
| mq.MessageBodyBase | |||
| model.Package | |||
| } | |||
| func NewGetPackage(userID int64, packageID int64) *GetPackage { | |||
| func NewGetPackage(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackage { | |||
| return &GetPackage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| @@ -56,25 +54,23 @@ var _ = Register(Service.CreatePackage) | |||
| type CreatePackage struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| Redundancy cdssdk.TypedRedundancyInfo `json:"redundancy"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| BucketID cdssdk.BucketID `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| } | |||
| type CreatePackageResp struct { | |||
| mq.MessageBodyBase | |||
| PackageID int64 `json:"packageID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| func NewCreatePackage(userID int64, bucketID int64, name string, redundancy cdssdk.TypedRedundancyInfo) *CreatePackage { | |||
| func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string) *CreatePackage { | |||
| return &CreatePackage{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| Name: name, | |||
| Redundancy: redundancy, | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| Name: name, | |||
| } | |||
| } | |||
| func NewCreatePackageResp(packageID int64) *CreatePackageResp { | |||
| func NewCreatePackageResp(packageID cdssdk.PackageID) *CreatePackageResp { | |||
| return &CreatePackageResp{ | |||
| PackageID: packageID, | |||
| } | |||
| @@ -83,85 +79,44 @@ func (client *Client) CreatePackage(msg *CreatePackage) (*CreatePackageResp, err | |||
| return mq.Request(Service.CreatePackage, client.rabbitCli, msg) | |||
| } | |||
| // 更新Rep备份模式的Package | |||
| var _ = Register(Service.UpdateRepPackage) | |||
| // 更新EC备份模式的Package | |||
| var _ = Register(Service.UpdateECPackage) | |||
| type UpdateRepPackage struct { | |||
| type UpdatePackage struct { | |||
| mq.MessageBodyBase | |||
| PackageID int64 `json:"packageID"` | |||
| Adds []AddRepObjectInfo `json:"objects"` | |||
| Deletes []int64 `json:"deletes"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| Adds []AddObjectInfo `json:"objects"` | |||
| Deletes []cdssdk.ObjectID `json:"deletes"` | |||
| } | |||
| type UpdateRepPackageResp struct { | |||
| type UpdatePackageResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| type AddRepObjectInfo struct { | |||
| Path string `json:"path"` | |||
| Size int64 `json:"size,string"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| type AddObjectInfo struct { | |||
| Path string `json:"path"` | |||
| Size int64 `json:"size,string"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeID cdssdk.NodeID `json:"nodeID"` | |||
| } | |||
| func NewUpdateRepPackage(packageID int64, adds []AddRepObjectInfo, deletes []int64) *UpdateRepPackage { | |||
| return &UpdateRepPackage{ | |||
| func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectInfo, deletes []cdssdk.ObjectID) *UpdatePackage { | |||
| return &UpdatePackage{ | |||
| PackageID: packageID, | |||
| Adds: adds, | |||
| Deletes: deletes, | |||
| } | |||
| } | |||
| func NewUpdateRepPackageResp() *UpdateRepPackageResp { | |||
| return &UpdateRepPackageResp{} | |||
| func NewUpdatePackageResp() *UpdatePackageResp { | |||
| return &UpdatePackageResp{} | |||
| } | |||
| func NewAddRepObjectInfo(path string, size int64, fileHash string, nodeIDs []int64) AddRepObjectInfo { | |||
| return AddRepObjectInfo{ | |||
| func NewAddObjectInfo(path string, size int64, fileHash string, nodeIDs cdssdk.NodeID) AddObjectInfo { | |||
| return AddObjectInfo{ | |||
| Path: path, | |||
| Size: size, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| func (client *Client) UpdateRepPackage(msg *UpdateRepPackage) (*UpdateRepPackageResp, error) { | |||
| return mq.Request(Service.UpdateRepPackage, client.rabbitCli, msg) | |||
| } | |||
| // 更新EC备份模式的Package | |||
| var _ = Register(Service.UpdateECPackage) | |||
| type UpdateECPackage struct { | |||
| mq.MessageBodyBase | |||
| PackageID int64 `json:"packageID"` | |||
| Adds []AddECObjectInfo `json:"objects"` | |||
| Deletes []int64 `json:"deletes"` | |||
| } | |||
| type UpdateECPackageResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| type AddECObjectInfo struct { | |||
| Path string `json:"path"` | |||
| Size int64 `json:"size,string"` | |||
| FileHashes []string `json:"fileHashes"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewUpdateECPackage(packageID int64, adds []AddECObjectInfo, deletes []int64) *UpdateECPackage { | |||
| return &UpdateECPackage{ | |||
| PackageID: packageID, | |||
| Adds: adds, | |||
| Deletes: deletes, | |||
| } | |||
| } | |||
| func NewUpdateECPackageResp() *UpdateECPackageResp { | |||
| return &UpdateECPackageResp{} | |||
| } | |||
| func NewAddECObjectInfo(path string, size int64, fileHashes []string, nodeIDs []int64) AddECObjectInfo { | |||
| return AddECObjectInfo{ | |||
| Path: path, | |||
| Size: size, | |||
| FileHashes: fileHashes, | |||
| NodeIDs: nodeIDs, | |||
| NodeID: nodeIDs, | |||
| } | |||
| } | |||
| func (client *Client) UpdateECPackage(msg *UpdateECPackage) (*UpdateECPackageResp, error) { | |||
| func (client *Client) UpdateECPackage(msg *UpdatePackage) (*UpdatePackageResp, error) { | |||
| return mq.Request(Service.UpdateECPackage, client.rabbitCli, msg) | |||
| } | |||
| @@ -170,14 +125,14 @@ var _ = Register(Service.DeletePackage) | |||
| type DeletePackage struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `db:"userID"` | |||
| PackageID int64 `db:"packageID"` | |||
| UserID cdssdk.UserID `db:"userID"` | |||
| PackageID cdssdk.PackageID `db:"packageID"` | |||
| } | |||
| type DeletePackageResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| func NewDeletePackage(userID int64, packageID int64) *DeletePackage { | |||
| func NewDeletePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) *DeletePackage { | |||
| return &DeletePackage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| @@ -195,8 +150,8 @@ var _ = Register(Service.GetPackageCachedNodes) | |||
| type GetPackageCachedNodes struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type PackageCachedNodeInfo struct { | |||
| @@ -210,19 +165,18 @@ type GetPackageCachedNodesResp struct { | |||
| cdssdk.PackageCachingInfo | |||
| } | |||
| func NewGetPackageCachedNodes(userID int64, packageID int64) *GetPackageCachedNodes { | |||
| func NewGetPackageCachedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageCachedNodes { | |||
| return &GetPackageCachedNodes{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageCachedNodesResp(nodeInfos []cdssdk.NodePackageCachingInfo, packageSize int64, redunancyType string) *GetPackageCachedNodesResp { | |||
| func NewGetPackageCachedNodesResp(nodeInfos []cdssdk.NodePackageCachingInfo, packageSize int64) *GetPackageCachedNodesResp { | |||
| return &GetPackageCachedNodesResp{ | |||
| PackageCachingInfo: cdssdk.PackageCachingInfo{ | |||
| NodeInfos: nodeInfos, | |||
| PackageSize: packageSize, | |||
| RedunancyType: redunancyType, | |||
| NodeInfos: nodeInfos, | |||
| PackageSize: packageSize, | |||
| }, | |||
| } | |||
| } | |||
| @@ -236,23 +190,23 @@ var _ = Register(Service.GetPackageLoadedNodes) | |||
| type GetPackageLoadedNodes struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type GetPackageLoadedNodesResp struct { | |||
| mq.MessageBodyBase | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| NodeIDs []cdssdk.NodeID `json:"nodeIDs"` | |||
| } | |||
| func NewGetPackageLoadedNodes(userID int64, packageID int64) *GetPackageLoadedNodes { | |||
| func NewGetPackageLoadedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageLoadedNodes { | |||
| return &GetPackageLoadedNodes{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageLoadedNodesResp(nodeIDs []int64) *GetPackageLoadedNodesResp { | |||
| func NewGetPackageLoadedNodesResp(nodeIDs []cdssdk.NodeID) *GetPackageLoadedNodesResp { | |||
| return &GetPackageLoadedNodesResp{ | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| @@ -13,8 +13,6 @@ type Service interface { | |||
| CacheService | |||
| CommonService | |||
| NodeService | |||
| ObjectService | |||
| @@ -2,6 +2,7 @@ package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| ) | |||
| @@ -16,21 +17,21 @@ var _ = Register(Service.GetStorageInfo) | |||
| type GetStorageInfo struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| StorageID int64 `json:"storageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| } | |||
| type GetStorageInfoResp struct { | |||
| mq.MessageBodyBase | |||
| model.Storage | |||
| } | |||
| func NewGetStorageInfo(userID int64, storageID int64) *GetStorageInfo { | |||
| func NewGetStorageInfo(userID cdssdk.UserID, storageID cdssdk.StorageID) *GetStorageInfo { | |||
| return &GetStorageInfo{ | |||
| UserID: userID, | |||
| StorageID: storageID, | |||
| } | |||
| } | |||
| func NewGetStorageInfoResp(storageID int64, name string, nodeID int64, dir string, state string) *GetStorageInfoResp { | |||
| func NewGetStorageInfoResp(storageID cdssdk.StorageID, name string, nodeID cdssdk.NodeID, dir string, state string) *GetStorageInfoResp { | |||
| return &GetStorageInfoResp{ | |||
| Storage: model.Storage{ | |||
| StorageID: storageID, | |||
| @@ -50,15 +51,15 @@ var _ = Register(Service.StoragePackageLoaded) | |||
| type StoragePackageLoaded struct { | |||
| mq.MessageBodyBase | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| StorageID int64 `json:"storageID"` | |||
| UserID cdssdk.UserID `json:"userID"` | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| PackageID cdssdk.PackageID `json:"packageID"` | |||
| } | |||
| type StoragePackageLoadedResp struct { | |||
| mq.MessageBodyBase | |||
| } | |||
| func NewStoragePackageLoaded(userID int64, packageID int64, stgID int64) *StoragePackageLoaded { | |||
| func NewStoragePackageLoaded(userID cdssdk.UserID, stgID cdssdk.StorageID, packageID cdssdk.PackageID) *StoragePackageLoaded { | |||
| return &StoragePackageLoaded{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| @@ -1,12 +1,14 @@ | |||
| package event | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type AgentCheckCache struct { | |||
| EventBase | |||
| NodeID int64 `json:"nodeID"` | |||
| FileHashes []string `json:"fileHashes"` // 需要检查的FileHash列表,如果为nil(不是为空),则代表进行全量检查 | |||
| NodeID cdssdk.NodeID `json:"nodeID"` | |||
| FileHashes []string `json:"fileHashes"` // 需要检查的FileHash列表,如果为nil(不是为空),则代表进行全量检查 | |||
| } | |||
| func NewAgentCheckCache(nodeID int64, fileHashes []string) *AgentCheckCache { | |||
| func NewAgentCheckCache(nodeID cdssdk.NodeID, fileHashes []string) *AgentCheckCache { | |||
| return &AgentCheckCache{ | |||
| NodeID: nodeID, | |||
| FileHashes: fileHashes, | |||
| @@ -1,11 +1,13 @@ | |||
| package event | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type AgentCheckState struct { | |||
| EventBase | |||
| NodeID int64 `json:"nodeID"` | |||
| NodeID cdssdk.NodeID `json:"nodeID"` | |||
| } | |||
| func NewAgentCheckState(nodeID int64) *AgentCheckState { | |||
| func NewAgentCheckState(nodeID cdssdk.NodeID) *AgentCheckState { | |||
| return &AgentCheckState{ | |||
| NodeID: nodeID, | |||
| } | |||
| @@ -1,12 +1,14 @@ | |||
| package event | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type AgentCheckStorage struct { | |||
| EventBase | |||
| StorageID int64 `json:"storageID"` | |||
| PackageIDs []int64 `json:"packageIDs"` // 需要检查的Package文件列表,如果为nil(不是为空),则代表进行全量检查 | |||
| StorageID cdssdk.StorageID `json:"storageID"` | |||
| PackageIDs []cdssdk.PackageID `json:"packageIDs"` // 需要检查的Package文件列表,如果为nil(不是为空),则代表进行全量检查 | |||
| } | |||
| func NewAgentCheckStorage(storageID int64, packageIDs []int64) *AgentCheckStorage { | |||
| func NewAgentCheckStorage(storageID cdssdk.StorageID, packageIDs []cdssdk.PackageID) *AgentCheckStorage { | |||
| return &AgentCheckStorage{ | |||
| StorageID: storageID, | |||
| PackageIDs: packageIDs, | |||
| @@ -1,11 +1,13 @@ | |||
| package event | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type CheckCache struct { | |||
| EventBase | |||
| NodeID int64 `json:"nodeID"` | |||
| NodeID cdssdk.NodeID `json:"nodeID"` | |||
| } | |||
| func NewCheckCache(nodeID int64) *CheckCache { | |||
| func NewCheckCache(nodeID cdssdk.NodeID) *CheckCache { | |||
| return &CheckCache{ | |||
| NodeID: nodeID, | |||
| } | |||
| @@ -1,11 +1,13 @@ | |||
| package event | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| type CheckPackage struct { | |||
| EventBase | |||
| PackageIDs []int64 `json:"packageIDs"` | |||
| PackageIDs []cdssdk.PackageID `json:"packageIDs"` | |||
| } | |||
| func NewCheckPackage(packageIDs []int64) *CheckPackage { | |||
| func NewCheckPackage(packageIDs []cdssdk.PackageID) *CheckPackage { | |||
| return &CheckPackage{ | |||
| PackageIDs: packageIDs, | |||
| } | |||
| @@ -3,9 +3,11 @@ package utils | |||
| import ( | |||
| "path/filepath" | |||
| "strconv" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| ) | |||
| // MakeStorageLoadPackagePath Load操作时,写入的文件夹的名称 | |||
| func MakeStorageLoadPackagePath(stgDir string, userID int64, packageID int64) string { | |||
| return filepath.Join(stgDir, strconv.FormatInt(userID, 10), "packages", strconv.FormatInt(packageID, 10)) | |||
| func MakeStorageLoadPackagePath(stgDir string, userID cdssdk.UserID, packageID cdssdk.PackageID) string { | |||
| return filepath.Join(stgDir, strconv.FormatInt(int64(userID), 10), "packages", strconv.FormatInt(int64(packageID), 10)) | |||
| } | |||
| @@ -7,11 +7,12 @@ import ( | |||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| func (svc *Service) GetBucket(userID int, bucketID int) (model.Bucket, error) { | |||
| func (svc *Service) GetBucket(userID cdssdk.UserID, bucketID cdssdk.BucketID) (model.Bucket, error) { | |||
| // TODO | |||
| panic("not implement yet") | |||
| } | |||
| @@ -42,7 +43,7 @@ func (svc *Service) GetBucketPackages(msg *coormq.GetBucketPackages) (*coormq.Ge | |||
| } | |||
| func (svc *Service) CreateBucket(msg *coormq.CreateBucket) (*coormq.CreateBucketResp, *mq.CodeMessage) { | |||
| var bucketID int64 | |||
| var bucketID cdssdk.BucketID | |||
| var err error | |||
| svc.db.DoTx(sql.LevelDefault, func(tx *sqlx.Tx) error { | |||
| // 这里用的是外部的err | |||
| @@ -8,22 +8,10 @@ import ( | |||
| ) | |||
| func (svc *Service) CachePackageMoved(msg *coormq.CachePackageMoved) (*coormq.CachePackageMovedResp, *mq.CodeMessage) { | |||
| pkg, err := svc.db.Package().GetByID(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("getting package: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package failed") | |||
| if err := svc.db.Cache().SetPackageObjectFrozen(svc.db.SQLCtx(), msg.PackageID, msg.NodeID); err != nil { | |||
| logger.Warnf("setting package object frozen: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "set package object frozen failed") | |||
| } | |||
| if pkg.Redundancy.IsRepInfo() { | |||
| // TODO 优先级 | |||
| if err := svc.db.Cache().BatchCreatePinned(svc.db.SQLCtx(), msg.FileHashes, msg.NodeID, 0); err != nil { | |||
| logger.Warnf("batch creating pinned cache: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "batch create pinned cache failed") | |||
| } | |||
| } | |||
| // TODO EC的逻辑 | |||
| return mq.ReplyOK(coormq.NewCachePackageMovedResp()) | |||
| } | |||
| @@ -1,30 +0,0 @@ | |||
| package services | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| func (svc *Service) FindClientLocation(msg *coormq.FindClientLocation) (*coormq.FindClientLocationResp, *mq.CodeMessage) { | |||
| location, err := svc.db.Location().FindLocationByExternalIP(svc.db.SQLCtx(), msg.IP) | |||
| if err != nil { | |||
| logger.WithField("IP", msg.IP). | |||
| Warnf("finding location by external ip: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query client location failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewFindClientLocationResp(location)) | |||
| } | |||
| func (svc *Service) GetECConfig(msg *coormq.GetECConfig) (*coormq.GetECConfigResp, *mq.CodeMessage) { | |||
| ec, err := svc.db.Ec().GetEc(svc.db.SQLCtx(), msg.ECName) | |||
| if err != nil { | |||
| logger.WithField("ECName", msg.ECName). | |||
| Warnf("query ec failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query ec failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetECConfigResp(ec)) | |||
| } | |||
| @@ -7,51 +7,27 @@ import ( | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| ) | |||
| func (svc *Service) GetPackageObjectCacheInfos(msg *coormq.GetPackageObjectCacheInfos) (*coormq.GetPackageObjectCacheInfosResp, *mq.CodeMessage) { | |||
| pkg, err := svc.db.Package().GetUserPackage(svc.db.SQLCtx(), msg.UserID, msg.PackageID) | |||
| func (svc *Service) GetPackageObjects(msg *coormq.GetPackageObjects) (*coormq.GetPackageObjectsResp, *mq.CodeMessage) { | |||
| // TODO 检查用户是否有权限 | |||
| objs, err := svc.db.Object().GetPackageObjects(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("getting package: %s", err.Error()) | |||
| Warnf("get package objects: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package failed") | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package objects failed") | |||
| } | |||
| if pkg.Redundancy.IsRepInfo() { | |||
| infos, err := svc.db.ObjectRep().GetPackageObjectCacheInfos(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("getting rep package object cache infos: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get rep package object cache infos failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetPackageObjectCacheInfosResp(infos)) | |||
| } | |||
| // TODO EC | |||
| return nil, mq.Failed(errorcode.OperationFailed, "not implement yet") | |||
| } | |||
| func (svc *Service) GetPackageObjectRepData(msg *coormq.GetPackageObjectRepData) (*coormq.GetPackageObjectRepDataResp, *mq.CodeMessage) { | |||
| data, err := svc.db.ObjectRep().GetWithNodeIDInPackage(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("query object rep and node id in package: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query object rep and node id in package failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetPackageObjectRepDataResp(data)) | |||
| return mq.ReplyOK(coormq.NewGetPackageObjectsResp(objs)) | |||
| } | |||
| func (svc *Service) GetPackageObjectECData(msg *coormq.GetPackageObjectECData) (*coormq.GetPackageObjectECDataResp, *mq.CodeMessage) { | |||
| data, err := svc.db.ObjectBlock().GetWithNodeIDInPackage(svc.db.SQLCtx(), msg.PackageID) | |||
| func (svc *Service) GetPackageObjectDetails(msg *coormq.GetPackageObjectDetails) (*coormq.GetPackageObjectDetailsResp, *mq.CodeMessage) { | |||
| data, err := svc.db.ObjectBlock().GetPackageBlockDetails(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("query object ec and node id in package: %s", err.Error()) | |||
| Warnf("getting package block details: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "query object ec and node id in package failed") | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package object block details failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetPackageObjectECDataResp(data)) | |||
| return mq.ReplyOK(coormq.NewGetPackageObjectDetailsResp(data)) | |||
| } | |||
| @@ -27,24 +27,11 @@ func (svc *Service) GetPackage(msg *coormq.GetPackage) (*coormq.GetPackageResp, | |||
| return mq.ReplyOK(coormq.NewGetPackageResp(pkg)) | |||
| } | |||
| func (svc *Service) GetPackageObjects(msg *coormq.GetPackageObjects) (*coormq.GetPackageObjectsResp, *mq.CodeMessage) { | |||
| // TODO 检查用户是否有权限 | |||
| objs, err := svc.db.Object().GetPackageObjects(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("get package objects: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package objects failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewGetPackageObjectsResp(objs)) | |||
| } | |||
| func (svc *Service) CreatePackage(msg *coormq.CreatePackage) (*coormq.CreatePackageResp, *mq.CodeMessage) { | |||
| var pkgID int64 | |||
| var pkgID cdssdk.PackageID | |||
| err := svc.db.DoTx(sql.LevelDefault, func(tx *sqlx.Tx) error { | |||
| var err error | |||
| pkgID, err = svc.db.Package().Create(svc.db.SQLCtx(), msg.BucketID, msg.Name, msg.Redundancy) | |||
| pkgID, err = svc.db.Package().Create(svc.db.SQLCtx(), msg.BucketID, msg.Name) | |||
| return err | |||
| }) | |||
| if err != nil { | |||
| @@ -58,7 +45,7 @@ func (svc *Service) CreatePackage(msg *coormq.CreatePackage) (*coormq.CreatePack | |||
| return mq.ReplyOK(coormq.NewCreatePackageResp(pkgID)) | |||
| } | |||
| func (svc *Service) UpdateRepPackage(msg *coormq.UpdateRepPackage) (*coormq.UpdateRepPackageResp, *mq.CodeMessage) { | |||
| func (svc *Service) UpdateECPackage(msg *coormq.UpdatePackage) (*coormq.UpdatePackageResp, *mq.CodeMessage) { | |||
| _, err := svc.db.Package().GetByID(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| @@ -77,7 +64,7 @@ func (svc *Service) UpdateRepPackage(msg *coormq.UpdateRepPackage) (*coormq.Upda | |||
| // 再执行添加操作 | |||
| if len(msg.Adds) > 0 { | |||
| if _, err := svc.db.Object().BatchAddRep(tx, msg.PackageID, msg.Adds); err != nil { | |||
| if _, err := svc.db.Object().BatchAdd(tx, msg.PackageID, msg.Adds); err != nil { | |||
| return fmt.Errorf("adding objects: %w", err) | |||
| } | |||
| } | |||
| @@ -86,55 +73,10 @@ func (svc *Service) UpdateRepPackage(msg *coormq.UpdateRepPackage) (*coormq.Upda | |||
| }) | |||
| if err != nil { | |||
| logger.Warn(err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "update rep package failed") | |||
| return nil, mq.Failed(errorcode.OperationFailed, "update package failed") | |||
| } | |||
| // 紧急任务 | |||
| var affectFileHashes []string | |||
| for _, add := range msg.Adds { | |||
| affectFileHashes = append(affectFileHashes, add.FileHash) | |||
| } | |||
| err = svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewCheckRepCount(affectFileHashes), true, true)) | |||
| if err != nil { | |||
| logger.Warnf("post event to scanner failed, but this will not affect creating, err: %s", err.Error()) | |||
| } | |||
| return mq.ReplyOK(coormq.NewUpdateRepPackageResp()) | |||
| } | |||
| func (svc *Service) UpdateECPackage(msg *coormq.UpdateECPackage) (*coormq.UpdateECPackageResp, *mq.CodeMessage) { | |||
| _, err := svc.db.Package().GetByID(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("get package: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package failed") | |||
| } | |||
| err = svc.db.DoTx(sql.LevelDefault, func(tx *sqlx.Tx) error { | |||
| // 先执行删除操作 | |||
| if len(msg.Deletes) > 0 { | |||
| if err := svc.db.Object().BatchDelete(tx, msg.Deletes); err != nil { | |||
| return fmt.Errorf("deleting objects: %w", err) | |||
| } | |||
| } | |||
| // 再执行添加操作 | |||
| if len(msg.Adds) > 0 { | |||
| if _, err := svc.db.Object().BatchAddEC(tx, msg.PackageID, msg.Adds); err != nil { | |||
| return fmt.Errorf("adding objects: %w", err) | |||
| } | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| logger.Warn(err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "update ec package failed") | |||
| } | |||
| return mq.ReplyOK(coormq.NewUpdateECPackageResp()) | |||
| return mq.ReplyOK(coormq.NewUpdatePackageResp()) | |||
| } | |||
| func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePackageResp, *mq.CodeMessage) { | |||
| @@ -171,7 +113,7 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack | |||
| // 不追求及时、准确 | |||
| if len(stgs) == 0 { | |||
| // 如果没有被引用,直接投递CheckPackage的任务 | |||
| err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewCheckPackage([]int64{msg.PackageID}), false, false)) | |||
| err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewCheckPackage([]cdssdk.PackageID{msg.PackageID}), false, false)) | |||
| if err != nil { | |||
| logger.Warnf("post event to scanner failed, but this will not affect deleting, err: %s", err.Error()) | |||
| } | |||
| @@ -180,7 +122,7 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack | |||
| } else { | |||
| // 有引用则让Agent去检查StoragePackage | |||
| for _, stg := range stgs { | |||
| err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewAgentCheckStorage(stg.StorageID, []int64{msg.PackageID}), false, false)) | |||
| err := svc.scanner.PostEvent(scmq.NewPostEvent(scevt.NewAgentCheckStorage(stg.StorageID, []cdssdk.PackageID{msg.PackageID}), false, false)) | |||
| if err != nil { | |||
| logger.Warnf("post event to scanner failed, but this will not affect deleting, err: %s", err.Error()) | |||
| } | |||
| @@ -206,76 +148,33 @@ func (svc *Service) GetPackageCachedNodes(msg *coormq.GetPackageCachedNodes) (*c | |||
| return nil, mq.Failed(errorcode.OperationFailed, "package is not available to the user") | |||
| } | |||
| pkg, err := svc.db.Package().GetByID(svc.db.SQLCtx(), msg.PackageID) | |||
| objDetails, err := svc.db.ObjectBlock().GetPackageBlockDetails(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("get package: %s", err.Error()) | |||
| Warnf("get package block details: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package failed") | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get package block details failed") | |||
| } | |||
| var packageSize int64 | |||
| nodeInfoMap := make(map[int64]*cdssdk.NodePackageCachingInfo) | |||
| if pkg.Redundancy.IsRepInfo() { | |||
| // 备份方式为rep | |||
| objectRepDatas, err := svc.db.ObjectRep().GetWithNodeIDInPackage(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("get objectRepDatas by packageID failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get objectRepDatas by packageID failed") | |||
| } | |||
| for _, data := range objectRepDatas { | |||
| packageSize += data.Object.Size | |||
| for _, nodeID := range data.NodeIDs { | |||
| nodeInfo, exists := nodeInfoMap[nodeID] | |||
| if !exists { | |||
| nodeInfo = &cdssdk.NodePackageCachingInfo{ | |||
| NodeID: nodeID, | |||
| FileSize: data.Object.Size, | |||
| ObjectCount: 1, | |||
| nodeInfoMap := make(map[cdssdk.NodeID]*cdssdk.NodePackageCachingInfo) | |||
| for _, obj := range objDetails { | |||
| // 只要存了文件的一个块,就认为此节点存了整个文件 | |||
| for _, block := range obj.Blocks { | |||
| for _, nodeID := range block.CachedNodeIDs { | |||
| info, ok := nodeInfoMap[nodeID] | |||
| if !ok { | |||
| info = &cdssdk.NodePackageCachingInfo{ | |||
| NodeID: nodeID, | |||
| } | |||
| } else { | |||
| nodeInfo.FileSize += data.Object.Size | |||
| nodeInfo.ObjectCount++ | |||
| } | |||
| nodeInfoMap[nodeID] = nodeInfo | |||
| } | |||
| } | |||
| } else if pkg.Redundancy.IsECInfo() { | |||
| // 备份方式为ec | |||
| objectECDatas, err := svc.db.ObjectBlock().GetWithNodeIDInPackage(svc.db.SQLCtx(), msg.PackageID) | |||
| if err != nil { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("get objectECDatas by packageID failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get objectECDatas by packageID failed") | |||
| } | |||
| nodeInfoMap[nodeID] = info | |||
| for _, ecData := range objectECDatas { | |||
| packageSize += ecData.Object.Size | |||
| for _, block := range ecData.Blocks { | |||
| for _, nodeID := range block.NodeIDs { | |||
| nodeInfo, exists := nodeInfoMap[nodeID] | |||
| if !exists { | |||
| nodeInfo = &cdssdk.NodePackageCachingInfo{ | |||
| NodeID: nodeID, | |||
| FileSize: ecData.Object.Size, | |||
| ObjectCount: 1, | |||
| } | |||
| } else { | |||
| nodeInfo.FileSize += ecData.Object.Size | |||
| nodeInfo.ObjectCount++ | |||
| } | |||
| nodeInfoMap[nodeID] = nodeInfo | |||
| } | |||
| info.FileSize += obj.Object.Size | |||
| info.ObjectCount++ | |||
| } | |||
| } | |||
| } else { | |||
| logger.WithField("PackageID", msg.PackageID). | |||
| Warnf("Redundancy type %s is wrong", pkg.Redundancy.Type) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "redundancy type is wrong") | |||
| } | |||
| var nodeInfos []cdssdk.NodePackageCachingInfo | |||
| @@ -286,7 +185,7 @@ func (svc *Service) GetPackageCachedNodes(msg *coormq.GetPackageCachedNodes) (*c | |||
| sort.Slice(nodeInfos, func(i, j int) bool { | |||
| return nodeInfos[i].NodeID < nodeInfos[j].NodeID | |||
| }) | |||
| return mq.ReplyOK(coormq.NewGetPackageCachedNodesResp(nodeInfos, packageSize, pkg.Redundancy.Type)) | |||
| return mq.ReplyOK(coormq.NewGetPackageCachedNodesResp(nodeInfos, packageSize)) | |||
| } | |||
| func (svc *Service) GetPackageLoadedNodes(msg *coormq.GetPackageLoadedNodes) (*coormq.GetPackageLoadedNodesResp, *mq.CodeMessage) { | |||
| @@ -297,8 +196,8 @@ func (svc *Service) GetPackageLoadedNodes(msg *coormq.GetPackageLoadedNodes) (*c | |||
| return nil, mq.Failed(errorcode.OperationFailed, "get storages by packageID failed") | |||
| } | |||
| uniqueNodeIDs := make(map[int64]bool) | |||
| var nodeIDs []int64 | |||
| uniqueNodeIDs := make(map[cdssdk.NodeID]bool) | |||
| var nodeIDs []cdssdk.NodeID | |||
| for _, stg := range storages { | |||
| if !uniqueNodeIDs[stg.NodeID] { | |||
| uniqueNodeIDs[stg.NodeID] = true | |||
| @@ -2,6 +2,8 @@ package services | |||
| import ( | |||
| "database/sql" | |||
| "fmt" | |||
| "time" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||
| @@ -24,12 +26,22 @@ func (svc *Service) GetStorageInfo(msg *coormq.GetStorageInfo) (*coormq.GetStora | |||
| func (svc *Service) StoragePackageLoaded(msg *coormq.StoragePackageLoaded) (*coormq.StoragePackageLoadedResp, *mq.CodeMessage) { | |||
| // TODO: 对于的storage中已经存在的文件,直接覆盖已有文件 | |||
| err := svc.db.DoTx(sql.LevelDefault, func(tx *sqlx.Tx) error { | |||
| return svc.db.StoragePackage().LoadPackage(tx, msg.PackageID, msg.StorageID, msg.UserID) | |||
| err := svc.db.StoragePackage().Create(tx, msg.StorageID, msg.PackageID, msg.UserID) | |||
| if err != nil { | |||
| return fmt.Errorf("creating storage package: %w", err) | |||
| } | |||
| err = svc.db.StoragePackageLog().Create(tx, msg.StorageID, msg.PackageID, msg.UserID, time.Now()) | |||
| if err != nil { | |||
| return fmt.Errorf("creating storage package log: %w", err) | |||
| } | |||
| return nil | |||
| }) | |||
| if err != nil { | |||
| logger.WithField("UserID", msg.UserID). | |||
| WithField("PackageID", msg.PackageID). | |||
| WithField("StorageID", msg.StorageID). | |||
| WithField("PackageID", msg.PackageID). | |||
| Warnf("user load package to storage failed, err: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "user load package to storage failed") | |||
| } | |||
| @@ -7,6 +7,7 @@ import ( | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| @@ -19,7 +20,7 @@ type AgentCheckCache struct { | |||
| *scevt.AgentCheckCache | |||
| } | |||
| func NewAgentCheckCache(nodeID int64, fileHashes []string) *AgentCheckCache { | |||
| func NewAgentCheckCache(nodeID cdssdk.NodeID, fileHashes []string) *AgentCheckCache { | |||
| return &AgentCheckCache{ | |||
| AgentCheckCache: scevt.NewAgentCheckCache(nodeID, fileHashes), | |||
| } | |||
| @@ -4,12 +4,11 @@ import ( | |||
| "database/sql" | |||
| "time" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" | |||
| scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" | |||
| @@ -20,7 +19,7 @@ type AgentCheckState struct { | |||
| *scevt.AgentCheckState | |||
| } | |||
| func NewAgentCheckState(nodeID int64) *AgentCheckState { | |||
| func NewAgentCheckState(nodeID cdssdk.NodeID) *AgentCheckState { | |||
| return &AgentCheckState{ | |||
| AgentCheckState: scevt.NewAgentCheckState(nodeID), | |||
| } | |||
| @@ -80,15 +79,16 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) { | |||
| log.WithField("NodeID", t.NodeID).Warnf("set node state failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| // 补充备份数 | |||
| execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash }))) | |||
| /* | |||
| caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| // 补充备份数 | |||
| execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash }))) | |||
| */ | |||
| return | |||
| } | |||
| return | |||
| @@ -7,6 +7,7 @@ import ( | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| @@ -19,7 +20,7 @@ type AgentCheckStorage struct { | |||
| *scevt.AgentCheckStorage | |||
| } | |||
| func NewAgentCheckStorage(storageID int64, packageIDs []int64) *AgentCheckStorage { | |||
| func NewAgentCheckStorage(storageID cdssdk.StorageID, packageIDs []cdssdk.PackageID) *AgentCheckStorage { | |||
| return &AgentCheckStorage{ | |||
| AgentCheckStorage: scevt.NewAgentCheckStorage(storageID, packageIDs), | |||
| } | |||
| @@ -157,7 +158,7 @@ func (t *AgentCheckStorage) startCheck(execCtx ExecuteContext, stg model.Storage | |||
| } | |||
| // 根据返回结果修改数据库 | |||
| var chkObjIDs []int64 | |||
| var chkObjIDs []cdssdk.PackageID | |||
| for _, entry := range checkResp.Entries { | |||
| switch entry.Operation { | |||
| case agtmq.CHECK_STORAGE_RESP_OP_DELETE: | |||
| @@ -3,10 +3,9 @@ package event | |||
| import ( | |||
| "database/sql" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" | |||
| ) | |||
| @@ -15,7 +14,7 @@ type CheckCache struct { | |||
| *scevt.CheckCache | |||
| } | |||
| func NewCheckCache(nodeID int64) *CheckCache { | |||
| func NewCheckCache(nodeID cdssdk.NodeID) *CheckCache { | |||
| return &CheckCache{ | |||
| CheckCache: scevt.NewCheckCache(nodeID), | |||
| } | |||
| @@ -63,20 +62,20 @@ func (t *CheckCache) Execute(execCtx ExecuteContext) { | |||
| if node.State != consts.NodeStateUnavailable { | |||
| return | |||
| } | |||
| /* | |||
| caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| caches, err := execCtx.Args.DB.Cache().GetNodeCaches(execCtx.Args.DB.SQLCtx(), t.NodeID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("get node caches failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| err = execCtx.Args.DB.Cache().DeleteNodeAll(execCtx.Args.DB.SQLCtx(), t.NodeID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("delete node all caches failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash }))) | |||
| err = execCtx.Args.DB.Cache().DeleteNodeAll(execCtx.Args.DB.SQLCtx(), t.NodeID) | |||
| if err != nil { | |||
| log.WithField("NodeID", t.NodeID).Warnf("delete node all caches failed, err: %s", err.Error()) | |||
| return | |||
| } | |||
| */ | |||
| //execCtx.Executor.Post(NewCheckRepCount(lo.Map(caches, func(ch model.Cache, index int) string { return ch.FileHash }))) | |||
| } | |||
| func init() { | |||
| @@ -3,6 +3,7 @@ package event | |||
| import ( | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event" | |||
| ) | |||
| @@ -11,9 +12,9 @@ type CheckPackage struct { | |||
| *scevt.CheckPackage | |||
| } | |||
| func NewCheckPackage(objIDs []int64) *CheckPackage { | |||
| func NewCheckPackage(pkgIDs []cdssdk.PackageID) *CheckPackage { | |||
| return &CheckPackage{ | |||
| CheckPackage: scevt.NewCheckPackage(objIDs), | |||
| CheckPackage: scevt.NewCheckPackage(pkgIDs), | |||
| } | |||
| } | |||
| @@ -1,5 +1,7 @@ | |||
| package event | |||
| /* | |||
| // TODO 可作为新逻辑的参考 | |||
| import ( | |||
| "fmt" | |||
| "math" | |||
| @@ -213,3 +215,4 @@ func chooseDeleteAvaiRepNodes(allNodes []model.Node, curAvaiRepNodes []model.Nod | |||
| func init() { | |||
| RegisterMessageConvertor(func(msg *scevt.CheckRepCount) Event { return NewCheckRepCount(msg.FileHashes) }) | |||
| } | |||
| */ | |||
| @@ -1,10 +1,12 @@ | |||
| package event | |||
| /* | |||
| import ( | |||
| "testing" | |||
| "github.com/samber/lo" | |||
| . "github.com/smartystreets/goconvey/convey" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/common/utils/sort" | |||
| "gitlink.org.cn/cloudream/storage/common/consts" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| @@ -16,7 +18,7 @@ func Test_chooseNewRepNodes(t *testing.T) { | |||
| allNodes []model.Node | |||
| curRepNodes []model.Node | |||
| newCount int | |||
| wantNodeIDs []int | |||
| wantNodeIDs []cdssdk.NodeID | |||
| }{ | |||
| { | |||
| title: "优先选择不同地域的节点", | |||
| @@ -49,7 +51,7 @@ func Test_chooseNewRepNodes(t *testing.T) { | |||
| }, | |||
| }, | |||
| newCount: 2, | |||
| wantNodeIDs: []int{3, 4}, | |||
| wantNodeIDs: []cdssdk.NodeID{3, 4}, | |||
| }, | |||
| { | |||
| title: "就算节点数不足,也不能选择重复节点", | |||
| @@ -72,7 +74,7 @@ func Test_chooseNewRepNodes(t *testing.T) { | |||
| }, | |||
| }, | |||
| newCount: 2, | |||
| wantNodeIDs: []int{2}, | |||
| wantNodeIDs: []cdssdk.NodeID{2}, | |||
| }, | |||
| { | |||
| title: "就算节点数不足,也不能选择状态unavailable的节点", | |||
| @@ -95,16 +97,16 @@ func Test_chooseNewRepNodes(t *testing.T) { | |||
| }, | |||
| }, | |||
| newCount: 2, | |||
| wantNodeIDs: []int{2}, | |||
| wantNodeIDs: []cdssdk.NodeID{2}, | |||
| }, | |||
| } | |||
| for _, test := range testcases { | |||
| Convey(test.title, t, func() { | |||
| chooseNodes := chooseNewRepNodes(test.allNodes, test.curRepNodes, test.newCount) | |||
| chooseNodeIDs := lo.Map(chooseNodes, func(node model.Node, index int) int64 { return node.NodeID }) | |||
| chooseNodeIDs := lo.Map(chooseNodes, func(node model.Node, index int) cdssdk.NodeID { return node.NodeID }) | |||
| sort.Sort(chooseNodeIDs, sort.Cmp[int64]) | |||
| sort.Sort(chooseNodeIDs, sort.Cmp[cdssdk.NodeID]) | |||
| So(chooseNodeIDs, ShouldResemble, test.wantNodeIDs) | |||
| }) | |||
| @@ -117,7 +119,7 @@ func Test_chooseDeleteAvaiRepNodes(t *testing.T) { | |||
| allNodes []model.Node | |||
| curRepNodes []model.Node | |||
| delCount int | |||
| wantNodeLocationIDs []int | |||
| wantNodeLocationIDs []cdssdk.LocationID | |||
| }{ | |||
| { | |||
| title: "优先选择地域重复的节点", | |||
| @@ -129,7 +131,7 @@ func Test_chooseDeleteAvaiRepNodes(t *testing.T) { | |||
| {NodeID: 8, LocationID: 4}, | |||
| }, | |||
| delCount: 4, | |||
| wantNodeLocationIDs: []int{1, 2, 3, 3}, | |||
| wantNodeLocationIDs: []cdssdk.LocationID{1, 2, 3, 3}, | |||
| }, | |||
| { | |||
| title: "节点不够删", | |||
| @@ -138,18 +140,19 @@ func Test_chooseDeleteAvaiRepNodes(t *testing.T) { | |||
| {NodeID: 1, LocationID: 1}, | |||
| }, | |||
| delCount: 2, | |||
| wantNodeLocationIDs: []int{1}, | |||
| wantNodeLocationIDs: []cdssdk.LocationID{1}, | |||
| }, | |||
| } | |||
| for _, test := range testcases { | |||
| Convey(test.title, t, func() { | |||
| chooseNodes := chooseDeleteAvaiRepNodes(test.allNodes, test.curRepNodes, test.delCount) | |||
| chooseNodeLocationIDs := lo.Map(chooseNodes, func(node model.Node, index int) int64 { return node.LocationID }) | |||
| chooseNodeLocationIDs := lo.Map(chooseNodes, func(node model.Node, index int) cdssdk.LocationID { return node.LocationID }) | |||
| sort.Sort(chooseNodeLocationIDs, sort.Cmp[int64]) | |||
| sort.Sort(chooseNodeLocationIDs, sort.Cmp[cdssdk.LocationID]) | |||
| So(chooseNodeLocationIDs, ShouldResemble, test.wantNodeLocationIDs) | |||
| }) | |||
| } | |||
| } | |||
| */ | |||
| @@ -3,6 +3,7 @@ package tickevent | |||
| import ( | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage/scanner/internal/event" | |||
| ) | |||
| @@ -10,7 +11,7 @@ import ( | |||
| const AGENT_CHECK_CACHE_BATCH_SIZE = 2 | |||
| type BatchAllAgentCheckCache struct { | |||
| nodeIDs []int64 | |||
| nodeIDs []cdssdk.NodeID | |||
| } | |||
| func NewBatchAllAgentCheckCache() *BatchAllAgentCheckCache { | |||
| @@ -29,7 +30,7 @@ func (e *BatchAllAgentCheckCache) Execute(ctx ExecuteContext) { | |||
| return | |||
| } | |||
| e.nodeIDs = lo.Map(nodes, func(node model.Node, index int) int64 { return node.NodeID }) | |||
| e.nodeIDs = lo.Map(nodes, func(node model.Node, index int) cdssdk.NodeID { return node.NodeID }) | |||
| log.Debugf("new check start, get all nodes") | |||
| } | |||
| @@ -1,5 +1,6 @@ | |||
| package tickevent | |||
| /* | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/storage/scanner/internal/event" | |||
| @@ -37,3 +38,4 @@ func (e *BatchCheckAllRepCount) Execute(ctx ExecuteContext) { | |||
| e.lastCheckStart += CHECK_CACHE_BATCH_SIZE | |||
| } | |||
| } | |||
| */ | |||
| @@ -119,7 +119,7 @@ func startTickEvent(tickExecutor *tickevent.Executor) { | |||
| tickExecutor.Start(tickevent.NewBatchCheckAllPackage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) | |||
| tickExecutor.Start(tickevent.NewBatchCheckAllRepCount(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) | |||
| // tickExecutor.Start(tickevent.NewBatchCheckAllRepCount(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) | |||
| tickExecutor.Start(tickevent.NewBatchCheckAllStorage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000}) | |||