Browse Source

拆分下载策略选择模块;更改调度逻辑

gitlink
Sydonian 1 year ago
parent
commit
2520f97135
68 changed files with 1249 additions and 2112 deletions
  1. +17
    -5
      agent/internal/cmd/serve.go
  2. +11
    -9
      agent/internal/config/config.go
  3. +0
    -93
      agent/internal/mq/storage.go
  4. +0
    -339
      agent/internal/task/storage_load_package.go
  5. +8
    -19
      client/internal/cmdline/load.go
  6. +15
    -7
      client/internal/cmdline/newloadp.go
  7. +0
    -23
      client/internal/cmdline/package.go
  8. +0
    -39
      client/internal/cmdline/storage.go
  9. +6
    -6
      client/internal/cmdline/test.go
  10. +10
    -8
      client/internal/config/config.go
  11. +8
    -25
      client/internal/http/package.go
  12. +0
    -1
      client/internal/http/server.go
  13. +4
    -29
      client/internal/http/storage.go
  14. +1
    -1
      client/internal/services/hub.go
  15. +0
    -17
      client/internal/services/package.go
  16. +25
    -11
      client/internal/services/service.go
  17. +68
    -45
      client/internal/services/storage.go
  18. +16
    -6
      client/main.go
  19. +3
    -1
      common/assets/confs/agent.config.json
  20. +3
    -1
      common/assets/confs/client.config.json
  21. +13
    -24
      common/pkgs/connectivity/collector.go
  22. +1
    -22
      common/pkgs/db2/bucket.go
  23. +0
    -18
      common/pkgs/db2/model/model.go
  24. +20
    -25
      common/pkgs/db2/package.go
  25. +8
    -4
      common/pkgs/db2/pinned_object.go
  26. +0
    -83
      common/pkgs/db2/storage_package.go
  27. +7
    -3
      common/pkgs/db2/user_bucket.go
  28. +0
    -24
      common/pkgs/distlock/reqbuilder/metadata_storage_package.go
  29. +2
    -3
      common/pkgs/distlock/service.go
  30. +0
    -2
      common/pkgs/downloader/config.go
  31. +12
    -9
      common/pkgs/downloader/downloader.go
  32. +95
    -304
      common/pkgs/downloader/iterator.go
  33. +17
    -31
      common/pkgs/downloader/lrc.go
  34. +2
    -2
      common/pkgs/downloader/lrc_strip_iterator.go
  35. +6
    -0
      common/pkgs/downloader/strategy/config.go
  36. +337
    -0
      common/pkgs/downloader/strategy/selector.go
  37. +4
    -4
      common/pkgs/downloader/strip_iterator.go
  38. +8
    -12
      common/pkgs/ioswitch2/fromto.go
  39. +15
    -40
      common/pkgs/ioswitch2/ops2/shared_store.go
  40. +6
    -6
      common/pkgs/ioswitch2/parser/parser.go
  41. +96
    -0
      common/pkgs/metacache/connectivity.go
  42. +27
    -0
      common/pkgs/metacache/host.go
  43. +75
    -0
      common/pkgs/metacache/hubmeta.go
  44. +121
    -0
      common/pkgs/metacache/simple.go
  45. +76
    -0
      common/pkgs/metacache/storagemeta.go
  46. +0
    -128
      common/pkgs/mq/agent/storage.go
  47. +3
    -3
      common/pkgs/mq/coordinator/hub.go
  48. +0
    -33
      common/pkgs/mq/coordinator/package.go
  49. +12
    -10
      common/pkgs/mq/coordinator/storage.go
  50. +4
    -169
      common/pkgs/storage/local/shared_store.go
  51. +2
    -9
      common/pkgs/storage/types/shared_store.go
  52. +11
    -16
      common/pkgs/uploader/create_load.go
  53. +10
    -10
      common/pkgs/uploader/uploader.go
  54. +0
    -1
      coordinator/internal/cmd/migrate.go
  55. +18
    -2
      coordinator/internal/mq/bucket.go
  56. +22
    -10
      coordinator/internal/mq/hub.go
  57. +2
    -36
      coordinator/internal/mq/package.go
  58. +16
    -24
      coordinator/internal/mq/storage.go
  59. +1
    -1
      coordinator/internal/mq/temp.go
  60. +0
    -136
      scanner/internal/event/agent_check_storage.go
  61. +0
    -86
      scanner/internal/event/agent_storage_gc.go
  62. +0
    -44
      scanner/internal/event/check_package.go
  63. +4
    -4
      scanner/internal/event/check_package_redundancy.go
  64. +1
    -1
      scanner/internal/event/clean_pinned.go
  65. +0
    -38
      scanner/internal/tickevent/batch_check_all_package.go
  66. +0
    -43
      scanner/internal/tickevent/batch_check_all_storage.go
  67. +0
    -1
      scanner/internal/tickevent/storage_gc.go
  68. +0
    -6
      scanner/main.go

+ 17
- 5
agent/internal/cmd/serve.go View File

@@ -19,7 +19,9 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"

@@ -87,15 +89,15 @@ func serve(configPath string) {
hubCons := make([]cdssdk.HubConnectivity, 0, len(cons))
for _, con := range cons {
var delay *float32
if con.Delay != nil {
v := float32(con.Delay.Microseconds()) / 1000
if con.Latency != nil {
v := float32(con.Latency.Microseconds()) / 1000
delay = &v
}

hubCons = append(hubCons, cdssdk.HubConnectivity{
FromHubID: *stgglb.Local.HubID,
ToHubID: con.ToHubID,
Delay: delay,
Latency: delay,
TestTime: con.TestTime,
})
}
@@ -107,6 +109,13 @@ func serve(configPath string) {
})
conCol.CollectInPlace()

// 初始化元数据缓存服务
metacacheHost := metacache.NewHost()
go metacacheHost.Serve()
stgMeta := metacacheHost.AddStorageMeta()
hubMeta := metacacheHost.AddHubMeta()
conMeta := metacacheHost.AddConnectivity()

// 启动访问统计服务
acStat := accessstat.NewAccessStat(accessstat.Config{
// TODO 考虑放到配置里
@@ -120,11 +129,14 @@ func serve(configPath string) {
logger.Fatalf("new ipfs failed, err: %s", err.Error())
}

// 初始化下载策略选择器
strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta)

// 初始化下载器
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr)
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr, strgSel)

// 初始化上传器
uploader := uploader.NewUploader(distlock, &conCol, stgMgr)
uploader := uploader.NewUploader(distlock, &conCol, stgMgr, stgMeta)

// 初始化任务管理器
taskMgr := task.NewManager(distlock, &conCol, &dlder, acStat, stgMgr, uploader)


+ 11
- 9
agent/internal/config/config.go View File

@@ -9,19 +9,21 @@ import (
stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/grpc"
)

type Config struct {
ID cdssdk.HubID `json:"id"`
ListenAddr string `json:"listenAddr"`
Local stgmodels.LocalMachineInfo `json:"local"`
GRPC *grpc.Config `json:"grpc"`
Logger log.Config `json:"logger"`
RabbitMQ mq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
ID cdssdk.HubID `json:"id"`
ListenAddr string `json:"listenAddr"`
Local stgmodels.LocalMachineInfo `json:"local"`
GRPC *grpc.Config `json:"grpc"`
Logger log.Config `json:"logger"`
RabbitMQ mq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
DownloadStrategy strategy.Config `json:"downloadStrategy"`
}

var cfg Config


+ 0
- 93
agent/internal/mq/storage.go View File

@@ -4,104 +4,11 @@ import (
"time"

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
)

func (svc *Service) StartStorageLoadPackage(msg *agtmq.StartStorageLoadPackage) (*agtmq.StartStorageLoadPackageResp, *mq.CodeMessage) {
tsk := svc.taskManager.StartNew(mytask.NewStorageLoadPackage(msg.UserID, msg.PackageID, msg.StorageID))
return mq.ReplyOK(agtmq.NewStartStorageLoadPackageResp(tsk.ID()))
}

func (svc *Service) WaitStorageLoadPackage(msg *agtmq.WaitStorageLoadPackage) (*agtmq.WaitStorageLoadPackageResp, *mq.CodeMessage) {
logger.WithField("TaskID", msg.TaskID).Debugf("wait loading package")

tsk := svc.taskManager.FindByID(msg.TaskID)
if tsk == nil {
return nil, mq.Failed(errorcode.TaskNotFound, "task not found")
}

if msg.WaitTimeoutMs == 0 {
tsk.Wait()

errMsg := ""
if tsk.Error() != nil {
errMsg = tsk.Error().Error()
}

loadTsk := tsk.Body().(*mytask.StorageLoadPackage)

return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(true, errMsg, loadTsk.PackagePath, loadTsk.LocalBase, loadTsk.RemoteBase))

} else {
if tsk.WaitTimeout(time.Duration(msg.WaitTimeoutMs) * time.Millisecond) {

errMsg := ""
if tsk.Error() != nil {
errMsg = tsk.Error().Error()
}

loadTsk := tsk.Body().(*mytask.StorageLoadPackage)

return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(true, errMsg, loadTsk.PackagePath, loadTsk.LocalBase, loadTsk.RemoteBase))
}

return mq.ReplyOK(agtmq.NewWaitStorageLoadPackageResp(false, "", "", "", ""))
}
}

func (svc *Service) StorageCheck(msg *agtmq.StorageCheck) (*agtmq.StorageCheckResp, *mq.CodeMessage) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

shared, err := svc.stgMgr.GetSharedStore(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

loaded, err := shared.ListLoadedPackages()
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

return mq.ReplyOK(agtmq.NewStorageCheckResp(loaded))
}

func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.CodeMessage) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

shared, err := svc.stgMgr.GetSharedStore(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

var loadeds []stgmod.LoadedPackageID
for _, pkg := range msg.Packages {
loadeds = append(loadeds, stgmod.LoadedPackageID{
UserID: pkg.UserID,
PackageID: pkg.PackageID,
})
}

err = shared.PackageGC(loadeds)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

return mq.ReplyOK(agtmq.RespStorageGC())
}

func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePackage) (*agtmq.StartStorageCreatePackageResp, *mq.CodeMessage) {
return nil, mq.Failed(errorcode.OperationFailed, "not implemented")
// coorCli, err := stgglb.CoordinatorMQPool.Acquire()


+ 0
- 339
agent/internal/task/storage_load_package.go View File

@@ -1,339 +0,0 @@
package task

import (
"fmt"
"io"
"math"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/bitmap"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/task"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/reflect2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ec"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils"
)

type StorageLoadPackage struct {
PackagePath string
LocalBase string
RemoteBase string

userID cdssdk.UserID
packageID cdssdk.PackageID
storageID cdssdk.StorageID
pinnedBlocks []stgmod.ObjectBlock
}

func NewStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StorageLoadPackage {
return &StorageLoadPackage{
userID: userID,
packageID: packageID,
storageID: storageID,
}
}
func (t *StorageLoadPackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
startTime := time.Now()
log := logger.WithType[StorageLoadPackage]("Task")
log.WithField("TaskID", task.ID()).
Infof("begin to load package %v to %v", t.packageID, t.storageID)

err := t.do(task, ctx)
if err == nil {
log.WithField("TaskID", task.ID()).
Infof("loading success, cost: %v", time.Since(startTime))
} else {
log.WithField("TaskID", task.ID()).
Warnf("loading package: %v, cost: %v", err, time.Since(startTime))
}

complete(err, CompleteOption{
RemovingDelay: time.Minute,
})
}

func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

shared, err := ctx.stgMgr.GetSharedStore(t.storageID)
if err != nil {
return fmt.Errorf("get shared store of storage %v: %w", t.storageID, err)
}
t.PackagePath = utils.MakeLoadedPackagePath(t.userID, t.packageID)

getObjectDetails, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(t.packageID))
if err != nil {
return fmt.Errorf("getting package object details: %w", err)
}

shardstore, err := ctx.stgMgr.GetShardStore(t.storageID)
if err != nil {
return fmt.Errorf("get shard store of storage %v: %w", t.storageID, err)
}

mutex, err := reqbuilder.NewBuilder().
// 提前占位
Metadata().StoragePackage().CreateOne(t.userID, t.storageID, t.packageID).
// 保护在storage目录中下载的文件
Storage().Buzy(t.storageID).
// 保护下载文件时同时保存到IPFS的文件
Shard().Buzy(t.storageID).
MutexLock(ctx.distlock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)
}
defer mutex.Unlock()

for _, obj := range getObjectDetails.Objects {
err := t.downloadOne(coorCli, shardstore, shared, obj)
if err != nil {
return err
}
ctx.accessStat.AddAccessCounter(obj.Object.ObjectID, t.packageID, t.storageID, 1)
}

_, err = coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(t.userID, t.storageID, t.packageID, t.pinnedBlocks))
if err != nil {
return fmt.Errorf("loading package to storage: %w", err)
}

// TODO 要防止下载的临时文件被删除
return err
}

func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, shardStore types.ShardStore, shared types.SharedStore, obj stgmod.ObjectDetail) error {
var file io.ReadCloser

switch red := obj.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
reader, err := t.downloadNoneOrRepObject(shardStore, obj)
if err != nil {
return fmt.Errorf("downloading object: %w", err)
}
file = reader

case *cdssdk.RepRedundancy:
reader, err := t.downloadNoneOrRepObject(shardStore, obj)
if err != nil {
return fmt.Errorf("downloading rep object: %w", err)
}
file = reader

case *cdssdk.ECRedundancy:
reader, pinnedBlocks, err := t.downloadECObject(coorCli, shardStore, obj, red)
if err != nil {
return fmt.Errorf("downloading ec object: %w", err)
}
file = reader
t.pinnedBlocks = append(t.pinnedBlocks, pinnedBlocks...)

default:
return fmt.Errorf("unknow redundancy type: %v", reflect2.TypeOfValue(obj.Object.Redundancy))
}
defer file.Close()

if _, err := shared.WritePackageObject(t.userID, t.packageID, obj.Object.Path, file); err != nil {
return fmt.Errorf("writting object to file: %w", err)
}

return nil
}

func (t *StorageLoadPackage) downloadNoneOrRepObject(shardStore types.ShardStore, obj stgmod.ObjectDetail) (io.ReadCloser, error) {
if len(obj.Blocks) == 0 && len(obj.PinnedAt) == 0 {
return nil, fmt.Errorf("no storage has this object")
}

file, err := shardStore.Open(types.NewOpen(obj.Object.FileHash))
if err != nil {
return nil, err
}

return file, nil
}

func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, shardStore types.ShardStore, obj stgmod.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, []stgmod.ObjectBlock, error) {
allStorages, err := t.sortDownloadStorages(coorCli, obj)
if err != nil {
return nil, nil, err
}
bsc, blocks := t.getMinReadingBlockSolution(allStorages, ecRed.K)
osc, _ := t.getMinReadingObjectSolution(allStorages, ecRed.K)
if bsc < osc {
var fileStrs []io.ReadCloser

rs, err := ec.NewStreamRs(ecRed.K, ecRed.N, ecRed.ChunkSize)
if err != nil {
return nil, nil, fmt.Errorf("new rs: %w", err)
}

for i := range blocks {
str, err := shardStore.Open(types.NewOpen(blocks[i].Block.FileHash))
if err != nil {
for i -= 1; i >= 0; i-- {
fileStrs[i].Close()
}
return nil, nil, fmt.Errorf("donwloading file: %w", err)
}

fileStrs = append(fileStrs, str)
}

fileReaders, filesCloser := io2.ToReaders(fileStrs)

var indexes []int
for _, b := range blocks {
indexes = append(indexes, b.Block.Index)
}

outputs, outputsCloser := io2.ToReaders(rs.ReconstructData(fileReaders, indexes))
return io2.AfterReadClosed(io2.Length(io2.ChunkedJoin(outputs, int(ecRed.ChunkSize)), obj.Object.Size), func(c io.ReadCloser) {
filesCloser()
outputsCloser()
}), nil, nil
}

// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
if osc == math.MaxFloat64 {
return nil, nil, fmt.Errorf("no enough blocks to reconstruct the file, want %d, get only %d", ecRed.K, len(blocks))
}

// 如果是直接读取的文件,那么就不需要Pin文件块
str, err := shardStore.Open(types.NewOpen(obj.Object.FileHash))
return str, nil, err
}

type downloadStorageInfo struct {
Storage stgmod.StorageDetail
ObjectPinned bool
Blocks []stgmod.ObjectBlock
Distance float64
}

func (t *StorageLoadPackage) sortDownloadStorages(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadStorageInfo, error) {
var stgIDs []cdssdk.StorageID
for _, id := range obj.PinnedAt {
if !lo.Contains(stgIDs, id) {
stgIDs = append(stgIDs, id)
}
}
for _, b := range obj.Blocks {
if !lo.Contains(stgIDs, b.StorageID) {
stgIDs = append(stgIDs, b.StorageID)
}
}

getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs))
if err != nil {
return nil, fmt.Errorf("getting storage details: %w", err)
}
allStgs := make(map[cdssdk.StorageID]stgmod.StorageDetail)
for _, stg := range getStgs.Storages {
allStgs[stg.Storage.StorageID] = *stg
}

downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo)
for _, id := range obj.PinnedAt {
storage, ok := downloadStorageMap[id]
if !ok {
mod := allStgs[id]
storage = &downloadStorageInfo{
Storage: mod,
ObjectPinned: true,
Distance: t.getStorageDistance(mod),
}
downloadStorageMap[id] = storage
}

storage.ObjectPinned = true
}

for _, b := range obj.Blocks {
storage, ok := downloadStorageMap[b.StorageID]
if !ok {
mod := allStgs[b.StorageID]
storage = &downloadStorageInfo{
Storage: mod,
Distance: t.getStorageDistance(mod),
}
downloadStorageMap[b.StorageID] = storage
}

storage.Blocks = append(storage.Blocks, b)
}

return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int {
return sort2.Cmp(left.Distance, right.Distance)
}), nil
}

type downloadBlock struct {
Storage stgmod.StorageDetail
Block stgmod.ObjectBlock
}

func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedStorages []*downloadStorageInfo, k int) (float64, []downloadBlock) {
gotBlocksMap := bitmap.Bitmap64(0)
var gotBlocks []downloadBlock
dist := float64(0.0)
for _, n := range sortedStorages {
for _, b := range n.Blocks {
if !gotBlocksMap.Get(b.Index) {
gotBlocks = append(gotBlocks, downloadBlock{
Storage: n.Storage,
Block: b,
})
gotBlocksMap.Set(b.Index, true)
dist += n.Distance
}

if len(gotBlocks) >= k {
return dist, gotBlocks
}
}
}

return math.MaxFloat64, gotBlocks
}

func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedStorages []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) {
dist := math.MaxFloat64
var downloadStg *stgmod.StorageDetail
for _, n := range sortedStorages {
if n.ObjectPinned && float64(k)*n.Distance < dist {
dist = float64(k) * n.Distance
stg := n.Storage
downloadStg = &stg
}
}

return dist, downloadStg
}

func (t *StorageLoadPackage) getStorageDistance(stg stgmod.StorageDetail) float64 {
if stgglb.Local.HubID != nil {
if stg.MasterHub.HubID == *stgglb.Local.HubID {
return consts.StorageDistanceSameStorage
}
}

if stg.MasterHub.LocationID == stgglb.Local.LocationID {
return consts.StorageDistanceSameLocation
}

return consts.StorageDistanceOther
}

+ 8
- 19
client/internal/cmdline/load.go View File

@@ -15,7 +15,7 @@ func init() {
cmd := cobra.Command{
Use: "load",
Short: "Load data from CDS to a storage service",
Args: cobra.ExactArgs(2),
Args: cobra.ExactArgs(3),
Run: func(cmd *cobra.Command, args []string) {
cmdCtx := GetCmdCtx(cmd)

@@ -30,9 +30,9 @@ func init() {
fmt.Printf("Invalid storage ID: %s\n", args[1])
}

loadByID(cmdCtx, cdssdk.PackageID(pkgID), cdssdk.StorageID(stgID))
loadByID(cmdCtx, cdssdk.PackageID(pkgID), cdssdk.StorageID(stgID), args[2])
} else {
loadByPath(cmdCtx, args[0], args[1])
loadByPath(cmdCtx, args[0], args[1], args[2])
}
},
}
@@ -40,7 +40,7 @@ func init() {
rootCmd.AddCommand(&cmd)
}

func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) {
func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string, rootPath string) {
userID := cdssdk.UserID(1)

comps := strings.Split(strings.Trim(pkgPath, cdssdk.ObjectPathSeparator), cdssdk.ObjectPathSeparator)
@@ -61,29 +61,18 @@ func loadByPath(cmdCtx *CommandContext, pkgPath string, stgName string) {
return
}

loadByID(cmdCtx, pkg.PackageID, stg.StorageID)
loadByID(cmdCtx, pkg.PackageID, stg.StorageID, rootPath)
}

func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID) {
func loadByID(cmdCtx *CommandContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID, rootPath string) {
userID := cdssdk.UserID(1)
startTime := time.Now()

hubID, taskID, err := cmdCtx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(userID, pkgID, stgID)
err := cmdCtx.Cmdline.Svc.StorageSvc().LoadPackage(userID, pkgID, stgID, rootPath)
if err != nil {
fmt.Println(err)
return
}

for {
complete, fullPath, err := cmdCtx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10)
if err != nil {
fmt.Println(err)
return
}

if complete {
fmt.Printf("Package loaded to: %s in %v\n", fullPath, time.Since(startTime))
break
}
}
fmt.Printf("Package loaded to: %v:%v in %v\n", stgID, rootPath, time.Since(startTime))
}

+ 15
- 7
client/internal/cmdline/newloadp.go View File

@@ -29,26 +29,34 @@ func init() {

packageName := args[2]
storageIDs := make([]cdssdk.StorageID, 0)
for _, sID := range args[3:] {
sID, err := strconv.ParseInt(sID, 10, 64)
rootPathes := make([]string, 0)
for _, dst := range args[3:] {
comps := strings.Split(dst, ":")
if len(comps) != 2 {
fmt.Println("invalid storage destination: ", dst)
return
}

sID, err := strconv.ParseInt(comps[0], 10, 64)
if err != nil {
fmt.Println(err)
return
}
storageIDs = append(storageIDs, cdssdk.StorageID(sID))
rootPathes = append(rootPathes, comps[1])
}

newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs)
newloadp(cmdCtx, localPath, cdssdk.BucketID(bktID), packageName, storageIDs, rootPathes)
},
}

rootCmd.AddCommand(cmd)
}

func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID) {
func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, packageName string, storageIDs []cdssdk.StorageID, rootPathes []string) {
userID := cdssdk.UserID(1)

up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs)
up, err := cmdCtx.Cmdline.Svc.Uploader.BeginCreateLoad(userID, bucketID, packageName, storageIDs, rootPathes)
if err != nil {
fmt.Println(err)
return
@@ -94,7 +102,7 @@ func newloadp(cmdCtx *CommandContext, path string, bucketID cdssdk.BucketID, pac
}

wr := table.NewWriter()
wr.AppendHeader(table.Row{"ID", "Name", "FileCount", "TotalSize", "LoadedDirs"})
wr.AppendRow(table.Row{ret.Package.PackageID, ret.Package.Name, fileCount, totalSize, strings.Join(ret.LoadedDirs, "\n")})
wr.AppendHeader(table.Row{"ID", "Name", "FileCount", "TotalSize"})
wr.AppendRow(table.Row{ret.Package.PackageID, ret.Package.Name, fileCount, totalSize})
fmt.Println(wr.Render())
}

+ 0
- 23
client/internal/cmdline/package.go View File

@@ -181,26 +181,6 @@ func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) er
return nil
}

// PackageGetLoadedStorages 获取指定包裹的已加载节点信息。
//
// 参数:
//
// ctx - 命令上下文。
// packageID - 包裹ID。
//
// 返回值:
//
// error - 操作过程中发生的任何错误。
func PackageGetLoadedStorages(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(1)
hubIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedStorages(userID, packageID)
fmt.Printf("hubIDs: %v\n", hubIDs)
if err != nil {
return fmt.Errorf("get package %d loaded storages failed, err: %w", packageID, err)
}
return nil
}

// 初始化命令行工具的包相关命令。
func init() {
commands.MustAdd(PackageListBucketPackages, "pkg", "ls")
@@ -213,7 +193,4 @@ func init() {

// 查询package缓存到哪些节点
commands.MustAdd(PackageGetCachedStorages, "pkg", "cached")

// 查询package调度到哪些节点
commands.MustAdd(PackageGetLoadedStorages, "pkg", "loaded")
}

+ 0
- 39
client/internal/cmdline/storage.go View File

@@ -7,42 +7,6 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

// StorageLoadPackage 加载指定的包到存储系统中。
// ctx: 命令上下文,提供必要的服务和环境配置。
// packageID: 需要加载的包的唯一标识。
// storageID: 目标存储系统的唯一标识。
// 返回值: 执行过程中遇到的任何错误。
func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageID cdssdk.StorageID) error {
startTime := time.Now()
defer func() {
// 打印函数执行时间
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

// 开始加载包到存储系统
hubID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(1, packageID, storageID)
if err != nil {
return fmt.Errorf("start loading package to storage: %w", err)
}

// 循环等待加载完成
for {
complete, fullPath, err := ctx.Cmdline.Svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10)
if complete {
if err != nil {
return fmt.Errorf("moving complete with: %w", err)
}

fmt.Printf("Load To: %s\n", fullPath)
return nil
}

if err != nil {
return fmt.Errorf("wait moving: %w", err)
}
}
}

// StorageCreatePackage 创建一个新的包并上传到指定的存储系统。
// ctx: 命令上下文,提供必要的服务和环境配置。
// bucketID: 存储桶的唯一标识,包将被上传到这个存储桶中。
@@ -83,9 +47,6 @@ func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name str

// 初始化函数,注册加载包和创建包的命令到命令行解析器。
func init() {
// 注册加载包命令
commands.MustAdd(StorageLoadPackage, "stg", "pkg", "load")

// 注册创建包命令
commands.MustAdd(StorageCreatePackage, "stg", "pkg", "new")
}

+ 6
- 6
client/internal/cmdline/test.go View File

@@ -139,9 +139,9 @@ func init() {

toDrv, drvStr := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), exec.NewRange(0, 1293))
ft.AddTo(toDrv)
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(2), "EC2"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)
@@ -203,9 +203,9 @@ func init() {
ft := ioswitch2.NewFromTo()
ft.ECParam = &cdssdk.DefaultECRedundancy
ft.AddFrom(ioswitch2.NewFromShardstore("4E69A8B8CD9F42EDE371DA94458BADFB2308AFCA736AA393784A3D81F4746377", *stgs.Storages[1].MasterHub, stgs.Storages[1].Storage, ioswitch2.RawStream()))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECSrteam(2), "EC2"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(0), "EC0"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(1), "EC1"))
ft.AddTo(ioswitch2.NewToShardStore(*stgs.Storages[0].MasterHub, *stgs.Storages[0], ioswitch2.ECStream(2), "EC2"))

plans := exec.NewPlanBuilder()
err = parser.Parse(ft, plans)


+ 10
- 8
client/internal/config/config.go View File

@@ -9,18 +9,20 @@ import (
stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
)

type Config struct {
Local stgmodels.LocalMachineInfo `json:"local"`
AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"`
Logger logger.Config `json:"logger"`
RabbitMQ mq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
StorageID cdssdk.StorageID `json:"storageID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。
Local stgmodels.LocalMachineInfo `json:"local"`
AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"`
Logger logger.Config `json:"logger"`
RabbitMQ mq.Config `json:"rabbitMQ"`
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
Downloader downloader.Config `json:"downloader"`
DownloadStrategy strategy.Config `json:"downloadStrategy"`
StorageID cdssdk.StorageID `json:"storageID"` // TODO 进行访问量统计时,当前客户端所属的存储ID。临时解决方案。
}

var cfg Config


+ 8
- 25
client/internal/http/package.go View File

@@ -103,7 +103,13 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
return
}

up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo)
if len(req.Info.LoadTo) != len(req.Info.LoadToPath) {
log.Warnf("load to and load to path count not match")
ctx.JSON(http.StatusOK, Failed(errorcode.BadArgument, "load to and load to path count not match"))
return
}

up, err := s.svc.Uploader.BeginCreateLoad(req.Info.UserID, req.Info.BucketID, req.Info.Name, req.Info.LoadTo, req.Info.LoadToPath)
if err != nil {
log.Warnf("begin package create load: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("begin package create load: %v", err)))
@@ -149,7 +155,7 @@ func (s *PackageService) CreateLoad(ctx *gin.Context) {
objs[i] = ret.Objects[pathes[i]]
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageCreateLoadResp{Package: ret.Package, Objects: objs, LoadedDirs: ret.LoadedDirs}))
ctx.JSON(http.StatusOK, OK(cdsapi.PackageCreateLoadResp{Package: ret.Package, Objects: objs}))

}
func (s *PackageService) Delete(ctx *gin.Context) {
@@ -236,26 +242,3 @@ func (s *PackageService) GetCachedStorages(ctx *gin.Context) {

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetCachedStoragesResp{PackageCachingInfo: resp}))
}

// GetLoadedStorages 处理获取包的加载节点的HTTP请求。
func (s *PackageService) GetLoadedStorages(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetLoadedStorages")

var req cdsapi.PackageGetLoadedStoragesReq
if err := ctx.ShouldBindQuery(&req); err != nil {
log.Warnf("binding query: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

stgIDs, err := s.svc.PackageSvc().GetLoadedStorages(req.UserID, req.PackageID)
if err != nil {
log.Warnf("get package loaded storages failed: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package loaded storages failed"))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetLoadedStoragesResp{
StorageIDs: stgIDs,
}))
}

+ 0
- 1
client/internal/http/server.go View File

@@ -62,7 +62,6 @@ func (s *Server) initRouters() {
rt.POST(cdsapi.PackageClonePath, s.Package().Clone)
rt.GET(cdsapi.PackageListBucketPackagesPath, s.Package().ListBucketPackages)
rt.GET(cdsapi.PackageGetCachedStoragesPath, s.Package().GetCachedStorages)
rt.GET(cdsapi.PackageGetLoadedStoragesPath, s.Package().GetLoadedStorages)

rt.POST(cdsapi.StorageLoadPackagePath, s.Storage().LoadPackage)
rt.POST(cdsapi.StorageCreatePackagePath, s.Storage().CreatePackage)


+ 4
- 29
client/internal/http/storage.go View File

@@ -1,9 +1,7 @@
package http

import (
"fmt"
"net/http"
"path/filepath"
"time"

"github.com/gin-gonic/gin"
@@ -32,37 +30,14 @@ func (s *StorageService) LoadPackage(ctx *gin.Context) {
return
}

hubID, taskID, err := s.svc.StorageSvc().StartStorageLoadPackage(req.UserID, req.PackageID, req.StorageID)
err := s.svc.StorageSvc().LoadPackage(req.UserID, req.PackageID, req.StorageID, req.RootPath)
if err != nil {
log.Warnf("start storage load package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("start loading: %v", err)))
log.Warnf("loading package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "loading package failed"))
return
}

for {
complete, ret, err := s.svc.StorageSvc().WaitStorageLoadPackage(hubID, taskID, time.Second*10)
if complete {
if err != nil {
log.Warnf("loading complete with: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("loading complete with: %v", err)))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{
FullPath: filepath.Join(ret.RemoteBase, ret.PackagePath),
PackagePath: ret.PackagePath,
LocalBase: ret.LocalBase,
RemoteBase: ret.RemoteBase,
}))
return
}

if err != nil {
log.Warnf("wait loadding: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("wait loading: %v", err)))
return
}
}
ctx.JSON(http.StatusOK, OK(cdsapi.StorageLoadPackageResp{}))
}

func (s *StorageService) CreatePackage(ctx *gin.Context) {


+ 1
- 1
client/internal/services/hub.go View File

@@ -27,7 +27,7 @@ func (svc *Service) HubSvc() *HubService {
//
// []cdssdk.Hub - 获取到的节点信息列表
// error - 如果过程中发生错误,则返回错误信息
func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]cdssdk.Hub, error) {
func (svc *HubService) GetHubs(hubIDs []cdssdk.HubID) ([]*cdssdk.Hub, error) {
// 从协调器MQ池中获取一个客户端实例
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {


+ 0
- 17
client/internal/services/package.go View File

@@ -143,20 +143,3 @@ func (svc *PackageService) GetCachedStorages(userID cdssdk.UserID, packageID cds
}
return tmp, nil
}

// GetLoadedStorages 获取指定包加载的节点列表
func (svc *PackageService) GetLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]cdssdk.StorageID, error) {
// 从协调器MQ池中获取客户端
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

// 向协调器请求获取加载指定包的节点ID列表
resp, err := coorCli.GetPackageLoadedStorages(coormq.ReqGetPackageLoadedStorages(userID, packageID))
if err != nil {
return nil, fmt.Errorf("get package loaded storages: %w", err)
}
return resp.StorageIDs, nil
}

+ 25
- 11
client/internal/services/service.go View File

@@ -7,24 +7,38 @@ import (
"gitlink.org.cn/cloudream/storage/client/internal/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/accessstat"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"
)

// Service 结构体封装了分布锁服务和任务管理服务。
type Service struct {
DistLock *distlock.Service
TaskMgr *task.Manager
Downloader *downloader.Downloader
AccessStat *accessstat.AccessStat
Uploader *uploader.Uploader
DistLock *distlock.Service
TaskMgr *task.Manager
Downloader *downloader.Downloader
AccessStat *accessstat.AccessStat
Uploader *uploader.Uploader
StrategySelector *strategy.Selector
StorageMeta *metacache.StorageMeta
}

func NewService(distlock *distlock.Service, taskMgr *task.Manager, downloader *downloader.Downloader, accStat *accessstat.AccessStat, uploder *uploader.Uploader) (*Service, error) {
func NewService(
distlock *distlock.Service,
taskMgr *task.Manager,
downloader *downloader.Downloader,
accStat *accessstat.AccessStat,
uploder *uploader.Uploader,
strategySelector *strategy.Selector,
storageMeta *metacache.StorageMeta,
) (*Service, error) {
return &Service{
DistLock: distlock,
TaskMgr: taskMgr,
Downloader: downloader,
AccessStat: accStat,
Uploader: uploder,
DistLock: distlock,
TaskMgr: taskMgr,
Downloader: downloader,
AccessStat: accStat,
Uploader: uploder,
StrategySelector: strategySelector,
StorageMeta: storageMeta,
}, nil
}

+ 68
- 45
client/internal/services/storage.go View File

@@ -1,13 +1,20 @@
package services

import (
"context"
"fmt"
"path"
"time"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)
@@ -50,74 +57,90 @@ func (svc *StorageService) GetByName(userID cdssdk.UserID, name string) (*model.
return &getResp.Storage, nil
}

func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) (cdssdk.HubID, string, error) {
func (svc *StorageService) LoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID, rootPath string) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return 0, "", fmt.Errorf("new coordinator client: %w", err)
return fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{storageID}))
if err != nil {
return 0, "", fmt.Errorf("getting storage info: %w", err)
destStg := svc.StorageMeta.Get(storageID)
if destStg == nil {
return fmt.Errorf("storage not found: %d", storageID)
}

if stgResp.Storages[0].Storage.ShardStore == nil {
return 0, "", fmt.Errorf("shard storage is not enabled")
if destStg.MasterHub == nil {
return fmt.Errorf("storage %v has no master hub", storageID)
}

agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.HubID)
details, err := coorCli.GetPackageObjectDetails(coormq.ReqGetPackageObjectDetails(packageID))
if err != nil {
return 0, "", fmt.Errorf("new agent client: %w", err)
return err
}
defer stgglb.AgentMQPool.Release(agentCli)

startResp, err := agentCli.StartStorageLoadPackage(agtmq.NewStartStorageLoadPackage(userID, packageID, storageID))
if err != nil {
return 0, "", fmt.Errorf("start storage load package: %w", err)
}
var pinned []cdssdk.ObjectID
plans := exec.NewPlanBuilder()
for _, obj := range details.Objects {
strg, err := svc.StrategySelector.Select(strategy.Request{
Detail: obj,
DestHub: destStg.MasterHub.HubID,
})
if err != nil {
return fmt.Errorf("select download strategy: %w", err)
}

return stgResp.Storages[0].MasterHub.HubID, startResp.TaskID, nil
}
ft := ioswitch2.NewFromTo()
switch strg := strg.(type) {
case *strategy.DirectStrategy:
ft.AddFrom(ioswitch2.NewFromShardstore(strg.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage.Storage, ioswitch2.RawStream()))

type StorageLoadPackageResult struct {
PackagePath string
LocalBase string
RemoteBase string
}
case *strategy.ECReconstructStrategy:
for i, b := range strg.Blocks {
ft.AddFrom(ioswitch2.NewFromShardstore(b.FileHash, *strg.Storages[i].MasterHub, strg.Storages[i].Storage, ioswitch2.ECStream(b.Index)))
ft.ECParam = &strg.Redundancy
}
default:
return fmt.Errorf("unsupported download strategy: %T", strg)
}

func (svc *StorageService) WaitStorageLoadPackage(hubID cdssdk.HubID, taskID string, waitTimeout time.Duration) (bool, *StorageLoadPackageResult, error) {
agentCli, err := stgglb.AgentMQPool.Acquire(hubID)
if err != nil {
// TODO 失败是否要当做任务已经结束?
return true, nil, fmt.Errorf("new agent client: %w", err)
ft.AddTo(ioswitch2.NewLoadToShared(*destStg.MasterHub, destStg.Storage, path.Join(rootPath, obj.Object.Path)))
// 顺便保存到同存储服务的分片存储中
if destStg.Storage.ShardStore != nil {
ft.AddTo(ioswitch2.NewToShardStore(*destStg.MasterHub, *destStg, ioswitch2.RawStream(), ""))
pinned = append(pinned, obj.Object.ObjectID)
}

err = parser.Parse(ft, plans)
if err != nil {
return fmt.Errorf("parse plan: %w", err)
}
}
defer stgglb.AgentMQPool.Release(agentCli)

waitResp, err := agentCli.WaitStorageLoadPackage(agtmq.NewWaitStorageLoadPackage(taskID, waitTimeout.Milliseconds()))
mutex, err := reqbuilder.NewBuilder().
// 保护在storage目录中下载的文件
Storage().Buzy(storageID).
// 保护下载文件时同时保存到IPFS的文件
Shard().Buzy(storageID).
MutexLock(svc.DistLock)
if err != nil {
// TODO 请求失败是否要当做任务已经结束?
return true, nil, fmt.Errorf("wait storage load package: %w", err)
return fmt.Errorf("acquire locks failed, err: %w", err)
}

if !waitResp.IsComplete {
return false, nil, nil
// 记录访问统计
for _, obj := range details.Objects {
svc.AccessStat.AddAccessCounter(obj.Object.ObjectID, packageID, storageID, 1)
}

if waitResp.Error != "" {
return true, nil, fmt.Errorf("%s", waitResp.Error)
}
defer mutex.Unlock()

return true, &StorageLoadPackageResult{
PackagePath: waitResp.PackagePath,
LocalBase: waitResp.LocalBase,
RemoteBase: waitResp.RemoteBase,
}, nil
}
drv := plans.Execute(exec.NewExecContext())
_, err = drv.Wait(context.Background())
if err != nil {
return err
}

func (svc *StorageService) DeleteStoragePackage(userID int64, packageID int64, storageID int64) error {
// TODO
panic("not implement yet")
// 失败也没关系
coorCli.StoragePackageLoaded(coormq.ReqStoragePackageLoaded(userID, storageID, packageID, rootPath, pinned))
return nil
}

// 请求节点启动从Storage中上传文件的任务。会返回节点ID和任务ID


+ 16
- 6
client/main.go View File

@@ -18,6 +18,8 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"
@@ -57,13 +59,13 @@ func main() {
consMap := make(map[cdssdk.HubID]connectivity.Connectivity)
for _, con := range getCons.Connectivities {
var delay *time.Duration
if con.Delay != nil {
d := time.Duration(*con.Delay * float32(time.Millisecond))
if con.Latency != nil {
d := time.Duration(*con.Latency * float32(time.Millisecond))
delay = &d
}
consMap[con.FromHubID] = connectivity.Connectivity{
ToHubID: con.ToHubID,
Delay: delay,
Latency: delay,
}
}
conCol = connectivity.NewCollectorWithInitData(&config.Cfg().Connectivity, nil, consMap)
@@ -75,6 +77,12 @@ func main() {
conCol.CollectInPlace()
}

metaCacheHost := metacache.NewHost()
go metaCacheHost.Serve()
stgMeta := metaCacheHost.AddStorageMeta()
hubMeta := metaCacheHost.AddHubMeta()
conMeta := metaCacheHost.AddConnectivity()

// 分布式锁
distlockSvc, err := distlock.NewService(&config.Cfg().DistLock)
if err != nil {
@@ -96,13 +104,15 @@ func main() {
// 任务管理器
taskMgr := task.NewManager(distlockSvc, &conCol, stgMgr)

strgSel := strategy.NewSelector(config.Cfg().DownloadStrategy, stgMeta, hubMeta, conMeta)

// 下载器
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr)
dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol, stgMgr, strgSel)

// 上传器
uploader := uploader.NewUploader(distlockSvc, &conCol, stgMgr)
uploader := uploader.NewUploader(distlockSvc, &conCol, stgMgr, stgMeta)

svc, err := services.NewService(distlockSvc, &taskMgr, &dlder, acStat, uploader)
svc, err := services.NewService(distlockSvc, &taskMgr, &dlder, acStat, uploader, strgSel, stgMeta)
if err != nil {
logger.Warnf("new services failed, err: %s", err.Error())
os.Exit(1)


+ 3
- 1
common/assets/confs/agent.config.json View File

@@ -39,7 +39,9 @@
},
"downloader": {
"maxStripCacheCount": 100,
"highLatencyHub": 35,
"ecStripPrefetchCount": 1
},
"downloadStrategy": {
"highLatencyHub": 35
}
}

+ 3
- 1
common/assets/confs/client.config.json View File

@@ -34,8 +34,10 @@
},
"downloader": {
"maxStripCacheCount": 100,
"highLatencyHub": 35,
"ecStripPrefetchCount": 1
},
"downloadStrategy": {
"highLatencyHub": 35
},
"storageID": 0
}

+ 13
- 24
common/pkgs/connectivity/collector.go View File

@@ -13,7 +13,7 @@ import (

type Connectivity struct {
ToHubID cdssdk.HubID
Delay *time.Duration
Latency *time.Duration
TestTime time.Time
}

@@ -52,17 +52,6 @@ func NewCollectorWithInitData(cfg *Config, onCollected func(collector *Collector
return rpt
}

func (r *Collector) Get(hubID cdssdk.HubID) *Connectivity {
r.lock.RLock()
defer r.lock.RUnlock()

con, ok := r.connectivities[hubID]
if ok {
return &con
}

return nil
}
func (r *Collector) GetAll() map[cdssdk.HubID]Connectivity {
r.lock.RLock()
defer r.lock.RUnlock()
@@ -101,8 +90,8 @@ func (r *Collector) serve() {

// 为了防止同时启动的节点会集中进行Ping,所以第一次上报间隔为0-TestInterval秒之间随机
startup := true
firstReportDelay := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64())
ticker := time.NewTicker(firstReportDelay)
firstReportLatency := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64())
ticker := time.NewTicker(firstReportLatency)

loop:
for {
@@ -150,7 +139,7 @@ func (r *Collector) testing() {
wg.Add(1)
go func() {
defer wg.Done()
cons[tmpIdx] = r.ping(tmpHub)
cons[tmpIdx] = r.ping(*tmpHub)
}()
}

@@ -190,7 +179,7 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {

return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}
@@ -200,7 +189,7 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {
log.Warnf("new agent %v:%v rpc client: %w", ip, port, err)
return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}
@@ -212,13 +201,13 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {
log.Warnf("pre ping: %v", err)
return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}

// 后几次ping计算延迟
var avgDelay time.Duration
var avgLatency time.Duration
for i := 0; i < 3; i++ {
start := time.Now()
err = agtCli.Ping()
@@ -226,22 +215,22 @@ func (r *Collector) ping(hub cdssdk.Hub) Connectivity {
log.Warnf("ping: %v", err)
return Connectivity{
ToHubID: hub.HubID,
Delay: nil,
Latency: nil,
TestTime: time.Now(),
}
}

delay := time.Since(start)
avgDelay += delay
latency := time.Since(start)
avgLatency += latency

// 每次ping之间间隔1秒
<-time.After(time.Second)
}
delay := avgDelay / 3
latency := avgLatency / 3

return Connectivity{
ToHubID: hub.HubID,
Delay: &delay,
Latency: &latency,
TestTime: time.Now(),
}
}

+ 1
- 22
common/pkgs/db2/bucket.go View File

@@ -113,26 +113,5 @@ func (db *BucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketName stri
}

func (db *BucketDB) Delete(ctx SQLContext, bucketID cdssdk.BucketID) error {
if err := ctx.Exec("DELETE FROM UserBucket WHERE BucketID = ?", bucketID).Error; err != nil {
return fmt.Errorf("delete user bucket failed, err: %w", err)
}

if err := ctx.Exec("DELETE FROM Bucket WHERE BucketID = ?", bucketID).Error; err != nil {
return fmt.Errorf("delete bucket failed, err: %w", err)
}

var pkgIDs []cdssdk.PackageID
if err := ctx.Table("Package").Select("PackageID").Where("BucketID = ?", bucketID).Find(&pkgIDs).Error; err != nil {
return fmt.Errorf("query package failed, err: %w", err)
}

for _, pkgID := range pkgIDs {
if err := db.Package().SoftDelete(ctx, pkgID); err != nil {
return fmt.Errorf("set package selected failed, err: %w", err)
}

// 失败也没关系,会有定时任务再次尝试
db.Package().DeleteUnused(ctx, pkgID)
}
return nil
return ctx.Delete(&cdssdk.Bucket{}, "BucketID = ?", bucketID).Error
}

+ 0
- 18
common/pkgs/db2/model/model.go View File

@@ -67,24 +67,6 @@ func (Cache) TableName() string {
return "Cache"
}

const (
StoragePackageStateNormal = "Normal"
StoragePackageStateDeleted = "Deleted"
StoragePackageStateOutdated = "Outdated"
)

// Storage当前加载的Package
type StoragePackage struct {
StorageID cdssdk.StorageID `gorm:"column:StorageID; primaryKey; type:bigint" json:"storageID"`
PackageID cdssdk.PackageID `gorm:"column:PackageID; primaryKey; type:bigint" json:"packageID"`
UserID cdssdk.UserID `gorm:"column:UserID; primaryKey; type:bigint" json:"userID"`
State string `gorm:"column:State; type:varchar(255); not null" json:"state"`
}

func (StoragePackage) TableName() string {
return "StoragePackage"
}

type Location struct {
LocationID cdssdk.LocationID `gorm:"column:LocationID; primaryKey; type:bigint; autoIncrement" json:"locationID"`
Name string `gorm:"column:Name; type:varchar(255); not null" json:"name"`


+ 20
- 25
common/pkgs/db2/package.go View File

@@ -57,7 +57,7 @@ func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([
return ret, err
}

func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) {
func (db *PackageDB) GetUserBucketPackages(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) ([]model.Package, error) {
var ret []model.Package
err := ctx.Table("UserBucket").
Select("Package.*").
@@ -67,6 +67,15 @@ func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID cdssdk.UserID, buc
return ret, err
}

func (db *PackageDB) GetBucketPackages(ctx SQLContext, bucketID cdssdk.BucketID) ([]model.Package, error) {
var ret []model.Package
err := ctx.Table("Package").
Select("Package.*").
Where("BucketID = ?", bucketID).
Find(&ret).Error
return ret, err
}

// IsAvailable 判断一个用户是否拥有指定对象
func (db *PackageDB) IsAvailable(ctx SQLContext, userID cdssdk.UserID, packageID cdssdk.PackageID) (bool, error) {
var pkgID cdssdk.PackageID
@@ -132,19 +141,15 @@ func (db *PackageDB) Create(ctx SQLContext, bucketID cdssdk.BucketID, name strin
return newPackage, nil
}

// SoftDelete 设置一个对象被删除,并将相关数据删除
func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) error {
obj, err := db.GetByID(ctx, packageID)
if err != nil {
return fmt.Errorf("get package failed, err: %w", err)
}

if obj.State != cdssdk.PackageStateNormal {
return nil
}
func (*PackageDB) Delete(ctx SQLContext, packageID cdssdk.PackageID) error {
err := ctx.Delete(&model.Package{}, "PackageID = ?", packageID).Error
return err
}

if err := db.ChangeState(ctx, packageID, cdssdk.PackageStateDeleted); err != nil {
return fmt.Errorf("change package state failed, err: %w", err)
// 删除与Package相关的所有数据
func (db *PackageDB) DeleteComplete(ctx SQLContext, packageID cdssdk.PackageID) error {
if err := db.Package().Delete(ctx, packageID); err != nil {
return fmt.Errorf("delete package state: %w", err)
}

if err := db.ObjectAccessStat().DeleteInPackage(ctx, packageID); err != nil {
@@ -163,23 +168,13 @@ func (db *PackageDB) SoftDelete(ctx SQLContext, packageID cdssdk.PackageID) erro
return fmt.Errorf("deleting objects in package: %w", err)
}

if _, err := db.StoragePackage().SetAllPackageDeleted(ctx, packageID); err != nil {
return fmt.Errorf("set storage package deleted failed, err: %w", err)
if err := db.PackageAccessStat().DeleteByPackageID(ctx, packageID); err != nil {
return fmt.Errorf("deleting package access stat: %w", err)
}

return nil
}

// DeleteUnused 删除一个已经是Deleted状态,且不再被使用的对象
func (PackageDB) DeleteUnused(ctx SQLContext, packageID cdssdk.PackageID) error {
err := ctx.Exec("DELETE FROM Package WHERE PackageID = ? AND State = ? AND NOT EXISTS (SELECT StorageID FROM StoragePackage WHERE PackageID = ?)",
packageID,
cdssdk.PackageStateDeleted,
packageID,
).Error
return err
}

func (*PackageDB) ChangeState(ctx SQLContext, packageID cdssdk.PackageID, state string) error {
err := ctx.Exec("UPDATE Package SET State = ? WHERE PackageID = ?", state, packageID).Error
return err


+ 8
- 4
common/pkgs/db2/pinned_object.go View File

@@ -42,8 +42,10 @@ func (*PinnedObjectDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.Obj
}

func (*PinnedObjectDB) TryCreate(ctx SQLContext, stgID cdssdk.StorageID, objectID cdssdk.ObjectID, createTime time.Time) error {
err := ctx.Clauses(clause.Insert{Modifier: "ignore"}).Table("PinnedObject").Create(&cdssdk.PinnedObject{StorageID: stgID, ObjectID: objectID, CreateTime: createTime}).Error
return err
return ctx.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "ObjectID"}, {Name: "StorageID"}},
DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}),
}).Create(&cdssdk.PinnedObject{StorageID: stgID, ObjectID: objectID, CreateTime: createTime}).Error
}

func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObject) error {
@@ -51,8 +53,10 @@ func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObj
return nil
}

err := ctx.Clauses(clause.Insert{Modifier: "ignore"}).Table("PinnedObject").Create(pinneds).Error
return err
return ctx.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "ObjectID"}, {Name: "StorageID"}},
DoUpdates: clause.AssignmentColumns([]string{"CreateTime"}),
}).Create(&pinneds).Error
}

func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID cdssdk.PackageID, stgID cdssdk.StorageID) error {


+ 0
- 83
common/pkgs/db2/storage_package.go View File

@@ -1,83 +0,0 @@
package db2

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StoragePackageDB struct {
*DB
}

func (db *DB) StoragePackage() *StoragePackageDB {
return &StoragePackageDB{DB: db}
}

func (*StoragePackageDB) Get(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) (model.StoragePackage, error) {
var ret model.StoragePackage
err := ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).First(&ret).Error
return ret, err
}

func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID) ([]model.StoragePackage, error) {
var ret []model.StoragePackage
err := ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ?", storageID, packageID).Find(&ret).Error
return ret, err
}

func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID cdssdk.StorageID) ([]model.StoragePackage, error) {
var ret []model.StoragePackage
err := ctx.Table("StoragePackage").Where("StorageID = ?", storageID).Find(&ret).Error
return ret, err
}

func (*StoragePackageDB) CreateOrUpdate(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error {
sql := "INSERT INTO StoragePackage (StorageID, PackageID, UserID, State) VALUES (?, ?, ?, ?) " +
"ON DUPLICATE KEY UPDATE State = VALUES(State)"
return ctx.Exec(sql, storageID, packageID, userID, model.StoragePackageStateNormal).Error
}

func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID, state string) error {
return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).Update("State", state).Error
}

// SetStateNormal 将状态设置为Normal,如果记录状态是Deleted,则不进行操作
func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error {
return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ? AND State <> ?",
storageID, packageID, userID, model.StoragePackageStateDeleted).Update("State", model.StoragePackageStateNormal).Error
}

func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID cdssdk.PackageID, state string) (int64, error) {
ret := ctx.Table("StoragePackage").Where("PackageID = ?", packageID).Update("State", state)
if err := ret.Error; err != nil {
return 0, err
}
return ret.RowsAffected, nil
}

// SetAllPackageOutdated 将Storage中指定对象设置为已过期。只会设置Normal状态的对象
func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) {
ret := ctx.Table("StoragePackage").Where("State = ? AND PackageID = ?", model.StoragePackageStateNormal, packageID).Update("State", model.StoragePackageStateOutdated)
if err := ret.Error; err != nil {
return 0, err
}
return ret.RowsAffected, nil
}

func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID cdssdk.PackageID) (int64, error) {
return db.SetAllPackageState(ctx, packageID, model.StoragePackageStateDeleted)
}

func (*StoragePackageDB) Delete(ctx SQLContext, storageID cdssdk.StorageID, packageID cdssdk.PackageID, userID cdssdk.UserID) error {
return ctx.Table("StoragePackage").Where("StorageID = ? AND PackageID = ? AND UserID = ?", storageID, packageID, userID).Delete(&model.StoragePackage{}).Error
}

// FindPackageStorages 查询存储了指定对象的Storage
func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Storage, error) {
var ret []model.Storage
err := ctx.Table("StoragePackage").Select("Storage.*").
Joins("JOIN Storage ON StoragePackage.StorageID = Storage.StorageID").
Where("PackageID = ?", packageID).
Scan(&ret).Error
return ret, err
}

+ 7
- 3
common/pkgs/db2/user_bucket.go View File

@@ -13,10 +13,14 @@ func (db *DB) UserBucket() *UserBucketDB {
return &UserBucketDB{DB: db}
}

func (*UserBucketDB) Create(ctx SQLContext, userID int64, bucketID int64) error {
func (*UserBucketDB) Create(ctx SQLContext, userID cdssdk.UserID, bucketID cdssdk.BucketID) error {
userBucket := model.UserBucket{
UserID: cdssdk.UserID(userID),
BucketID: cdssdk.BucketID(bucketID),
UserID: userID,
BucketID: bucketID,
}
return ctx.Table("UserBucket").Create(&userBucket).Error
}

func (*UserBucketDB) DeleteByBucketID(ctx SQLContext, bucketID cdssdk.BucketID) error {
return ctx.Table("UserBucket").Where("BucketID = ?", bucketID).Delete(&model.UserBucket{}).Error
}

+ 0
- 24
common/pkgs/distlock/reqbuilder/metadata_storage_package.go View File

@@ -1,24 +0,0 @@
package reqbuilder

import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type MetadataStoragePackageLockReqBuilder struct {
*MetadataLockReqBuilder
}

func (b *MetadataLockReqBuilder) StoragePackage() *MetadataStoragePackageLockReqBuilder {
return &MetadataStoragePackageLockReqBuilder{MetadataLockReqBuilder: b}
}

func (b *MetadataStoragePackageLockReqBuilder) CreateOne(userID cdssdk.UserID, storageID cdssdk.StorageID, packageID cdssdk.PackageID) *MetadataStoragePackageLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath("StoragePackage"),
Name: lockprovider.MetadataCreateLock,
Target: *lockprovider.NewStringLockTarget().Add(userID, storageID, packageID),
})
return b
}

+ 2
- 3
common/pkgs/distlock/service.go View File

@@ -24,7 +24,7 @@ func initProviders() []distlock.PathProvider {

provs = append(provs, initMetadataLockProviders()...)

provs = append(provs, initIPFSLockProviders()...)
provs = append(provs, initShardLockProviders()...)

provs = append(provs, initStorageLockProviders()...)

@@ -45,12 +45,11 @@ func initMetadataLockProviders() []distlock.PathProvider {
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectRep"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "ObjectBlock"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Cache"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "StoragePackage"),
distlock.NewPathProvider(lockprovider.NewMetadataLock(), lockprovider.MetadataLockPathPrefix, "Location"),
}
}

func initIPFSLockProviders() []distlock.PathProvider {
func initShardLockProviders() []distlock.PathProvider {
return []distlock.PathProvider{
distlock.NewPathProvider(lockprovider.NewShardStoreLock(), lockprovider.ShardStoreLockPathPrefix, trie.WORD_ANY),
}


+ 0
- 2
common/pkgs/downloader/config.go View File

@@ -3,8 +3,6 @@ package downloader
type Config struct {
// EC模式的Object的条带缓存数量
MaxStripCacheCount int `json:"maxStripCacheCount"`
// 当到下载节点的延迟高于这个值时,该节点在评估时会有更高的分数惩罚,单位:ms
HighLatencyHubMs float64 `json:"highLatencyHubMs"`
// EC模式下,每个Object的条带的预取数量,最少为1
ECStripPrefetchCount int `json:"ecStripPrefetchCount"`
}

+ 12
- 9
common/pkgs/downloader/downloader.go View File

@@ -10,6 +10,7 @@ import (
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
)
@@ -38,23 +39,25 @@ type Downloading struct {
}

type Downloader struct {
strips *StripCache
cfg Config
conn *connectivity.Collector
stgMgr *svcmgr.Manager
strips *StripCache
cfg Config
conn *connectivity.Collector
stgMgr *svcmgr.Manager
selector *strategy.Selector
}

func NewDownloader(cfg Config, conn *connectivity.Collector, stgMgr *svcmgr.Manager) Downloader {
func NewDownloader(cfg Config, conn *connectivity.Collector, stgMgr *svcmgr.Manager, sel *strategy.Selector) Downloader {
if cfg.MaxStripCacheCount == 0 {
cfg.MaxStripCacheCount = DefaultMaxStripCacheCount
}

ch, _ := lru.New[ECStripKey, ObjectECStrip](cfg.MaxStripCacheCount)
return Downloader{
strips: ch,
cfg: cfg,
conn: conn,
stgMgr: stgMgr,
strips: ch,
cfg: cfg,
conn: conn,
stgMgr: stgMgr,
selector: sel,
}
}



+ 95
- 304
common/pkgs/downloader/iterator.go View File

@@ -4,28 +4,21 @@ import (
"context"
"fmt"
"io"
"math"
"reflect"
"time"

"github.com/samber/lo"

"gitlink.org.cn/cloudream/common/pkgs/bitmap"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

type downloadStorageInfo struct {
@@ -39,15 +32,10 @@ type DownloadContext struct {
Distlock *distlock.Service
}
type DownloadObjectIterator struct {
OnClosing func()

OnClosing func()
downloader *Downloader
reqs []downloadReqeust2
currentIndex int
inited bool

coorCli *coormq.Client
allStorages map[cdssdk.StorageID]stgmod.StorageDetail
}

func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadReqeust2) *DownloadObjectIterator {
@@ -58,68 +46,11 @@ func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadRe
}

func (i *DownloadObjectIterator) MoveNext() (*Downloading, error) {
if !i.inited {
if err := i.init(); err != nil {
return nil, err
}

i.inited = true
}

if i.currentIndex >= len(i.reqs) {
return nil, iterator.ErrNoMoreItem
}

item, err := i.doMove()
i.currentIndex++
return item, err
}

func (i *DownloadObjectIterator) init() error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new coordinator client: %w", err)
}
i.coorCli = coorCli

allStgIDsMp := make(map[cdssdk.StorageID]bool)
for _, obj := range i.reqs {
if obj.Detail == nil {
continue
}

for _, p := range obj.Detail.PinnedAt {
allStgIDsMp[p] = true
}

for _, b := range obj.Detail.Blocks {
allStgIDsMp[b.StorageID] = true
}
}

stgIDs := lo.Keys(allStgIDsMp)
getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs))
if err != nil {
return fmt.Errorf("getting storage details: %w", err)
}

i.allStorages = make(map[cdssdk.StorageID]stgmod.StorageDetail)
for idx, s := range getStgs.Storages {
if s == nil {
return fmt.Errorf("storage %v not found", stgIDs[idx])
}
if s.Storage.ShardStore == nil {
return fmt.Errorf("storage %v has no shard store", stgIDs[idx])
}

i.allStorages[s.Storage.StorageID] = *s
}

return nil
}

func (iter *DownloadObjectIterator) doMove() (*Downloading, error) {
req := iter.reqs[iter.currentIndex]
req := i.reqs[i.currentIndex]
if req.Detail == nil {
return &Downloading{
Object: nil,
@@ -128,57 +59,51 @@ func (iter *DownloadObjectIterator) doMove() (*Downloading, error) {
}, nil
}

switch red := req.Detail.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
reader, err := iter.downloadNoneOrRepObject(req)
if err != nil {
return nil, fmt.Errorf("downloading object %v: %w", req.Raw.ObjectID, err)
}
destHub := cdssdk.HubID(0)
if stgglb.Local.HubID != nil {
destHub = *stgglb.Local.HubID
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil
strg, err := i.downloader.selector.Select(strategy.Request{
Detail: *req.Detail,
Range: math2.NewRange(req.Raw.Offset, req.Raw.Length),
DestHub: destHub,
DestLocation: stgglb.Local.LocationID,
})
if err != nil {
return nil, fmt.Errorf("selecting download strategy: %w", err)
}

case *cdssdk.RepRedundancy:
reader, err := iter.downloadNoneOrRepObject(req)
var reader io.ReadCloser
switch strg := strg.(type) {
case *strategy.DirectStrategy:
reader, err = i.downloadDirect(req, *strg)
if err != nil {
return nil, fmt.Errorf("downloading rep object %v: %w", req.Raw.ObjectID, err)
return nil, fmt.Errorf("downloading object %v: %w", req.Raw.ObjectID, err)
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil

case *cdssdk.ECRedundancy:
reader, err := iter.downloadECObject(req, red)
case *strategy.ECReconstructStrategy:
reader, err = i.downloadECReconstruct(req, *strg)
if err != nil {
return nil, fmt.Errorf("downloading ec object %v: %w", req.Raw.ObjectID, err)
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil

case *cdssdk.LRCRedundancy:
reader, err := iter.downloadLRCObject(req, red)
case *strategy.LRCReconstructStrategy:
reader, err = i.downloadLRCReconstruct(req, *strg)
if err != nil {
return nil, fmt.Errorf("downloading lrc object %v: %w", req.Raw.ObjectID, err)
}

return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil
default:
return nil, fmt.Errorf("unsupported strategy type: %v", reflect.TypeOf(strg))
}

return nil, fmt.Errorf("unsupported redundancy type: %v of object %v", reflect.TypeOf(req.Detail.Object.Redundancy), req.Raw.ObjectID)
i.currentIndex++
return &Downloading{
Object: &req.Detail.Object,
File: reader,
Request: req.Raw,
}, nil
}

func (i *DownloadObjectIterator) Close() {
@@ -187,227 +112,93 @@ func (i *DownloadObjectIterator) Close() {
}
}

func (iter *DownloadObjectIterator) downloadNoneOrRepObject(obj downloadReqeust2) (io.ReadCloser, error) {
allStgs, err := iter.sortDownloadStorages(obj)
if err != nil {
return nil, err
}
func (i *DownloadObjectIterator) downloadDirect(req downloadReqeust2, strg strategy.DirectStrategy) (io.ReadCloser, error) {
logger.Debugf("downloading object %v from storage %v", req.Raw.ObjectID, strg.Storage.Storage.String())

bsc, blocks := iter.getMinReadingBlockSolution(allStgs, 1)
osc, stg := iter.getMinReadingObjectSolution(allStgs, 1)
if bsc < osc {
logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, blocks[0].Storage.Storage.String())
return iter.downloadFromStorage(&blocks[0].Storage, obj)
}
var strHandle *exec.DriverReadStream
ft := ioswitch2.NewFromTo()

if osc == math.MaxFloat64 {
// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
return nil, fmt.Errorf("no storage has this object")
toExec, handle := ioswitch2.NewToDriver(ioswitch2.RawStream())
toExec.Range = exec.Range{
Offset: req.Raw.Offset,
}

logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, stg.Storage.String())
return iter.downloadFromStorage(stg, obj)
}

func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, error) {
allStorages, err := iter.sortDownloadStorages(req)
if err != nil {
return nil, err
if req.Raw.Length != -1 {
len := req.Raw.Length
toExec.Range.Length = &len
}

bsc, blocks := iter.getMinReadingBlockSolution(allStorages, ecRed.K)
osc, stg := iter.getMinReadingObjectSolution(allStorages, ecRed.K)

if bsc < osc {
var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from blocks: ", req.Raw.ObjectID)}
for i, b := range blocks {
if i > 0 {
logStrs = append(logStrs, ", ")
}
logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage.String()))
}
logger.Debug(logStrs...)

pr, pw := io.Pipe()
go func() {
readPos := req.Raw.Offset
totalReadLen := req.Detail.Object.Size - req.Raw.Offset
if req.Raw.Length >= 0 {
totalReadLen = math2.Min(req.Raw.Length, totalReadLen)
}

firstStripIndex := readPos / ecRed.StripSize()
stripIter := NewStripIterator(iter.downloader, req.Detail.Object, blocks, ecRed, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount)
defer stripIter.Close()

for totalReadLen > 0 {
strip, err := stripIter.MoveNext()
if err == iterator.ErrNoMoreItem {
pw.CloseWithError(io.ErrUnexpectedEOF)
return
}
if err != nil {
pw.CloseWithError(err)
return
}

readRelativePos := readPos - strip.Position
curReadLen := math2.Min(totalReadLen, ecRed.StripSize()-readRelativePos)

err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen])
if err != nil {
pw.CloseWithError(err)
return
}

totalReadLen -= curReadLen
readPos += curReadLen
}
pw.Close()
}()
ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *strg.Storage.MasterHub, strg.Storage.Storage, ioswitch2.RawStream())).AddTo(toExec)
strHandle = handle

return pr, nil
plans := exec.NewPlanBuilder()
if err := parser.Parse(ft, plans); err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
if osc == math.MaxFloat64 {
return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Raw.ObjectID, ecRed.K, len(blocks))
}
exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, i.downloader.stgMgr)
exec := plans.Execute(exeCtx)
go exec.Wait(context.TODO())

logger.Debugf("downloading ec object %v from storage %v", req.Raw.ObjectID, stg.Storage.String())
return iter.downloadFromStorage(stg, req)
return exec.BeginRead(strHandle)
}

func (iter *DownloadObjectIterator) sortDownloadStorages(req downloadReqeust2) ([]*downloadStorageInfo, error) {
var stgIDs []cdssdk.StorageID
for _, id := range req.Detail.PinnedAt {
if !lo.Contains(stgIDs, id) {
stgIDs = append(stgIDs, id)
}
}
for _, b := range req.Detail.Blocks {
if !lo.Contains(stgIDs, b.StorageID) {
stgIDs = append(stgIDs, b.StorageID)
func (i *DownloadObjectIterator) downloadECReconstruct(req downloadReqeust2, strg strategy.ECReconstructStrategy) (io.ReadCloser, error) {
var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from: ", req.Raw.ObjectID)}
for i, b := range strg.Blocks {
if i > 0 {
logStrs = append(logStrs, ", ")
}

logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Storages[i].Storage.String()))
}
logger.Debug(logStrs...)

downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo)
for _, id := range req.Detail.PinnedAt {
storage, ok := downloadStorageMap[id]
if !ok {
mod := iter.allStorages[id]
storage = &downloadStorageInfo{
Storage: mod,
ObjectPinned: true,
Distance: iter.getStorageDistance(mod),
}
downloadStorageMap[id] = storage
downloadBlks := make([]downloadBlock, len(strg.Blocks))
for i, b := range strg.Blocks {
downloadBlks[i] = downloadBlock{
Block: b,
Storage: strg.Storages[i],
}

storage.ObjectPinned = true
}

for _, b := range req.Detail.Blocks {
storage, ok := downloadStorageMap[b.StorageID]
if !ok {
mod := iter.allStorages[b.StorageID]
storage = &downloadStorageInfo{
Storage: mod,
Distance: iter.getStorageDistance(mod),
}
downloadStorageMap[b.StorageID] = storage
pr, pw := io.Pipe()
go func() {
readPos := req.Raw.Offset
totalReadLen := req.Detail.Object.Size - req.Raw.Offset
if req.Raw.Length >= 0 {
totalReadLen = math2.Min(req.Raw.Length, totalReadLen)
}

storage.Blocks = append(storage.Blocks, b)
}

return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int {
return sort2.Cmp(left.Distance, right.Distance)
}), nil
}
firstStripIndex := readPos / strg.Redundancy.StripSize()
stripIter := NewStripIterator(i.downloader, req.Detail.Object, downloadBlks, strg.Redundancy, firstStripIndex, i.downloader.strips, i.downloader.cfg.ECStripPrefetchCount)
defer stripIter.Close()

func (iter *DownloadObjectIterator) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) {
gotBlocksMap := bitmap.Bitmap64(0)
var gotBlocks []downloadBlock
dist := float64(0.0)
for _, n := range sortedStgs {
for _, b := range n.Blocks {
if !gotBlocksMap.Get(b.Index) {
gotBlocks = append(gotBlocks, downloadBlock{
Storage: n.Storage,
Block: b,
})
gotBlocksMap.Set(b.Index, true)
dist += n.Distance
for totalReadLen > 0 {
strip, err := stripIter.MoveNext()
if err == iterator.ErrNoMoreItem {
pw.CloseWithError(io.ErrUnexpectedEOF)
return
}
if len(gotBlocks) >= k {
return dist, gotBlocks
if err != nil {
pw.CloseWithError(err)
return
}
}
}

return math.MaxFloat64, gotBlocks
}

func (iter *DownloadObjectIterator) getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) {
dist := math.MaxFloat64
var downloadStg *stgmod.StorageDetail
for _, n := range sortedStgs {
if n.ObjectPinned && float64(k)*n.Distance < dist {
dist = float64(k) * n.Distance
stg := n.Storage
downloadStg = &stg
}
}
readRelativePos := readPos - strip.Position
curReadLen := math2.Min(totalReadLen, strg.Redundancy.StripSize()-readRelativePos)

return dist, downloadStg
}
err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen])
if err != nil {
pw.CloseWithError(err)
return
}

func (iter *DownloadObjectIterator) getStorageDistance(stg stgmod.StorageDetail) float64 {
if stgglb.Local.HubID != nil {
if stg.MasterHub.HubID == *stgglb.Local.HubID {
return consts.StorageDistanceSameStorage
totalReadLen -= curReadLen
readPos += curReadLen
}
}

if stg.MasterHub.LocationID == stgglb.Local.LocationID {
return consts.StorageDistanceSameLocation
}

c := iter.downloader.conn.Get(stg.MasterHub.HubID)
if c == nil || c.Delay == nil || *c.Delay > time.Duration(float64(time.Millisecond)*iter.downloader.cfg.HighLatencyHubMs) {
return consts.HubDistanceHighLatencyHub
}

return consts.StorageDistanceOther
}
pw.Close()
}()

func (iter *DownloadObjectIterator) downloadFromStorage(stg *stgmod.StorageDetail, req downloadReqeust2) (io.ReadCloser, error) {
var strHandle *exec.DriverReadStream
ft := ioswitch2.NewFromTo()

toExec, handle := ioswitch2.NewToDriver(ioswitch2.RawStream())
toExec.Range = exec.Range{
Offset: req.Raw.Offset,
}
if req.Raw.Length != -1 {
len := req.Raw.Length
toExec.Range.Length = &len
}

ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *stg.MasterHub, stg.Storage, ioswitch2.RawStream())).AddTo(toExec)
strHandle = handle

plans := exec.NewPlanBuilder()
if err := parser.Parse(ft, plans); err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

exeCtx := exec.NewExecContext()
exec.SetValueByType(exeCtx, iter.downloader.stgMgr)
exec := plans.Execute(exeCtx)
go exec.Wait(context.TODO())

return exec.BeginRead(strHandle)
return pr, nil
}

+ 17
- 31
common/pkgs/downloader/lrc.go View File

@@ -6,44 +6,30 @@ import (

"gitlink.org.cn/cloudream/common/pkgs/iterator"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader/strategy"
)

func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red *cdssdk.LRCRedundancy) (io.ReadCloser, error) {
allStgs, err := iter.sortDownloadStorages(req)
if err != nil {
return nil, err
}

var blocks []downloadBlock
selectedBlkIdx := make(map[int]bool)
for _, stg := range allStgs {
for _, b := range stg.Blocks {
if b.Index >= red.M() || selectedBlkIdx[b.Index] {
continue
}
blocks = append(blocks, downloadBlock{
Storage: stg.Storage,
Block: b,
})
selectedBlkIdx[b.Index] = true
}
}
if len(blocks) < red.K {
return nil, fmt.Errorf("not enough blocks to download lrc object")
}

var logStrs []any = []any{"downloading lrc object from blocks: "}
for i, b := range blocks {
func (iter *DownloadObjectIterator) downloadLRCReconstruct(req downloadReqeust2, strg strategy.LRCReconstructStrategy) (io.ReadCloser, error) {
var logStrs []any = []any{fmt.Sprintf("downloading lrc object %v from: ", req.Raw.ObjectID)}
for i, b := range strg.Blocks {
if i > 0 {
logStrs = append(logStrs, ", ")
}
logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage.String()))

logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Index, strg.Storages[i].Storage.String()))
}
logger.Debug(logStrs...)

downloadBlks := make([]downloadBlock, len(strg.Blocks))
for i, b := range strg.Blocks {
downloadBlks[i] = downloadBlock{
Block: b,
Storage: strg.Storages[i],
}
}

pr, pw := io.Pipe()
go func() {
readPos := req.Raw.Offset
@@ -52,8 +38,8 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red
totalReadLen = math2.Min(req.Raw.Length, totalReadLen)
}

firstStripIndex := readPos / int64(red.K) / int64(red.ChunkSize)
stripIter := NewLRCStripIterator(iter.downloader, req.Detail.Object, blocks, red, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount)
firstStripIndex := readPos / int64(strg.Redundancy.K) / int64(strg.Redundancy.ChunkSize)
stripIter := NewLRCStripIterator(iter.downloader, req.Detail.Object, downloadBlks, strg.Redundancy, firstStripIndex, iter.downloader.strips, iter.downloader.cfg.ECStripPrefetchCount)
defer stripIter.Close()

for totalReadLen > 0 {
@@ -68,7 +54,7 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red
}

readRelativePos := readPos - strip.Position
nextStripPos := strip.Position + int64(red.K)*int64(red.ChunkSize)
nextStripPos := strip.Position + int64(strg.Redundancy.K)*int64(strg.Redundancy.ChunkSize)
curReadLen := math2.Min(totalReadLen, nextStripPos-readPos)

err = io2.WriteAll(pw, strip.Data[readRelativePos:readRelativePos+curReadLen])


+ 2
- 2
common/pkgs/downloader/lrc_strip_iterator.go View File

@@ -17,7 +17,7 @@ type LRCStripIterator struct {
downloder *Downloader
object cdssdk.Object
blocks []downloadBlock
red *cdssdk.LRCRedundancy
red cdssdk.LRCRedundancy
curStripIndex int64
cache *StripCache
dataChan chan dataChanEntry
@@ -26,7 +26,7 @@ type LRCStripIterator struct {
inited bool
}

func NewLRCStripIterator(downloder *Downloader, object cdssdk.Object, blocks []downloadBlock, red *cdssdk.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator {
func NewLRCStripIterator(downloder *Downloader, object cdssdk.Object, blocks []downloadBlock, red cdssdk.LRCRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *LRCStripIterator {
if maxPrefetch <= 0 {
maxPrefetch = 1
}


+ 6
- 0
common/pkgs/downloader/strategy/config.go View File

@@ -0,0 +1,6 @@
package strategy

// Config holds tunables for download-strategy selection.
type Config struct {
	// Latency threshold in ms: a hub whose measured latency to the download
	// destination is above this value receives a heavier distance penalty
	// when storages are scored.
	HighLatencyHubMs float64 `json:"highLatencyHubMs"`
}

+ 337
- 0
common/pkgs/downloader/strategy/selector.go View File

@@ -0,0 +1,337 @@
package strategy

import (
"fmt"
"math"
"reflect"
"time"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/bitmap"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/common/utils/sort2"
"gitlink.org.cn/cloudream/storage/common/consts"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
)

// Request describes one download whose strategy is to be selected.
type Request struct {
	Detail stgmod.ObjectDetail // object metadata including pins and block placement
	Range  math2.Range         // byte range of the object to download
	// DestHub may be 0. When it is non-zero, DestLocation is ignored.
	DestHub cdssdk.HubID
	// DestLocation may be 0.
	DestLocation cdssdk.LocationID
}

// Strategy is the outcome of strategy selection; each concrete
// implementation describes one way to obtain the object's data.
type Strategy interface {
	GetDetail() stgmod.ObjectDetail
}

// DirectStrategy downloads the complete object directly from a single storage.
type DirectStrategy struct {
	Detail  stgmod.ObjectDetail
	Storage stgmod.StorageDetail
}

// GetDetail returns the object metadata this strategy was built for.
func (s *DirectStrategy) GetDetail() stgmod.ObjectDetail {
	return s.Detail
}

// ECReconstructStrategy rebuilds the object from EC-coded blocks.
// Blocks and Storages are parallel slices: Blocks[i] is read from Storages[i].
type ECReconstructStrategy struct {
	Detail     stgmod.ObjectDetail
	Redundancy cdssdk.ECRedundancy
	Blocks     []stgmod.ObjectBlock
	Storages   []stgmod.StorageDetail
}

// GetDetail returns the object metadata this strategy was built for.
func (s *ECReconstructStrategy) GetDetail() stgmod.ObjectDetail {
	return s.Detail
}

// LRCReconstructStrategy rebuilds the object from LRC-coded blocks.
// Blocks and Storages are parallel slices: Blocks[i] is read from Storages[i].
type LRCReconstructStrategy struct {
	Detail     stgmod.ObjectDetail
	Redundancy cdssdk.LRCRedundancy
	Blocks     []stgmod.ObjectBlock
	Storages   []stgmod.StorageDetail
}

// GetDetail returns the object metadata this strategy was built for.
func (s *LRCReconstructStrategy) GetDetail() stgmod.ObjectDetail {
	return s.Detail
}

// Selector picks a download strategy for an object using cached storage/hub
// metadata and hub-to-hub connectivity measurements.
type Selector struct {
	cfg          Config
	storageMeta  *metacache.StorageMeta
	hubMeta      *metacache.HubMeta
	connectivity *metacache.Connectivity
}

// NewSelector creates a Selector that reads metadata from the given caches.
func NewSelector(cfg Config, storageMeta *metacache.StorageMeta, hubMeta *metacache.HubMeta, connectivity *metacache.Connectivity) *Selector {
	return &Selector{
		cfg:          cfg,
		storageMeta:  storageMeta,
		hubMeta:      hubMeta,
		connectivity: connectivity,
	}
}

// Select resolves the destination hub and dispatches to the strategy builder
// matching the object's redundancy scheme.
func (s *Selector) Select(req Request) (Strategy, error) {
	req2 := request2{
		Detail:       req.Detail,
		Range:        req.Range,
		DestLocation: req.DestLocation,
	}
	// DestHub == 0 means "no destination hub"; leave req2.DestHub nil so
	// distance scoring falls back to DestLocation.
	if req.DestHub != 0 {
		req2.DestHub = s.hubMeta.Get(req.DestHub)
	}

	switch red := req.Detail.Object.Redundancy.(type) {
	case *cdssdk.NoneRedundancy, *cdssdk.RepRedundancy:
		// Both schemes store complete copies, so they share selection logic.
		return s.selectForNoneOrRep(req2)

	case *cdssdk.ECRedundancy:
		return s.selectForEC(req2, *red)

	case *cdssdk.LRCRedundancy:
		return s.selectForLRC(req2, *red)

	default:
		return nil, fmt.Errorf("unsupported redundancy type: %v of object %v", reflect.TypeOf(req.Detail.Object.Redundancy), req.Detail.Object.ObjectID)
	}
}

// downloadStorageInfo aggregates what one storage can contribute to a
// download: whether the whole object is pinned there, which blocks it holds,
// and its distance score from the destination (lower is closer).
type downloadStorageInfo struct {
	Storage      stgmod.StorageDetail
	ObjectPinned bool
	Blocks       []stgmod.ObjectBlock
	Distance     float64
}

// downloadBlock pairs one object block with the storage it will be read from.
type downloadBlock struct {
	Storage stgmod.StorageDetail
	Block   stgmod.ObjectBlock
}

// request2 is Request with DestHub resolved to full hub metadata
// (nil when the original request carried no destination hub).
type request2 struct {
	Detail       stgmod.ObjectDetail
	Range        math2.Range
	DestHub      *cdssdk.Hub
	DestLocation cdssdk.LocationID
}

// selectForNoneOrRep picks a storage that holds a complete copy of the object
// (none/rep redundancy store whole replicas). Like selectForEC, it compares
// the cheapest block-based source against the cheapest pinned copy, so an
// object that is only pinned — with no block records — is still downloadable
// (the previous version errored out with "no block available" in that case).
func (s *Selector) selectForNoneOrRep(req request2) (Strategy, error) {
	sortedStgs := s.sortDownloadStorages(req)
	if len(sortedStgs) == 0 {
		return nil, fmt.Errorf("no storage available for download")
	}

	bsc, blks := s.getMinReadingBlockSolution(sortedStgs, 1)
	osc, stg := s.getMinReadingObjectSolution(sortedStgs, 1)

	if bsc < osc {
		return &DirectStrategy{
			Detail:  req.Detail,
			Storage: blks[0].Storage,
		}, nil
	}

	// bsc >= osc; if osc is still MaxFloat64 then neither a block nor a
	// pinned copy of the object exists anywhere.
	if osc == math.MaxFloat64 {
		return nil, fmt.Errorf("no storage has a block or a pinned copy of the object")
	}

	return &DirectStrategy{
		Detail:  req.Detail,
		Storage: stg,
	}, nil
}

// selectForEC decides between reconstructing the object from EC blocks and
// downloading a complete pinned copy, whichever has the lower total distance.
func (s *Selector) selectForEC(req request2, red cdssdk.ECRedundancy) (Strategy, error) {
	sortedStgs := s.sortDownloadStorages(req)
	if len(sortedStgs) == 0 {
		return nil, fmt.Errorf("no storage available for download")
	}

	blockCost, chosen := s.getMinReadingBlockSolution(sortedStgs, red.K)
	objectCost, objectStg := s.getMinReadingObjectSolution(sortedStgs, red.K)

	if blockCost < objectCost {
		// Reconstruction is cheaper: split the chosen pairs into the
		// parallel slices the strategy carries.
		chosenBlocks := make([]stgmod.ObjectBlock, len(chosen))
		chosenStgs := make([]stgmod.StorageDetail, len(chosen))
		for idx, c := range chosen {
			chosenBlocks[idx] = c.Block
			chosenStgs[idx] = c.Storage
		}

		return &ECReconstructStrategy{
			Detail:     req.Detail,
			Redundancy: red,
			Blocks:     chosenBlocks,
			Storages:   chosenStgs,
		}, nil
	}

	// blockCost >= objectCost; if objectCost is still MaxFloat64 there are
	// not enough blocks (and no pinned copy) to produce the object at all.
	if objectCost == math.MaxFloat64 {
		return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Detail.Object.ObjectID, red.K, len(chosen))
	}

	return &DirectStrategy{
		Detail:  req.Detail,
		Storage: objectStg,
	}, nil
}

// selectForLRC gathers the nearest distinct data/parity blocks (local parity
// blocks, index >= M, are excluded) and builds an LRC reconstruction strategy.
func (s *Selector) selectForLRC(req request2, red cdssdk.LRCRedundancy) (Strategy, error) {
	sortedStgs := s.sortDownloadStorages(req)
	if len(sortedStgs) == 0 {
		return nil, fmt.Errorf("no storage available for download")
	}

	// Walk storages nearest-first, taking each block index at most once.
	taken := make(map[int]bool)
	var chosen []downloadBlock
	for _, info := range sortedStgs {
		for _, blk := range info.Blocks {
			if blk.Index >= red.M() || taken[blk.Index] {
				continue
			}
			taken[blk.Index] = true
			chosen = append(chosen, downloadBlock{
				Storage: info.Storage,
				Block:   blk,
			})
		}
	}
	if len(chosen) < red.K {
		return nil, fmt.Errorf("not enough blocks to download lrc object")
	}

	chosenBlocks := make([]stgmod.ObjectBlock, len(chosen))
	chosenStgs := make([]stgmod.StorageDetail, len(chosen))
	for idx, c := range chosen {
		chosenBlocks[idx] = c.Block
		chosenStgs[idx] = c.Storage
	}

	return &LRCReconstructStrategy{
		Detail:     req.Detail,
		Redundancy: red,
		Blocks:     chosenBlocks,
		Storages:   chosenStgs,
	}, nil
}

// sortDownloadStorages gathers every storage that pins the object or holds one
// of its blocks, resolves its metadata from the cache, and returns the list
// sorted by ascending distance from the download destination. Storages whose
// metadata or master hub cannot be resolved are silently skipped.
//
// Fix: the previous version first built a deduplicated stgIDs slice with
// O(n^2) lo.Contains scans and then never read it — that dead work is removed.
func (s *Selector) sortDownloadStorages(req request2) []*downloadStorageInfo {
	downloadStorageMap := make(map[cdssdk.StorageID]*downloadStorageInfo)

	// resolve returns the (memoized) info entry for a storage, or nil when
	// its metadata or master hub is unavailable.
	resolve := func(id cdssdk.StorageID) *downloadStorageInfo {
		if info, ok := downloadStorageMap[id]; ok {
			return info
		}
		mod := s.storageMeta.Get(id)
		if mod == nil || mod.MasterHub == nil {
			return nil
		}
		info := &downloadStorageInfo{
			Storage:  *mod,
			Distance: s.getStorageDistance(req, *mod),
		}
		downloadStorageMap[id] = info
		return info
	}

	for _, id := range req.Detail.PinnedAt {
		if info := resolve(id); info != nil {
			info.ObjectPinned = true
		}
	}

	for _, b := range req.Detail.Blocks {
		if info := resolve(b.StorageID); info != nil {
			info.Blocks = append(info.Blocks, b)
		}
	}

	return sort2.Sort(lo.Values(downloadStorageMap), func(left, right *downloadStorageInfo) int {
		return sort2.Cmp(left.Distance, right.Distance)
	})
}

// getStorageDistance scores how "far" a candidate storage is from the download
// destination; smaller scores are preferred. When a destination hub is known,
// same-hub beats same-location, and high-latency hubs are penalized; otherwise
// only the destination location (if any) is considered.
func (s *Selector) getStorageDistance(req request2, src stgmod.StorageDetail) float64 {
	if req.DestHub != nil {
		switch {
		case src.MasterHub.HubID == req.DestHub.HubID:
			return consts.StorageDistanceSameStorage

		case src.MasterHub.LocationID == req.DestHub.LocationID:
			return consts.StorageDistanceSameLocation
		}

		// Unknown or high measured latency pushes the storage far away.
		latency := s.connectivity.Get(src.MasterHub.HubID, req.DestHub.HubID)
		if latency == nil || *latency > time.Duration(float64(time.Millisecond)*s.cfg.HighLatencyHubMs) {
			return consts.HubDistanceHighLatencyHub
		}
		return consts.StorageDistanceOther
	}

	if req.DestLocation != 0 && src.MasterHub.LocationID == req.DestLocation {
		return consts.StorageDistanceSameLocation
	}

	return consts.StorageDistanceOther
}

// getMinReadingBlockSolution greedily picks k distinct block indexes from the
// distance-sorted storages and returns the summed distance plus the chosen
// blocks. When fewer than k distinct blocks exist, it returns MaxFloat64 and
// whatever was collected.
func (s *Selector) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) {
	seen := bitmap.Bitmap64(0)
	var picked []downloadBlock
	total := float64(0)

	for _, info := range sortedStgs {
		for _, blk := range info.Blocks {
			if seen.Get(blk.Index) {
				continue
			}
			seen.Set(blk.Index, true)
			picked = append(picked, downloadBlock{
				Storage: info.Storage,
				Block:   blk,
			})
			total += info.Distance

			if len(picked) >= k {
				return total, picked
			}
		}
	}

	return math.MaxFloat64, picked
}

// getMinReadingObjectSolution finds the cheapest storage that pins the whole
// object. The cost is k * distance so it is comparable against the k-block
// solution; MaxFloat64 (and a zero StorageDetail) means no pinned copy exists.
func (s *Selector) getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, stgmod.StorageDetail) {
	best := math.MaxFloat64
	var bestStg stgmod.StorageDetail

	for _, info := range sortedStgs {
		if !info.ObjectPinned {
			continue
		}
		if cost := float64(k) * info.Distance; cost < best {
			best = cost
			bestStg = info.Storage
		}
	}

	return best, bestStg
}

+ 4
- 4
common/pkgs/downloader/strip_iterator.go View File

@@ -28,7 +28,7 @@ type StripIterator struct {
downloader *Downloader
object cdssdk.Object
blocks []downloadBlock
red *cdssdk.ECRedundancy
red cdssdk.ECRedundancy
curStripIndex int64
cache *StripCache
dataChan chan dataChanEntry
@@ -46,7 +46,7 @@ type dataChanEntry struct {
Error error
}

func NewStripIterator(downloader *Downloader, object cdssdk.Object, blocks []downloadBlock, red *cdssdk.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator {
func NewStripIterator(downloader *Downloader, object cdssdk.Object, blocks []downloadBlock, red cdssdk.ECRedundancy, beginStripIndex int64, cache *StripCache, maxPrefetch int) *StripIterator {
if maxPrefetch <= 0 {
maxPrefetch = 1
}
@@ -199,10 +199,10 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) {
}

ft := ioswitch2.NewFromTo()
ft.ECParam = s.red
ft.ECParam = &s.red
for _, b := range s.blocks {
stg := b.Storage
ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *stg.MasterHub, stg.Storage, ioswitch2.ECSrteam(b.Block.Index)))
ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *stg.MasterHub, stg.Storage, ioswitch2.ECStream(b.Block.Index)))
}

toExec, hd := ioswitch2.NewToDriverWithRange(ioswitch2.RawStream(), exec.Range{


+ 8
- 12
common/pkgs/ioswitch2/fromto.go View File

@@ -38,7 +38,7 @@ func RawStream() StreamIndex {
}
}

func ECSrteam(index int) StreamIndex {
func ECStream(index int) StreamIndex {
return StreamIndex{
Type: StreamIndexEC,
Index: index,
@@ -195,20 +195,16 @@ func (t *ToShardStore) GetRange() exec.Range {
}

type LoadToShared struct {
Hub cdssdk.Hub
Storage cdssdk.Storage
UserID cdssdk.UserID
PackageID cdssdk.PackageID
Path string
Hub cdssdk.Hub
Storage cdssdk.Storage
ObjectPath string
}

func NewLoadToShared(hub cdssdk.Hub, storage cdssdk.Storage, userID cdssdk.UserID, packageID cdssdk.PackageID, path string) *LoadToShared {
func NewLoadToShared(hub cdssdk.Hub, storage cdssdk.Storage, objectPath string) *LoadToShared {
return &LoadToShared{
Hub: hub,
Storage: storage,
UserID: userID,
PackageID: packageID,
Path: path,
Hub: hub,
Storage: storage,
ObjectPath: objectPath,
}
}



+ 15
- 40
common/pkgs/ioswitch2/ops2/shared_store.go View File

@@ -16,12 +16,9 @@ func init() {
}

type SharedLoad struct {
Input exec.VarID `json:"input"`
StorageID cdssdk.StorageID `json:"storageID"`
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
Path string `json:"path"`
FullPathOutput exec.VarID `json:"fullPathOutput"`
Input exec.VarID
StorageID cdssdk.StorageID
ObjectPath string
}

func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
@@ -46,44 +43,29 @@ func (o *SharedLoad) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
}
defer input.Stream.Close()

fullPath, err := store.WritePackageObject(o.UserID, o.PackageID, o.Path, input.Stream)
if err != nil {
return fmt.Errorf("writing file to shard store: %w", err)
}

if o.FullPathOutput > 0 {
e.PutVar(o.FullPathOutput, &exec.StringValue{
Value: fullPath,
})
}
return nil
return store.Write(o.ObjectPath, input.Stream)
}

func (o *SharedLoad) String() string {
return fmt.Sprintf("SharedLoad %v -> %v:%v/%v/%v", o.Input, o.StorageID, o.UserID, o.PackageID, o.Path)
return fmt.Sprintf("SharedLoad %v -> %v:%v", o.Input, o.StorageID, o.ObjectPath)
}

type SharedLoadNode struct {
dag.NodeBase
To ioswitch2.To
StorageID cdssdk.StorageID
UserID cdssdk.UserID
PackageID cdssdk.PackageID
Path string
To ioswitch2.To
StorageID cdssdk.StorageID
ObjectPath string
}

func (b *GraphNodeBuilder) NewSharedLoad(to ioswitch2.To, stgID cdssdk.StorageID, userID cdssdk.UserID, packageID cdssdk.PackageID, path string) *SharedLoadNode {
func (b *GraphNodeBuilder) NewSharedLoad(to ioswitch2.To, stgID cdssdk.StorageID, objPath string) *SharedLoadNode {
node := &SharedLoadNode{
To: to,
StorageID: stgID,
UserID: userID,
PackageID: packageID,
Path: path,
To: to,
StorageID: stgID,
ObjectPath: objPath,
}
b.AddNode(node)

node.InputStreams().Init(1)
node.OutputValues().Init(node, 1)
return node
}

@@ -102,17 +84,10 @@ func (t *SharedLoadNode) Input() dag.StreamInputSlot {
}
}

func (t *SharedLoadNode) FullPathVar() *dag.ValueVar {
return t.OutputValues().Get(0)
}

func (t *SharedLoadNode) GenerateOp() (exec.Op, error) {
return &SharedLoad{
Input: t.InputStreams().Get(0).VarID,
StorageID: t.StorageID,
UserID: t.UserID,
PackageID: t.PackageID,
Path: t.Path,
FullPathOutput: t.OutputValues().Get(0).VarID,
Input: t.InputStreams().Get(0).VarID,
StorageID: t.StorageID,
ObjectPath: t.ObjectPath,
}, nil
}

+ 6
- 6
common/pkgs/ioswitch2/parser/parser.go View File

@@ -236,7 +236,7 @@ func extend(ctx *ParseContext) error {
for i := 0; i < ctx.Ft.ECParam.K; i++ {
ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
Stream: splitNode.SubStream(i),
StreamIndex: ioswitch2.ECSrteam(i),
StreamIndex: ioswitch2.ECStream(i),
})
}
}
@@ -276,14 +276,14 @@ func extend(ctx *ParseContext) error {
for i := 0; i < ctx.Ft.ECParam.N; i++ {
ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
Stream: mulNode.NewOutput(i),
StreamIndex: ioswitch2.ECSrteam(i),
StreamIndex: ioswitch2.ECStream(i),
})
}

joinNode := ctx.DAG.NewChunkedJoin(ctx.Ft.ECParam.ChunkSize)
for i := 0; i < ctx.Ft.ECParam.K; i++ {
// 不可能找不到流
joinNode.AddInput(findOutputStream(ctx, ioswitch2.ECSrteam(i)))
joinNode.AddInput(findOutputStream(ctx, ioswitch2.ECStream(i)))
}
ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
Stream: joinNode.Joined(),
@@ -317,14 +317,14 @@ func extend(ctx *ParseContext) error {
mulNode.AddInput(splitNode.SubStream(i), i)
ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
Stream: splitNode.SubStream(i),
StreamIndex: ioswitch2.ECSrteam(i),
StreamIndex: ioswitch2.ECStream(i),
})
}

for i := 0; i < ctx.Ft.ECParam.N; i++ {
ctx.IndexedStreams = append(ctx.IndexedStreams, IndexedStream{
Stream: mulNode.NewOutput(i),
StreamIndex: ioswitch2.ECSrteam(i),
StreamIndex: ioswitch2.ECStream(i),
})
}
}
@@ -471,7 +471,7 @@ func buildToNode(ctx *ParseContext, t ioswitch2.To) (ops2.ToNode, error) {
return n, nil

case *ioswitch2.LoadToShared:
n := ctx.DAG.NewSharedLoad(t, t.Storage.StorageID, t.UserID, t.PackageID, t.Path)
n := ctx.DAG.NewSharedLoad(t, t.Storage.StorageID, t.ObjectPath)

if err := setEnvByAddress(n, t.Hub, t.Hub.Address); err != nil {
return nil, err


+ 96
- 0
common/pkgs/metacache/connectivity.go View File

@@ -0,0 +1,96 @@
package metacache

import (
"sync"
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// AddConnectivity creates a Connectivity cache, registers it with the host
// (so the host's periodic sweep calls its ClearOutdated), and returns it.
func (m *MetaCacheHost) AddConnectivity() *Connectivity {
	cache := &Connectivity{
		entries: make(map[cdssdk.HubID]*ConnectivityEntry),
	}

	m.caches = append(m.caches, cache)
	return cache
}

// Connectivity caches hub-to-hub connectivity measurements fetched from the
// coordinator, keyed by the source hub. Safe for concurrent use.
type Connectivity struct {
	lock    sync.RWMutex
	entries map[cdssdk.HubID]*ConnectivityEntry
}

func (c *Connectivity) Get(from cdssdk.HubID, to cdssdk.HubID) *time.Duration {
for i := 0; i < 2; i++ {
c.lock.RLock()
entry, ok := c.entries[from]
if ok {
con, ok := entry.To[to]
if ok {
c.lock.RUnlock()

if con.Latency == nil {
return nil
}
l := time.Millisecond * time.Duration(*con.Latency)
return &l
}
}
c.lock.RUnlock()

c.load(from)
}

return nil
}

// ClearOutdated drops every cached entry older than five minutes, forcing a
// reload on the next Get for that source hub.
func (c *Connectivity) ClearOutdated() {
	const maxAge = time.Minute * 5

	c.lock.Lock()
	defer c.lock.Unlock()

	for hubID, entry := range c.entries {
		if time.Since(entry.UpdateTime) > maxAge {
			delete(c.entries, hubID)
		}
	}
}

// load fetches the connectivity data originating from hubID via the
// coordinator and stores it in the cache, replacing any previous entry.
// Failures are logged and otherwise ignored (best-effort refresh).
func (c *Connectivity) load(hubID cdssdk.HubID) {
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		logger.Warnf("new coordinator client: %v", err)
		return
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	resp, err := coorCli.GetHubConnectivities(coormq.ReqGetHubConnectivities([]cdssdk.HubID{hubID}))
	if err != nil {
		logger.Warnf("get hub connectivities: %v", err)
		return
	}

	// Build the entry outside the lock; only the map insert is guarded.
	entry := &ConnectivityEntry{
		From:       hubID,
		To:         make(map[cdssdk.HubID]cdssdk.HubConnectivity, len(resp.Connectivities)),
		UpdateTime: time.Now(),
	}
	for _, con := range resp.Connectivities {
		entry.To[con.ToHubID] = con
	}

	c.lock.Lock()
	c.entries[hubID] = entry
	c.lock.Unlock()
}

// ConnectivityEntry holds all known connectivity measurements that originate
// from a single hub.
type ConnectivityEntry struct {
	From       cdssdk.HubID                          // source hub of every measurement in To
	To         map[cdssdk.HubID]cdssdk.HubConnectivity // destination hub -> measurement
	UpdateTime time.Time                             // when this entry was fetched; used by ClearOutdated
}

+ 27
- 0
common/pkgs/metacache/host.go View File

@@ -0,0 +1,27 @@
package metacache

import "time"

// MetaCache is implemented by every cache managed by a MetaCacheHost; the
// host periodically asks each registered cache to evict expired entries.
type MetaCache interface {
	ClearOutdated()
}

// MetaCacheHost owns a set of metadata caches and drives their periodic
// cleanup (see Serve). Caches are registered through the Add* methods.
type MetaCacheHost struct {
	caches []MetaCache
}

// NewHost creates an empty MetaCacheHost with no registered caches.
func NewHost() *MetaCacheHost {
	return &MetaCacheHost{}
}

// Serve blocks forever, invoking ClearOutdated on every registered cache once
// per minute.
//
// NOTE(review): the caches slice is read here without synchronization, so all
// Add* registrations must complete before Serve is started.
func (m *MetaCacheHost) Serve() {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()

	// Ranging over the ticker channel replaces the original single-case
	// select loop (staticcheck S1000); behavior is identical.
	for range ticker.C {
		for _, cache := range m.caches {
			cache.ClearOutdated()
		}
	}
}

+ 75
- 0
common/pkgs/metacache/hubmeta.go View File

@@ -0,0 +1,75 @@
package metacache

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// AddHubMeta registers a hub metadata cache (5 minute expiry) with the host
// and returns it.
func (m *MetaCacheHost) AddHubMeta() *HubMeta {
	hm := &HubMeta{}
	hm.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cdssdk.HubID, cdssdk.Hub]{
		Getter: hm.load,
		Expire: 5 * time.Minute,
	})
	m.caches = append(m.caches, hm)
	return hm
}

// HubMeta caches hub metadata fetched from the coordinator, keyed by hub ID.
type HubMeta struct {
	cache *SimpleMetaCache[cdssdk.HubID, cdssdk.Hub]
}

// Get returns the hub with the given ID, or nil when it cannot be resolved.
func (h *HubMeta) Get(hubID cdssdk.HubID) *cdssdk.Hub {
	if hub, ok := h.cache.Get(hubID); ok {
		return &hub
	}
	return nil
}

// GetMany resolves a batch of hub IDs. The result is parallel to hubIDs;
// result[i] is nil when hubIDs[i] could not be resolved.
func (h *HubMeta) GetMany(hubIDs []cdssdk.HubID) []*cdssdk.Hub {
	vals, founds := h.cache.GetMany(hubIDs)
	out := make([]*cdssdk.Hub, len(vals))
	for i := range vals {
		if !founds[i] {
			continue
		}
		out[i] = &vals[i]
	}
	return out
}

// ClearOutdated evicts expired hubs from the underlying cache.
func (h *HubMeta) ClearOutdated() {
	h.cache.ClearOutdated()
}

// load is the Getter for the hub cache: it fetches the given hub IDs from the
// coordinator in a single batch. oks[i] reports whether vs[i] is valid.
// RPC failures are logged and reported as all-missing rather than as errors.
func (h *HubMeta) load(keys []cdssdk.HubID) ([]cdssdk.Hub, []bool) {
	vs := make([]cdssdk.Hub, len(keys))
	oks := make([]bool, len(keys))

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		logger.Warnf("new coordinator client: %v", err)
		return vs, oks
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	get, err := coorCli.GetHubs(coormq.NewGetHubs(keys))
	if err != nil {
		logger.Warnf("get hubs: %v", err)
		return vs, oks
	}

	// The coordinator is expected to answer with one (possibly nil) entry per
	// requested ID; guard against a short response to avoid an index panic.
	if len(get.Hubs) < len(keys) {
		logger.Warnf("get hubs: requested %d, got %d results", len(keys), len(get.Hubs))
		return vs, oks
	}

	for i := range keys {
		if get.Hubs[i] != nil {
			vs[i] = *get.Hubs[i]
			oks[i] = true
		}
	}

	return vs, oks
}

+ 121
- 0
common/pkgs/metacache/simple.go View File

@@ -0,0 +1,121 @@
package metacache

import (
"sync"
"time"
)

// SimpleMetaCacheConfig configures a SimpleMetaCache.
type SimpleMetaCacheConfig[K comparable, V any] struct {
	Getter Getter[K, V]  // batch loader invoked on cache misses
	Expire time.Duration // entry lifetime before eviction by ClearOutdated
}

// Getter loads the values for keys; the second result reports, per key,
// whether the value was actually found. Both slices are parallel to keys.
type Getter[K comparable, V any] func(keys []K) ([]V, []bool)

// SimpleMetaCache is a thread-safe read-through cache: misses are loaded in
// batches via the configured Getter, and entries expire after cfg.Expire.
type SimpleMetaCache[K comparable, V any] struct {
	lock  sync.RWMutex // guards cache
	cache map[K]*CacheEntry[K, V]
	cfg   SimpleMetaCacheConfig[K, V]
}

// NewSimpleMetaCache creates an empty cache with the given configuration.
func NewSimpleMetaCache[K comparable, V any](cfg SimpleMetaCacheConfig[K, V]) *SimpleMetaCache[K, V] {
	mc := &SimpleMetaCache[K, V]{
		cfg:   cfg,
		cache: map[K]*CacheEntry[K, V]{},
	}
	return mc
}

// Get returns the cached value for key, loading it through the configured
// Getter on a miss and retrying the lookup once.
//
// The original version also ran load after the final failed lookup, wasting a
// Getter call whose result this call never reads; loading is now restricted
// to before the last attempt.
func (mc *SimpleMetaCache[K, V]) Get(key K) (V, bool) {
	for i := 0; i < 2; i++ {
		var val V
		mc.lock.RLock()
		entry, ok := mc.cache[key]
		if ok {
			val = entry.Data
		}
		mc.lock.RUnlock()

		if ok {
			return val, true
		}

		// Only load before the final lookup attempt.
		if i == 0 {
			mc.load([]K{key})
		}
	}

	var zero V
	return zero, false
}

// GetMany looks up a batch of keys, loading all of them through the Getter if
// any is missing and retrying the lookup once. The results are parallel to
// keys; oks[i] reports whether result[i] is valid.
//
// The original version also ran load after the final incomplete lookup,
// wasting a Getter call whose result this call never reads; loading is now
// restricted to before the last attempt.
func (mc *SimpleMetaCache[K, V]) GetMany(keys []K) ([]V, []bool) {
	result := make([]V, len(keys))
	oks := make([]bool, len(keys))

	for attempt := 0; attempt < 2; attempt++ {
		allFound := true

		mc.lock.RLock()
		for i, key := range keys {
			if entry, ok := mc.cache[key]; ok {
				result[i] = entry.Data
				oks[i] = true
			} else {
				allFound = false
			}
		}
		mc.lock.RUnlock()

		if allFound || attempt == 1 {
			break
		}
		mc.load(keys)
	}

	return result, oks
}

// load fetches values for keys via the configured Getter and inserts the ones
// that were found. Keys already present in the cache are left untouched: the
// existing entry is treated as the most recent and is not overwritten.
func (mc *SimpleMetaCache[K, V]) load(keys []K) {
	vals, founds := mc.cfg.Getter(keys)

	mc.lock.Lock()
	defer mc.lock.Unlock()

	now := time.Now()
	for i, key := range keys {
		if !founds[i] {
			continue
		}
		if _, exists := mc.cache[key]; exists {
			continue
		}
		mc.cache[key] = &CacheEntry[K, V]{
			Key:        key,
			Data:       vals[i],
			UpdateTime: now,
		}
	}
}

// ClearOutdated evicts entries older than cfg.Expire. Entries whose
// UpdateTime lies in the future (negative age, e.g. after a clock step) are
// evicted as well.
func (mc *SimpleMetaCache[K, V]) ClearOutdated() {
	mc.lock.Lock()
	defer mc.lock.Unlock()

	for key, entry := range mc.cache {
		age := time.Since(entry.UpdateTime)
		if age < 0 || age > mc.cfg.Expire {
			delete(mc.cache, key)
		}
	}
}

// CacheEntry is a single cached key/value pair.
type CacheEntry[K comparable, V any] struct {
	Key        K
	Data       V
	UpdateTime time.Time // when the entry was loaded; used by ClearOutdated
}

+ 76
- 0
common/pkgs/metacache/storagemeta.go View File

@@ -0,0 +1,76 @@
package metacache

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// AddStorageMeta registers a storage metadata cache (5 minute expiry) with the
// host and returns it.
func (m *MetaCacheHost) AddStorageMeta() *StorageMeta {
	sm := &StorageMeta{}
	sm.cache = NewSimpleMetaCache(SimpleMetaCacheConfig[cdssdk.StorageID, stgmod.StorageDetail]{
		Getter: sm.load,
		Expire: 5 * time.Minute,
	})
	m.caches = append(m.caches, sm)
	return sm
}

// StorageMeta caches storage details fetched from the coordinator, keyed by
// storage ID.
type StorageMeta struct {
	cache *SimpleMetaCache[cdssdk.StorageID, stgmod.StorageDetail]
}

// Get returns the detail of the given storage, or nil when it cannot be
// resolved.
func (s *StorageMeta) Get(stgID cdssdk.StorageID) *stgmod.StorageDetail {
	if detail, ok := s.cache.Get(stgID); ok {
		return &detail
	}
	return nil
}

// GetMany resolves a batch of storage IDs. The result is parallel to stgIDs;
// result[i] is nil when stgIDs[i] could not be resolved.
func (s *StorageMeta) GetMany(stgIDs []cdssdk.StorageID) []*stgmod.StorageDetail {
	vals, founds := s.cache.GetMany(stgIDs)
	out := make([]*stgmod.StorageDetail, len(vals))
	for i := range vals {
		if !founds[i] {
			continue
		}
		out[i] = &vals[i]
	}
	return out
}

// ClearOutdated evicts expired storage details from the underlying cache.
func (s *StorageMeta) ClearOutdated() {
	s.cache.ClearOutdated()
}

// load is the Getter for the storage cache: it fetches the given storage IDs
// from the coordinator in a single batch. oks[i] reports whether vs[i] is
// valid. RPC failures are logged and reported as all-missing rather than as
// errors.
func (s *StorageMeta) load(keys []cdssdk.StorageID) ([]stgmod.StorageDetail, []bool) {
	vs := make([]stgmod.StorageDetail, len(keys))
	oks := make([]bool, len(keys))

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		logger.Warnf("new coordinator client: %v", err)
		return vs, oks
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	get, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(keys))
	if err != nil {
		logger.Warnf("get storage details: %v", err)
		return vs, oks
	}

	// The coordinator is expected to answer with one (possibly nil) entry per
	// requested ID; guard against a short response to avoid an index panic.
	if len(get.Storages) < len(keys) {
		logger.Warnf("get storage details: requested %d, got %d results", len(keys), len(get.Storages))
		return vs, oks
	}

	for i := range keys {
		if get.Storages[i] != nil {
			vs[i] = *get.Storages[i]
			oks[i] = true
		}
	}

	return vs, oks
}

+ 0
- 128
common/pkgs/mq/agent/storage.go View File

@@ -3,142 +3,14 @@ package agent
import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StorageService interface {
StartStorageLoadPackage(msg *StartStorageLoadPackage) (*StartStorageLoadPackageResp, *mq.CodeMessage)

WaitStorageLoadPackage(msg *WaitStorageLoadPackage) (*WaitStorageLoadPackageResp, *mq.CodeMessage)

StorageCheck(msg *StorageCheck) (*StorageCheckResp, *mq.CodeMessage)

StorageGC(msg *StorageGC) (*StorageGCResp, *mq.CodeMessage)

StartStorageCreatePackage(msg *StartStorageCreatePackage) (*StartStorageCreatePackageResp, *mq.CodeMessage)

WaitStorageCreatePackage(msg *WaitStorageCreatePackage) (*WaitStorageCreatePackageResp, *mq.CodeMessage)
}

// 启动调度Package的任务
var _ = Register(Service.StartStorageLoadPackage)

type StartStorageLoadPackage struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
StorageID cdssdk.StorageID `json:"storageID"`
}
type StartStorageLoadPackageResp struct {
mq.MessageBodyBase
TaskID string `json:"taskID"`
}

func NewStartStorageLoadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, storageID cdssdk.StorageID) *StartStorageLoadPackage {
return &StartStorageLoadPackage{
UserID: userID,
PackageID: packageID,
StorageID: storageID,
}
}
func NewStartStorageLoadPackageResp(taskID string) *StartStorageLoadPackageResp {
return &StartStorageLoadPackageResp{
TaskID: taskID,
}
}
func (client *Client) StartStorageLoadPackage(msg *StartStorageLoadPackage, opts ...mq.RequestOption) (*StartStorageLoadPackageResp, error) {
return mq.Request(Service.StartStorageLoadPackage, client.rabbitCli, msg, opts...)
}

// 等待调度Package的任务
var _ = Register(Service.WaitStorageLoadPackage)

type WaitStorageLoadPackage struct {
mq.MessageBodyBase
TaskID string `json:"taskID"`
WaitTimeoutMs int64 `json:"waitTimeout"`
}
type WaitStorageLoadPackageResp struct {
mq.MessageBodyBase
IsComplete bool `json:"isComplete"`
Error string `json:"error"`
PackagePath string `json:"packagePath"` // 加载后的Package的路径,相对于数据库中配置的Directory
LocalBase string `json:"localBase"` // 存储服务本地的目录,LocalBase + PackagePath = Package在代理节点上的完整路径
RemoteBase string `json:"remoteBase"` // 存储服务远程的目录,RemoteBase + PackagePath = Package在存储服务中的完整路径
}

func NewWaitStorageLoadPackage(taskID string, waitTimeoutMs int64) *WaitStorageLoadPackage {
return &WaitStorageLoadPackage{
TaskID: taskID,
WaitTimeoutMs: waitTimeoutMs,
}
}
func NewWaitStorageLoadPackageResp(isComplete bool, err string, packagePath string, localBase string, remoteBase string) *WaitStorageLoadPackageResp {
return &WaitStorageLoadPackageResp{
IsComplete: isComplete,
Error: err,
PackagePath: packagePath,
LocalBase: localBase,
RemoteBase: remoteBase,
}
}
func (client *Client) WaitStorageLoadPackage(msg *WaitStorageLoadPackage, opts ...mq.RequestOption) (*WaitStorageLoadPackageResp, error) {
return mq.Request(Service.WaitStorageLoadPackage, client.rabbitCli, msg, opts...)
}

// 检查Storage
var _ = Register(Service.StorageCheck)

type StorageCheck struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
}
type StorageCheckResp struct {
mq.MessageBodyBase
Packages []stgmod.LoadedPackageID `json:"packages"`
}

func NewStorageCheck(storageID cdssdk.StorageID) *StorageCheck {
return &StorageCheck{
StorageID: storageID,
}
}
func NewStorageCheckResp(packages []stgmod.LoadedPackageID) *StorageCheckResp {
return &StorageCheckResp{
Packages: packages,
}
}
func (client *Client) StorageCheck(msg *StorageCheck, opts ...mq.RequestOption) (*StorageCheckResp, error) {
return mq.Request(Service.StorageCheck, client.rabbitCli, msg, opts...)
}

// 清理Cache中不用的文件
var _ = Register(Service.StorageGC)

type StorageGC struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
Packages []model.StoragePackage `json:"packages"`
}
type StorageGCResp struct {
mq.MessageBodyBase
}

func ReqStorageGC(storageID cdssdk.StorageID, packages []model.StoragePackage) *StorageGC {
return &StorageGC{
StorageID: storageID,
Packages: packages,
}
}
func RespStorageGC() *StorageGCResp {
return &StorageGCResp{}
}
func (client *Client) StorageGC(msg *StorageGC, opts ...mq.RequestOption) (*StorageGCResp, error) {
return mq.Request(Service.StorageGC, client.rabbitCli, msg, opts...)
}

// 启动从Storage上传Package的任务
var _ = Register(Service.StartStorageCreatePackage)



+ 3
- 3
common/pkgs/mq/coordinator/hub.go View File

@@ -80,7 +80,7 @@ type GetHubs struct {
}
type GetHubsResp struct {
mq.MessageBodyBase
Hubs []cdssdk.Hub `json:"hubs"`
Hubs []*cdssdk.Hub `json:"hubs"`
}

func NewGetHubs(hubIDs []cdssdk.HubID) *GetHubs {
@@ -88,7 +88,7 @@ func NewGetHubs(hubIDs []cdssdk.HubID) *GetHubs {
HubIDs: hubIDs,
}
}
func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp {
func NewGetHubsResp(hubs []*cdssdk.Hub) *GetHubsResp {
return &GetHubsResp{
Hubs: hubs,
}
@@ -96,7 +96,7 @@ func NewGetHubsResp(hubs []cdssdk.Hub) *GetHubsResp {
func (r *GetHubsResp) GetHub(id cdssdk.HubID) *cdssdk.Hub {
for _, n := range r.Hubs {
if n.HubID == id {
return &n
return n
}
}



+ 0
- 33
common/pkgs/mq/coordinator/package.go View File

@@ -23,8 +23,6 @@ type PackageService interface {
ClonePackage(msg *ClonePackage) (*ClonePackageResp, *mq.CodeMessage)

GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, *mq.CodeMessage)

GetPackageLoadedStorages(msg *GetPackageLoadedStorages) (*GetPackageLoadedStoragesResp, *mq.CodeMessage)
}

// 获取Package基本信息
@@ -260,34 +258,3 @@ func ReqGetPackageCachedStoragesResp(stgInfos []cdssdk.StoragePackageCachingInfo
func (client *Client) GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, error) {
return mq.Request(Service.GetPackageCachedStorages, client.rabbitCli, msg)
}

// 根据PackageID获取storage分布情况
var _ = Register(Service.GetPackageLoadedStorages)

type GetPackageLoadedStorages struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
}

type GetPackageLoadedStoragesResp struct {
mq.MessageBodyBase
StorageIDs []cdssdk.StorageID `json:"storageIDs"`
}

func ReqGetPackageLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageLoadedStorages {
return &GetPackageLoadedStorages{
UserID: userID,
PackageID: packageID,
}
}

func NewGetPackageLoadedStoragesResp(stgIDs []cdssdk.StorageID) *GetPackageLoadedStoragesResp {
return &GetPackageLoadedStoragesResp{
StorageIDs: stgIDs,
}
}

func (client *Client) GetPackageLoadedStorages(msg *GetPackageLoadedStorages) (*GetPackageLoadedStoragesResp, error) {
return mq.Request(Service.GetPackageLoadedStorages, client.rabbitCli, msg)
}

+ 12
- 10
common/pkgs/mq/coordinator/storage.go View File

@@ -144,24 +144,26 @@ var _ = Register(Service.StoragePackageLoaded)

type StoragePackageLoaded struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
StorageID cdssdk.StorageID `json:"storageID"`
PackageID cdssdk.PackageID `json:"packageID"`
PinnedBlocks []stgmod.ObjectBlock `json:"pinnedBlocks"`
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
StorageID cdssdk.StorageID `json:"storageID"`
RootPath string `json:"rootPath"`
PinnedObjects []cdssdk.ObjectID `json:"pinnedObjects"`
}
type StoragePackageLoadedResp struct {
mq.MessageBodyBase
}

func NewStoragePackageLoaded(userID cdssdk.UserID, stgID cdssdk.StorageID, packageID cdssdk.PackageID, pinnedBlocks []stgmod.ObjectBlock) *StoragePackageLoaded {
func ReqStoragePackageLoaded(userID cdssdk.UserID, stgID cdssdk.StorageID, packageID cdssdk.PackageID, rootPath string, pinnedObjects []cdssdk.ObjectID) *StoragePackageLoaded {
return &StoragePackageLoaded{
UserID: userID,
PackageID: packageID,
StorageID: stgID,
PinnedBlocks: pinnedBlocks,
UserID: userID,
PackageID: packageID,
StorageID: stgID,
RootPath: rootPath,
PinnedObjects: pinnedObjects,
}
}
func NewStoragePackageLoadedResp() *StoragePackageLoadedResp {
func RespStoragePackageLoaded() *StoragePackageLoadedResp {
return &StoragePackageLoadedResp{}
}
func (client *Client) StoragePackageLoaded(msg *StoragePackageLoaded) (*StoragePackageLoadedResp, error) {


+ 4
- 169
common/pkgs/storage/local/shared_store.go View File

@@ -1,25 +1,18 @@
package local

import (
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strconv"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils"
)

type SharedStore struct {
svc *Service
cfg cdssdk.LocalSharedStorage
// lock sync.Mutex
}

func NewSharedStore(svc *Service, cfg cdssdk.LocalSharedStorage) (*SharedStore, error) {
@@ -37,185 +30,27 @@ func (s *SharedStore) Stop() {
s.getLogger().Infof("component stop")
}

func (s *SharedStore) WritePackageObject(userID cdssdk.UserID, pkgID cdssdk.PackageID, path string, stream io.Reader) (string, error) {
relaPath := filepath.Join(utils.MakeLoadedPackagePath(userID, pkgID), path)
fullPath := filepath.Join(s.cfg.LoadBase, relaPath)
func (s *SharedStore) Write(objPath string, stream io.Reader) error {
fullPath := filepath.Join(s.cfg.LoadBase, objPath)
err := os.MkdirAll(filepath.Dir(fullPath), 0755)
if err != nil {
return "", err
return err
}

f, err := os.Create(fullPath)
if err != nil {
return "", err
return err
}
defer f.Close()

_, err = io.Copy(f, stream)
if err != nil {
return "", err
}

return filepath.ToSlash(relaPath), nil
}

func (s *SharedStore) ListLoadedPackages() ([]stgmod.LoadedPackageID, error) {
entries, err := os.ReadDir(s.cfg.LoadBase)
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
s.getLogger().Warnf("list package directory: %v", err)
return nil, err
}

var loadeds []stgmod.LoadedPackageID
for _, e := range entries {
if !e.IsDir() {
continue
}

uid, err := strconv.ParseInt(e.Name(), 10, 64)
if err != nil {
continue
}

userID := cdssdk.UserID(uid)
pkgs, err := s.listUserPackages(userID, fmt.Sprintf("%v", userID))
if err != nil {
continue
}

loadeds = append(loadeds, pkgs...)
}

return loadeds, nil
}

func (s *SharedStore) listUserPackages(userID cdssdk.UserID, userIDStr string) ([]stgmod.LoadedPackageID, error) {
userDir := filepath.Join(s.cfg.LoadBase, userIDStr)
entries, err := os.ReadDir(userDir)
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
s.getLogger().Warnf("list package directory: %v", err)
return nil, err
}

var pkgs []stgmod.LoadedPackageID
for _, e := range entries {
if !e.IsDir() {
continue
}

pkgID, err := strconv.ParseInt(e.Name(), 10, 64)
if err != nil {
continue
}

pkgs = append(pkgs, stgmod.LoadedPackageID{
UserID: userID,
PackageID: cdssdk.PackageID(pkgID),
})
}

return pkgs, nil
}

func (s *SharedStore) PackageGC(avaiables []stgmod.LoadedPackageID) error {
log := s.getLogger()

entries, err := os.ReadDir(s.cfg.LoadBase)
if err != nil {
log.Warnf("list storage directory: %s", err.Error())
return err
}

// userID->pkgID->pkg
userPkgs := make(map[string]map[string]bool)
for _, pkg := range avaiables {
userIDStr := fmt.Sprintf("%v", pkg.UserID)

pkgs, ok := userPkgs[userIDStr]
if !ok {
pkgs = make(map[string]bool)
userPkgs[userIDStr] = pkgs
}

pkgIDStr := fmt.Sprintf("%v", pkg.PackageID)
pkgs[pkgIDStr] = true
}

userDirs := lo.Filter(entries, func(info fs.DirEntry, index int) bool { return info.IsDir() })
for _, dir := range userDirs {
pkgMap, ok := userPkgs[dir.Name()]
// 第一级目录名是UserID,先删除UserID在StoragePackage表里没出现过的文件夹
if !ok {
rmPath := filepath.Join(s.cfg.LoadBase, dir.Name())
err := os.RemoveAll(rmPath)
if err != nil {
log.Warnf("removing user dir %s: %s", rmPath, err.Error())
} else {
log.Debugf("user dir %s removed by gc", rmPath)
}
continue
}

pkgDir := filepath.Join(s.cfg.LoadBase, dir.Name())
// 遍历每个UserID目录的packages目录里的内容
pkgs, err := os.ReadDir(pkgDir)
if err != nil {
log.Warnf("reading package dir %s: %s", pkgDir, err.Error())
continue
}

for _, pkg := range pkgs {
if !pkgMap[pkg.Name()] {
rmPath := filepath.Join(pkgDir, pkg.Name())
err := os.RemoveAll(rmPath)
if err != nil {
log.Warnf("removing package dir %s: %s", rmPath, err.Error())
} else {
log.Debugf("package dir %s removed by gc", rmPath)
}
}
}
}

return nil
}

func (s *SharedStore) getLogger() logger.Logger {
return logger.WithField("SharedStore", "Local").WithField("Storage", s.svc.Detail.Storage.String())
}

type PackageWriter struct {
pkgRoot string
fullDirPath string
}

func (w *PackageWriter) Root() string {
return w.pkgRoot
}

func (w *PackageWriter) Write(path string, stream io.Reader) (string, error) {
fullFilePath := filepath.Join(w.fullDirPath, path)
err := os.MkdirAll(filepath.Dir(fullFilePath), 0755)
if err != nil {
return "", err
}

f, err := os.Create(fullFilePath)
if err != nil {
return "", err
}
defer f.Close()

_, err = io.Copy(f, stream)
if err != nil {
return "", err
}

return filepath.ToSlash(filepath.Join(w.pkgRoot, path)), nil
}

+ 2
- 9
common/pkgs/storage/types/shared_store.go View File

@@ -2,18 +2,11 @@ package types

import (
"io"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
)

type SharedStore interface {
Start(ch *StorageEventChan)
Stop()
// 写入一个文件到Package的调度目录下,返回值为文件路径:userID/pkgID/path
WritePackageObject(userID cdssdk.UserID, pkgID cdssdk.PackageID, path string, stream io.Reader) (string, error)
// 获取所有已加载的Package信息
ListLoadedPackages() ([]stgmod.LoadedPackageID, error)
// 垃圾回收,删除过期的Package
PackageGC(avaiables []stgmod.LoadedPackageID) error

Write(objectPath string, stream io.Reader) error
}

+ 11
- 16
common/pkgs/uploader/create_load.go View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
"path"
"sync"
"time"

@@ -16,13 +17,13 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/utils"
)

type CreateLoadUploader struct {
pkg cdssdk.Package
userID cdssdk.UserID
targetStgs []stgmod.StorageDetail
loadRoots []string
uploader *Uploader
distlock *distlock.Mutex
successes []coormq.AddObjectEntry
@@ -31,21 +32,20 @@ type CreateLoadUploader struct {
}

type CreateLoadResult struct {
Package cdssdk.Package
Objects map[string]cdssdk.Object
LoadedDirs []string
Package cdssdk.Package
Objects map[string]cdssdk.Object
}

func (u *CreateLoadUploader) Upload(path string, size int64, stream io.Reader) error {
func (u *CreateLoadUploader) Upload(pa string, size int64, stream io.Reader) error {
uploadTime := time.Now()
stgIDs := make([]cdssdk.StorageID, 0, len(u.targetStgs))

ft := ioswitch2.FromTo{}
fromExec, hd := ioswitch2.NewFromDriver(ioswitch2.RawStream())
ft.AddFrom(fromExec)
for _, stg := range u.targetStgs {
for i, stg := range u.targetStgs {
ft.AddTo(ioswitch2.NewToShardStore(*stg.MasterHub, stg, ioswitch2.RawStream(), "fileHash"))
ft.AddTo(ioswitch2.NewLoadToShared(*stg.MasterHub, stg.Storage, u.userID, u.pkg.PackageID, path))
ft.AddTo(ioswitch2.NewLoadToShared(*stg.MasterHub, stg.Storage, path.Join(u.loadRoots[i], pa)))
stgIDs = append(stgIDs, stg.Storage.StorageID)
}

@@ -70,7 +70,7 @@ func (u *CreateLoadUploader) Upload(path string, size int64, stream io.Reader) e
// 记录上传结果
fileHash := ret["fileHash"].(*ops2.FileHashValue).Hash
u.successes = append(u.successes, coormq.AddObjectEntry{
Path: path,
Path: pa,
Size: size,
FileHash: fileHash,
UploadTime: uploadTime,
@@ -110,14 +110,9 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
ret.Objects[entry.Path] = entry
}

for _, stg := range u.targetStgs {
_, err := coorCli.StoragePackageLoaded(coormq.NewStoragePackageLoaded(u.userID, stg.Storage.StorageID, u.pkg.PackageID, nil))
if err != nil {
return CreateLoadResult{}, fmt.Errorf("notifying storage package loaded: %w", err)
}

// TODO 考虑让SharedStore来生成Load目录路径
ret.LoadedDirs = append(ret.LoadedDirs, utils.MakeLoadedPackagePath(u.userID, u.pkg.PackageID))
for i, stg := range u.targetStgs {
// 不关注是否成功
coorCli.StoragePackageLoaded(coormq.ReqStoragePackageLoaded(u.userID, stg.Storage.StorageID, u.pkg.PackageID, u.loadRoots[i], nil))
}

return ret, nil


+ 10
- 10
common/pkgs/uploader/uploader.go View File

@@ -14,6 +14,7 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/svcmgr"
)
@@ -22,13 +23,15 @@ type Uploader struct {
distlock *distlock.Service
connectivity *connectivity.Collector
stgMgr *svcmgr.Manager
stgMeta *metacache.StorageMeta
}

func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collector, stgMgr *svcmgr.Manager) *Uploader {
func NewUploader(distlock *distlock.Service, connectivity *connectivity.Collector, stgMgr *svcmgr.Manager, stgMeta *metacache.StorageMeta) *Uploader {
return &Uploader{
distlock: distlock,
connectivity: connectivity,
stgMgr: stgMgr,
stgMeta: stgMeta,
}
}

@@ -54,8 +57,8 @@ func (u *Uploader) BeginUpdate(userID cdssdk.UserID, pkgID cdssdk.PackageID, aff
delay := time.Duration(math.MaxInt64)

con, ok := cons[stg.MasterHub.HubID]
if ok && con.Delay != nil {
delay = *con.Delay
if ok && con.Latency != nil {
delay = *con.Latency
}

userStgs = append(userStgs, UploadStorageInfo{
@@ -110,20 +113,17 @@ func (w *Uploader) chooseUploadStorage(storages []UploadStorageInfo, stgAffinity
return storages[0]
}

func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID, pkgName string, loadTo []cdssdk.StorageID) (*CreateLoadUploader, error) {
func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID, pkgName string, loadTo []cdssdk.StorageID, loadToPath []string) (*CreateLoadUploader, error) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(loadTo))
if err != nil {
return nil, fmt.Errorf("getting storages: %w", err)
}
getStgs := u.stgMeta.GetMany(loadTo)

targetStgs := make([]stgmod.StorageDetail, len(loadTo))
for i, stg := range getStgs.Storages {
for i, stg := range getStgs {
if stg == nil {
return nil, fmt.Errorf("storage %v not found", loadTo[i])
}
@@ -139,7 +139,6 @@ func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID,
for _, stg := range targetStgs {
reqBld.Shard().Buzy(stg.Storage.StorageID)
reqBld.Storage().Buzy(stg.Storage.StorageID)
reqBld.Metadata().StoragePackage().CreateOne(userID, stg.Storage.StorageID, createPkg.Package.PackageID)
}
lock, err := reqBld.MutexLock(u.distlock)
if err != nil {
@@ -150,6 +149,7 @@ func (u *Uploader) BeginCreateLoad(userID cdssdk.UserID, bktID cdssdk.BucketID,
pkg: createPkg.Package,
userID: userID,
targetStgs: targetStgs,
loadRoots: loadToPath,
uploader: u,
distlock: lock,
}, nil


+ 0
- 1
coordinator/internal/cmd/migrate.go View File

@@ -52,7 +52,6 @@ func migrate(configPath string) {
migrateOne(db, stgmod.PackageAccessStat{})
migrateOne(db, cdssdk.Package{})
migrateOne(db, cdssdk.PinnedObject{})
migrateOne(db, model.StoragePackage{})
migrateOne(db, cdssdk.Storage{})
migrateOne(db, model.UserStorage{})
migrateOne(db, model.UserBucket{})


+ 18
- 2
coordinator/internal/mq/bucket.go View File

@@ -50,7 +50,7 @@ func (svc *Service) GetUserBuckets(msg *coormq.GetUserBuckets) (*coormq.GetUserB
}

func (svc *Service) GetBucketPackages(msg *coormq.GetBucketPackages) (*coormq.GetBucketPackagesResp, *mq.CodeMessage) {
packages, err := svc.db2.Package().GetBucketPackages(svc.db2.DefCtx(), msg.UserID, msg.BucketID)
packages, err := svc.db2.Package().GetUserBucketPackages(svc.db2.DefCtx(), msg.UserID, msg.BucketID)

if err != nil {
logger.WithField("UserID", msg.UserID).
@@ -103,7 +103,23 @@ func (svc *Service) DeleteBucket(msg *coormq.DeleteBucket) (*coormq.DeleteBucket
return fmt.Errorf("bucket is not avaiable to the user")
}

err := svc.db2.Bucket().Delete(tx, msg.BucketID)
if err := svc.db2.UserBucket().DeleteByBucketID(tx, msg.BucketID); err != nil {
return fmt.Errorf("deleting user bucket: %w", err)
}

pkgs, err := svc.db2.Package().GetBucketPackages(tx, msg.BucketID)
if err != nil {
return fmt.Errorf("getting bucket packages: %w", err)
}

for _, pkg := range pkgs {
err := svc.db2.Package().DeleteComplete(tx, pkg.PackageID)
if err != nil {
return fmt.Errorf("deleting package %v: %w", pkg.PackageID, err)
}
}

err = svc.db2.Bucket().Delete(tx, msg.BucketID)
if err != nil {
return fmt.Errorf("deleting bucket: %w", err)
}


+ 22
- 10
coordinator/internal/mq/hub.go View File

@@ -57,27 +57,39 @@ func (svc *Service) GetUserHubs(msg *coormq.GetUserHubs) (*coormq.GetUserHubsRes
}

func (svc *Service) GetHubs(msg *coormq.GetHubs) (*coormq.GetHubsResp, *mq.CodeMessage) {
var hubs []cdssdk.Hub
var hubs []*cdssdk.Hub

if msg.HubIDs == nil {
var err error
hubs, err = svc.db2.Hub().GetAllHubs(svc.db2.DefCtx())
get, err := svc.db2.Hub().GetAllHubs(svc.db2.DefCtx())
if err != nil {
logger.Warnf("getting all hubs: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "get all hub failed")
}
for _, hub := range get {
h := hub
hubs = append(hubs, &h)
}

} else {
// 可以不用事务
get, err := svc.db2.Hub().BatchGetByID(svc.db2.DefCtx(), msg.HubIDs)
if err != nil {
logger.Warnf("batch get hubs by id: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("batch get hubs by id: %v", err))
}

getMp := make(map[cdssdk.HubID]cdssdk.Hub)
for _, hub := range get {
getMp[hub.HubID] = hub
}

for _, id := range msg.HubIDs {
hub, err := svc.db2.Hub().GetByID(svc.db2.DefCtx(), id)
if err != nil {
logger.WithField("HubID", id).
Warnf("query hub failed, err: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "query hub failed")
if hub, ok := getMp[id]; ok {
h := hub
hubs = append(hubs, &h)
} else {
hubs = append(hubs, nil)
}

hubs = append(hubs, hub)
}
}



+ 2
- 36
coordinator/internal/mq/package.go View File

@@ -123,23 +123,9 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack
return fmt.Errorf("package is not available to the user")
}

err := svc.db2.Package().SoftDelete(tx, msg.PackageID)
err := svc.db2.Package().DeleteComplete(tx, msg.PackageID)
if err != nil {
return fmt.Errorf("soft delete package: %w", err)
}

err = svc.db2.Package().DeleteUnused(tx, msg.PackageID)
if err != nil {
logger.WithField("UserID", msg.UserID).
WithField("PackageID", msg.PackageID).
Warnf("deleting unused package: %w", err.Error())
}

err = svc.db2.PackageAccessStat().DeleteByPackageID(tx, msg.PackageID)
if err != nil {
logger.WithField("UserID", msg.UserID).
WithField("PackageID", msg.PackageID).
Warnf("deleting package access stat: %w", err.Error())
return fmt.Errorf("deleting package: %w", err)
}

return nil
@@ -274,26 +260,6 @@ func (svc *Service) GetPackageCachedStorages(msg *coormq.GetPackageCachedStorage
return mq.ReplyOK(coormq.ReqGetPackageCachedStoragesResp(stgInfos, packageSize))
}

func (svc *Service) GetPackageLoadedStorages(msg *coormq.GetPackageLoadedStorages) (*coormq.GetPackageLoadedStoragesResp, *mq.CodeMessage) {
storages, err := svc.db2.StoragePackage().FindPackageStorages(svc.db2.DefCtx(), msg.PackageID)
if err != nil {
logger.WithField("PackageID", msg.PackageID).
Warnf("get storages by packageID failed, err: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "get storages by packageID failed")
}

uniqueStgIDs := make(map[cdssdk.StorageID]bool)
var stgIDs []cdssdk.StorageID
for _, stg := range storages {
if !uniqueStgIDs[stg.StorageID] {
uniqueStgIDs[stg.StorageID] = true
stgIDs = append(stgIDs, stg.StorageID)
}
}

return mq.ReplyOK(coormq.NewGetPackageLoadedStoragesResp(stgIDs))
}

func (svc *Service) AddAccessStat(msg *coormq.AddAccessStat) {
pkgIDs := make([]cdssdk.PackageID, len(msg.Entries))
objIDs := make([]cdssdk.ObjectID, len(msg.Entries))


+ 16
- 24
coordinator/internal/mq/storage.go View File

@@ -3,6 +3,7 @@ package mq
import (
"errors"
"fmt"
"time"

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
@@ -104,35 +105,26 @@ func (svc *Service) GetStorageByName(msg *coormq.GetStorageByName) (*coormq.GetS

func (svc *Service) StoragePackageLoaded(msg *coormq.StoragePackageLoaded) (*coormq.StoragePackageLoadedResp, *mq.CodeMessage) {
err := svc.db2.DoTx(func(tx db2.SQLContext) error {
// 可以不用检查用户是否存在
if ok, _ := svc.db2.Package().IsAvailable(tx, msg.UserID, msg.PackageID); !ok {
return fmt.Errorf("package is not available to user")
}

if ok, _ := svc.db2.Storage().IsAvailable(tx, msg.UserID, msg.StorageID); !ok {
return fmt.Errorf("storage is not available to user")
}

err := svc.db2.StoragePackage().CreateOrUpdate(tx, msg.StorageID, msg.PackageID, msg.UserID)
// TODO 权限检查
exists, err := svc.db2.Object().BatchTestObjectID(tx, msg.PinnedObjects)
if err != nil {
return fmt.Errorf("creating storage package: %w", err)
return fmt.Errorf("testing object id: %w", err)
}

stg, err := svc.db2.Storage().GetByID(tx, msg.StorageID)
if err != nil {
return fmt.Errorf("getting storage: %w", err)
pinned := make([]cdssdk.PinnedObject, 0, len(msg.PinnedObjects))
for _, obj := range msg.PinnedObjects {
if exists[obj] {
pinned = append(pinned, cdssdk.PinnedObject{
StorageID: msg.StorageID,
ObjectID: obj,
CreateTime: time.Now(),
})
}
}

err = svc.db2.PinnedObject().CreateFromPackage(tx, msg.PackageID, stg.StorageID)
err = svc.db2.PinnedObject().BatchTryCreate(tx, pinned)
if err != nil {
return fmt.Errorf("creating pinned object from package: %w", err)
}

if len(msg.PinnedBlocks) > 0 {
err = svc.db2.ObjectBlock().BatchCreate(tx, msg.PinnedBlocks)
if err != nil {
return fmt.Errorf("batch creating object block: %w", err)
}
return fmt.Errorf("batch creating pinned object: %w", err)
}

return nil
@@ -145,5 +137,5 @@ func (svc *Service) StoragePackageLoaded(msg *coormq.StoragePackageLoaded) (*coo
return nil, mq.Failed(errorcode.OperationFailed, "user load package to storage failed")
}

return mq.ReplyOK(coormq.NewStoragePackageLoadedResp())
return mq.ReplyOK(coormq.RespStoragePackageLoaded())
}

+ 1
- 1
coordinator/internal/mq/temp.go View File

@@ -26,7 +26,7 @@ func (svc *Service) GetDatabaseAll(msg *coormq.GetDatabaseAll) (*coormq.GetDatab
}

for _, bkt := range bkts {
ps, err := svc.db2.Package().GetBucketPackages(tx, msg.UserID, bkt.BucketID)
ps, err := svc.db2.Package().GetUserBucketPackages(tx, msg.UserID, bkt.BucketID)
if err != nil {
return fmt.Errorf("get bucket packages: %w", err)
}


+ 0
- 136
scanner/internal/event/agent_check_storage.go View File

@@ -1,136 +0,0 @@
package event

import (
"database/sql"
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

// AgentCheckStorage wraps the scanner AgentCheckStorage message as an
// executable event.
type AgentCheckStorage struct {
	*scevt.AgentCheckStorage
}

// NewAgentCheckStorage builds an event around the given message.
func NewAgentCheckStorage(evt *scevt.AgentCheckStorage) *AgentCheckStorage {
	e := new(AgentCheckStorage)
	e.AgentCheckStorage = evt
	return e
}

// TryMerge reports whether other is an AgentCheckStorage event targeting the
// same storage, in which case the two events can be coalesced (they carry no
// extra state beyond the storage ID).
func (t *AgentCheckStorage) TryMerge(other Event) bool {
	o, ok := other.(*AgentCheckStorage)
	return ok && o.StorageID == t.StorageID
}

// Execute reconciles the StoragePackage table with the packages the storage's
// agent actually holds: it asks the master-hub agent for the real package
// list, deletes DB records the agent no longer reports, then purges packages
// left unreferenced. All failures are logged and abort the event quietly.
func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {
	log := logger.WithType[AgentCheckStorage]("Event")
	log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckStorage))
	defer log.Debugf("end")

	// No locking for the reads below: the check task runs repeatedly, so a
	// single failure (or a slightly stale read) is not a big problem.

	stg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID)
	if err != nil {
		// A missing row just means there is nothing to check; only log real errors.
		if err != sql.ErrNoRows {
			log.WithField("StorageID", t.StorageID).Warnf("get storage failed, err: %s", err.Error())
		}
		return
	}

	hub, err := execCtx.Args.DB.Hub().GetByID(execCtx.Args.DB.DefCtx(), stg.MasterHub)
	if err != nil {
		if err != sql.ErrNoRows {
			log.WithField("StorageID", t.StorageID).Warnf("get storage hub failed, err: %s", err.Error())
		}
		return
	}

	// Skip storages whose master hub is not in the Normal state.
	if hub.State != consts.HubStateNormal {
		return
	}

	agtCli, err := stgglb.AgentMQPool.Acquire(stg.MasterHub)
	if err != nil {
		log.WithField("MasterHub", stg.MasterHub).Warnf("create agent client failed, err: %s", err.Error())
		return
	}
	defer stgglb.AgentMQPool.Release(agtCli)

	checkResp, err := agtCli.StorageCheck(agtmq.NewStorageCheck(stg.StorageID), mq.RequestOption{Timeout: time.Minute})
	if err != nil {
		log.WithField("MasterHub", stg.MasterHub).Warnf("checking storage: %s", err.Error())
		return
	}
	// Index the agent's answer as userID -> packageID -> present.
	realPkgs := make(map[cdssdk.UserID]map[cdssdk.PackageID]bool)
	for _, pkg := range checkResp.Packages {
		pkgs, ok := realPkgs[pkg.UserID]
		if !ok {
			pkgs = make(map[cdssdk.PackageID]bool)
			realPkgs[pkg.UserID] = pkgs
		}

		pkgs[pkg.PackageID] = true
	}

	// NOTE(review): DoTx's returned error is discarded here; failures inside
	// the transaction are only logged.
	execCtx.Args.DB.DoTx(func(tx db2.SQLContext) error {
		packages, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(tx, t.StorageID)
		if err != nil {
			log.Warnf("getting storage package: %s", err.Error())
			return nil
		}

		// Collect DB records for packages the agent did not report.
		var rms []model.StoragePackage
		for _, pkg := range packages {
			pkgMap, ok := realPkgs[pkg.UserID]
			if !ok {
				rms = append(rms, pkg)
				continue
			}

			if !pkgMap[pkg.PackageID] {
				rms = append(rms, pkg)
			}
		}

		// Delete the stale records, remembering which package IDs were touched.
		rmdPkgIDs := make(map[cdssdk.PackageID]bool)
		for _, rm := range rms {
			err := execCtx.Args.DB.StoragePackage().Delete(tx, rm.StorageID, rm.PackageID, rm.UserID)
			if err != nil {
				log.Warnf("deleting storage package: %s", err.Error())
				continue
			}
			rmdPkgIDs[rm.PackageID] = true
		}

		// Permanently delete packages that are already in Deleted state and
		// are no longer referenced by anything.
		for pkgID := range rmdPkgIDs {
			err := execCtx.Args.DB.Package().DeleteUnused(tx, pkgID)
			if err != nil {
				log.Warnf("deleting unused package: %s", err.Error())
				continue
			}
		}

		return nil
	})
}

// Register the message-to-event convertor so incoming AgentCheckStorage
// messages can be turned into executable events.
func init() {
	RegisterMessageConvertor(NewAgentCheckStorage)
}

+ 0
- 86
scanner/internal/event/agent_storage_gc.go View File

@@ -1,86 +0,0 @@
package event

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"

agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

// AgentStorageGC wraps the scanner AgentStorageGC message as an executable
// event.
type AgentStorageGC struct {
	*scevt.AgentStorageGC
}

// NewAgentStorageGC builds an event around the given message.
func NewAgentStorageGC(evt *scevt.AgentStorageGC) *AgentStorageGC {
	e := new(AgentStorageGC)
	e.AgentStorageGC = evt
	return e
}

// TryMerge reports whether other is an AgentStorageGC event for the same
// storage; such events are interchangeable, so merging needs no extra work.
func (t *AgentStorageGC) TryMerge(other Event) bool {
	o, ok := other.(*AgentStorageGC)
	return ok && o.StorageID == t.StorageID
}

// Execute garbage-collects a single storage: it takes the distributed GC lock
// for the storage, gathers the packages recorded as loaded onto it, and asks
// the storage's master-hub agent to run GC against that list. All failures
// are logged and abort the event quietly.
func (t *AgentStorageGC) Execute(execCtx ExecuteContext) {
	log := logger.WithType[AgentStorageGC]("Event")
	startTime := time.Now()
	log.Debugf("begin with %v", logger.FormatStruct(t.AgentStorageGC))
	defer func() {
		log.Debugf("end, time: %v", time.Since(startTime))
	}()

	// TODO: do unavailable nodes also need to be sent this task?

	mutex, err := reqbuilder.NewBuilder().
		// acquire the GC lock for this storage
		Storage().GC(t.StorageID).
		MutexLock(execCtx.Args.DistLock)
	if err != nil {
		log.Warnf("acquire locks failed, err: %s", err.Error())
		return
	}
	defer mutex.Unlock()

	getStg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID)
	if err != nil {
		log.WithField("StorageID", t.StorageID).Warnf("getting storage: %s", err.Error())
		return
	}

	stgPkgs, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.DefCtx(), t.StorageID)
	if err != nil {
		log.WithField("StorageID", t.StorageID).Warnf("getting storage packages: %s", err.Error())
		return
	}

	agtCli, err := stgglb.AgentMQPool.Acquire(getStg.MasterHub)
	if err != nil {
		log.WithField("MasterHub", getStg.MasterHub).Warnf("create agent client failed, err: %s", err.Error())
		return
	}
	defer stgglb.AgentMQPool.Release(agtCli)

	// Hand the DB's view of loaded packages to the agent.
	// NOTE(review): agent-side GC semantics inferred from the request shape;
	// confirm against the agent's StorageGC handler.
	_, err = agtCli.StorageGC(agtmq.ReqStorageGC(t.StorageID, stgPkgs), mq.RequestOption{Timeout: time.Minute})
	if err != nil {
		log.WithField("StorageID", t.StorageID).Warnf("storage gc: %s", err.Error())
		return
	}
}

// Register the message-to-event convertor so incoming AgentStorageGC
// messages can be turned into executable events.
func init() {
	RegisterMessageConvertor(NewAgentStorageGC)
}

+ 0
- 44
scanner/internal/event/check_package.go View File

@@ -1,44 +0,0 @@
package event

import (
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

// CheckPackage wraps the scanner CheckPackage message as an executable event.
type CheckPackage struct {
	*scevt.CheckPackage
}

// NewCheckPackage builds an event around the given message.
func NewCheckPackage(evt *scevt.CheckPackage) *CheckPackage {
	e := new(CheckPackage)
	e.CheckPackage = evt
	return e
}

// TryMerge absorbs other when it is also a CheckPackage event, taking the
// union of the two ID lists; it reports whether the merge happened.
func (t *CheckPackage) TryMerge(other Event) bool {
	o, ok := other.(*CheckPackage)
	if !ok {
		return false
	}

	t.PackageIDs = lo.Union(t.PackageIDs, o.PackageIDs)
	return true
}

// Execute tries to permanently delete each listed package if it is unused;
// failures are logged per package and do not stop the loop.
func (t *CheckPackage) Execute(execCtx ExecuteContext) {
	log := logger.WithType[CheckPackage]("Event")
	log.Debugf("begin with %v", logger.FormatStruct(t.CheckPackage))
	defer log.Debugf("end")

	db := execCtx.Args.DB
	for _, pkgID := range t.PackageIDs {
		if err := db.Package().DeleteUnused(db.DefCtx(), pkgID); err != nil {
			log.WithField("PackageID", pkgID).Warnf("delete unused package failed, err: %s", err.Error())
		}
	}
}

// Register the message-to-event convertor so incoming CheckPackage messages
// can be turned into executable events.
func init() {
	RegisterMessageConvertor(NewCheckPackage)
}

+ 4
- 4
scanner/internal/event/check_package_redundancy.go View File

@@ -515,7 +515,7 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD
ft.ECParam = red
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *srcStg.Storage.MasterHub, srcStg.Storage.Storage, ioswitch2.RawStream()))
for i := 0; i < red.N; i++ {
ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.ECSrteam(i), fmt.Sprintf("%d", i)))
ft.AddTo(ioswitch2.NewToShardStore(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))
}
plans := exec.NewPlanBuilder()
err := parser.Parse(ft, plans)
@@ -745,7 +745,7 @@ func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDe
ft.ECParam = srcRed

for i2, block := range chosenBlocks {
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2].Storage, ioswitch2.ECSrteam(block.Index)))
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2].Storage, ioswitch2.ECStream(block.Index)))
}

len := obj.Object.Size
@@ -841,11 +841,11 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet
ft := ioswitch2.NewFromTo()
ft.ECParam = srcRed
for i2, block := range chosenBlocks {
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2].Storage, ioswitch2.ECSrteam(block.Index)))
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2].Storage, ioswitch2.ECStream(block.Index)))
}

// 输出只需要自己要保存的那一块
ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.ECSrteam(i), fmt.Sprintf("%d", i)))
ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))

err := parser.Parse(ft, planBlder)
if err != nil {


+ 1
- 1
scanner/internal/event/clean_pinned.go View File

@@ -802,7 +802,7 @@ func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stg
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *allStgInfos[id].MasterHub, allStgInfos[id].Storage, ioswitch2.RawStream()))

for _, i := range *idxs {
ft.AddTo(ioswitch2.NewToShardStore(*allStgInfos[id].MasterHub, *allStgInfos[id], ioswitch2.ECSrteam(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
ft.AddTo(ioswitch2.NewToShardStore(*allStgInfos[id].MasterHub, *allStgInfos[id], ioswitch2.ECStream(i), fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
}

err := parser.Parse(ft, planBld)


+ 0
- 38
scanner/internal/tickevent/batch_check_all_package.go View File

@@ -1,38 +0,0 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

// BatchCheckAllPackage walks every package ID in fixed-size batches and
// schedules a CheckPackage event for each batch.
type BatchCheckAllPackage struct {
	lastCheckStart int // offset of the next batch within the full ID list
}

// NewBatchCheckAllPackage returns a tick event whose scan starts at offset 0.
func NewBatchCheckAllPackage() *BatchCheckAllPackage {
	return new(BatchCheckAllPackage)
}

// Execute fetches the next batch of package IDs from the database and posts a
// CheckPackage event for them, advancing the cursor. A short batch means the
// scan reached the end, so the cursor wraps back to zero for the next tick.
func (e *BatchCheckAllPackage) Execute(ctx ExecuteContext) {
	log := logger.WithType[BatchCheckAllPackage]("TickEvent")
	log.Debugf("begin")
	defer log.Debugf("end")

	db := ctx.Args.DB
	ids, err := db.Package().BatchGetAllPackageIDs(db.DefCtx(), e.lastCheckStart, CheckPackageBatchSize)
	if err != nil {
		log.Warnf("batch get package ids failed, err: %s", err.Error())
		return
	}

	ctx.Args.EventExecutor.Post(event.NewCheckPackage(scevt.NewCheckPackage(ids)))

	// Fewer results than requested => everything has been visited; restart
	// from the beginning next time.
	if len(ids) < CheckPackageBatchSize {
		e.lastCheckStart = 0
		log.Debugf("all package checked, next time will start check at 0")
	} else {
		e.lastCheckStart += CheckPackageBatchSize
	}
}

+ 0
- 43
scanner/internal/tickevent/batch_check_all_storage.go View File

@@ -1,43 +0,0 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
)

// CHECK_STORAGE_BATCH_SIZE is how many storages are scheduled for checking
// per tick.
const CHECK_STORAGE_BATCH_SIZE = 5

// BatchCheckAllStorage walks every storage ID in fixed-size batches and
// posts an AgentCheckStorage event for each one.
type BatchCheckAllStorage struct {
	lastCheckStart int // offset of the next batch within the full ID list
}

// NewBatchCheckAllStorage returns a tick event whose scan starts at offset 0.
func NewBatchCheckAllStorage() *BatchCheckAllStorage {
	return new(BatchCheckAllStorage)
}

// Execute fetches the next batch of storage IDs and posts a check event for
// each of them, advancing the cursor. A short batch means the scan reached
// the end, so the cursor wraps back to zero for the next tick.
func (e *BatchCheckAllStorage) Execute(ctx ExecuteContext) {
	log := logger.WithType[BatchCheckAllStorage]("TickEvent")
	log.Debugf("begin")
	defer log.Debugf("end")

	db := ctx.Args.DB
	ids, err := db.Storage().BatchGetAllStorageIDs(db.DefCtx(), e.lastCheckStart, CHECK_STORAGE_BATCH_SIZE)
	if err != nil {
		log.Warnf("batch get storage ids failed, err: %s", err.Error())
		return
	}

	for _, stgID := range ids {
		// Posting with only the storage ID requests a full check.
		ctx.Args.EventExecutor.Post(event.NewAgentCheckStorage(scevt.NewAgentCheckStorage(stgID)))
	}

	// Fewer results than requested => everything has been visited; restart
	// from the beginning next time.
	if len(ids) < CHECK_STORAGE_BATCH_SIZE {
		e.lastCheckStart = 0
		log.Debugf("all storage checked, next time will start check at 0")
	} else {
		e.lastCheckStart += CHECK_STORAGE_BATCH_SIZE
	}
}

+ 0
- 1
scanner/internal/tickevent/storage_gc.go View File

@@ -41,6 +41,5 @@ func (e *StorageGC) Execute(ctx ExecuteContext) {
}

ctx.Args.EventExecutor.Post(event.NewAgentShardStoreGC(scevt.NewAgentShardStoreGC(e.storageIDs[0])))
ctx.Args.EventExecutor.Post(event.NewAgentStorageGC(scevt.NewAgentStorageGC(e.storageIDs[0])))
e.storageIDs = e.storageIDs[1:]
}

+ 0
- 6
scanner/main.go View File

@@ -141,14 +141,8 @@ func startTickEvent(tickExecutor *tickevent.Executor) {

tickExecutor.Start(tickevent.NewBatchAllAgentCheckShardStore(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckAllPackage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewStorageGC(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

// tickExecutor.Start(tickevent.NewBatchCheckAllRepCount(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckAllStorage(), interval, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewCheckAgentState(), 5*60*1000, tickevent.StartOption{RandomStartDelayMs: 60 * 1000})

tickExecutor.Start(tickevent.NewBatchCheckPackageRedundancy(), interval, tickevent.StartOption{RandomStartDelayMs: 20 * 60 * 1000})


Loading…
Cancel
Save