Browse Source

调整数据库;解耦ipfs

gitlink
Sydonian 1 year ago
parent
commit
d905a46fe6
100 changed files with 1899 additions and 1505 deletions
  1. +139
    -177
      agent/internal/http/hub_io.go
  2. +9
    -9
      agent/internal/mq/cache.go
  3. +29
    -14
      agent/internal/mq/storage.go
  4. +4
    -4
      agent/internal/task/cache_move_package.go
  5. +63
    -67
      agent/internal/task/storage_load_package.go
  6. +2
    -2
      client/internal/cmdline/cache.go
  7. +7
    -7
      client/internal/cmdline/package.go
  8. +11
    -11
      client/internal/http/package.go
  9. +3
    -3
      client/internal/http/server.go
  10. +13
    -11
      client/internal/http/temp.go
  11. +1
    -1
      client/internal/services/bucket.go
  12. +6
    -6
      client/internal/services/cache.go
  13. +1
    -1
      client/internal/services/object.go
  14. +7
    -7
      client/internal/services/package.go
  15. +9
    -9
      client/internal/services/storage.go
  16. +3
    -3
      common/globals/utils.go
  17. +25
    -24
      common/models/models.go
  18. +0
    -110
      common/pkgs/cmd/download_package.go
  19. +31
    -31
      common/pkgs/cmd/upload_objects.go
  20. +21
    -5
      common/pkgs/connectivity/collector.go
  21. +2
    -0
      common/pkgs/db/bucket.go
  22. +2
    -0
      common/pkgs/db/object.go
  23. +2
    -0
      common/pkgs/db/package.go
  24. +2
    -1
      common/pkgs/db2/bucket.go
  25. +36
    -45
      common/pkgs/db2/cache.go
  26. +0
    -0
      common/pkgs/db2/config/config.go
  27. +1
    -1
      common/pkgs/db2/db2.go
  28. +2
    -1
      common/pkgs/db2/location.go
  29. +18
    -18
      common/pkgs/db2/model/model.go
  30. +7
    -0
      common/pkgs/db2/node.go
  31. +1
    -1
      common/pkgs/db2/node_connectivity.go
  32. +21
    -24
      common/pkgs/db2/object.go
  33. +4
    -4
      common/pkgs/db2/object_access_stat.go
  34. +6
    -6
      common/pkgs/db2/object_block.go
  35. +2
    -1
      common/pkgs/db2/package.go
  36. +4
    -4
      common/pkgs/db2/package_access_stat.go
  37. +6
    -0
      common/pkgs/db2/shard_storage.go
  38. +6
    -0
      common/pkgs/db2/shared_storage.go
  39. +21
    -5
      common/pkgs/db2/storage.go
  40. +1
    -1
      common/pkgs/db2/storage_package.go
  41. +1
    -1
      common/pkgs/db2/user.go
  42. +1
    -1
      common/pkgs/db2/user_bucket.go
  43. +8
    -8
      common/pkgs/distlock/reqbuilder/ipfs.go
  44. +1
    -1
      common/pkgs/downloader/downloader.go
  45. +66
    -57
      common/pkgs/downloader/iterator.go
  46. +6
    -6
      common/pkgs/downloader/lrc.go
  47. +2
    -2
      common/pkgs/downloader/lrc_strip_iterator.go
  48. +4
    -4
      common/pkgs/downloader/strip_iterator.go
  49. +3
    -2
      common/pkgs/ioswitch2/agent_worker.go
  50. +20
    -18
      common/pkgs/ioswitch2/fromto.go
  51. +32
    -5
      common/pkgs/ioswitch2/http_hub_worker.go
  52. +13
    -4
      common/pkgs/ioswitch2/ops2/multipart.go
  53. +12
    -10
      common/pkgs/ioswitch2/ops2/shard_store.go
  54. +8
    -8
      common/pkgs/ioswitch2/parser/parser.go
  55. +3
    -2
      common/pkgs/ioswitchlrc/agent_worker.go
  56. +10
    -8
      common/pkgs/ioswitchlrc/fromto.go
  57. +7
    -7
      common/pkgs/ioswitchlrc/ops2/shard_store.go
  58. +3
    -2
      common/pkgs/ioswitchlrc/parser/passes.go
  59. +5
    -6
      common/pkgs/mq/agent/cache.go
  60. +1
    -1
      common/pkgs/mq/agent/storage.go
  61. +1
    -1
      common/pkgs/mq/coordinator/bucket.go
  62. +3
    -3
      common/pkgs/mq/coordinator/cache.go
  63. +2
    -2
      common/pkgs/mq/coordinator/object.go
  64. +34
    -34
      common/pkgs/mq/coordinator/package.go
  65. +43
    -15
      common/pkgs/mq/coordinator/storage.go
  66. +3
    -3
      common/pkgs/mq/scanner/event/agent_cache_gc.go
  67. +3
    -3
      common/pkgs/mq/scanner/event/agent_check_cache.go
  68. +49
    -2
      common/pkgs/storage/shard/pool/pool.go
  69. +138
    -8
      common/pkgs/storage/shard/storages/local/local.go
  70. +48
    -2
      common/pkgs/storage/shard/storages/local/writer.go
  71. +26
    -0
      common/pkgs/storage/shard/storages/utils/utils.go
  72. +7
    -3
      common/pkgs/storage/shard/types/option.go
  73. +13
    -7
      common/pkgs/storage/shard/types/shardstore.go
  74. +26
    -20
      coordinator/internal/cmd/migrate.go
  75. +1
    -7
      coordinator/internal/cmd/serve.go
  76. +1
    -1
      coordinator/internal/config/config.go
  77. +2
    -1
      coordinator/internal/mq/bucket.go
  78. +7
    -6
      coordinator/internal/mq/cache.go
  79. +18
    -48
      coordinator/internal/mq/object.go
  80. +19
    -18
      coordinator/internal/mq/package.go
  81. +3
    -6
      coordinator/internal/mq/service.go
  82. +118
    -15
      coordinator/internal/mq/storage.go
  83. +5
    -6
      coordinator/internal/mq/temp.go
  84. +1
    -1
      scanner/internal/config/config.go
  85. +24
    -13
      scanner/internal/event/agent_cache_gc.go
  86. +30
    -25
      scanner/internal/event/agent_check_cache.go
  87. +3
    -3
      scanner/internal/event/agent_check_state.go
  88. +8
    -8
      scanner/internal/event/agent_check_storage.go
  89. +4
    -4
      scanner/internal/event/agent_storage_gc.go
  90. +1
    -1
      scanner/internal/event/check_package.go
  91. +317
    -241
      scanner/internal/event/check_package_redundancy.go
  92. +158
    -182
      scanner/internal/event/clean_pinned.go
  93. +13
    -13
      scanner/internal/event/clean_pinned_test.go
  94. +3
    -3
      scanner/internal/event/event.go
  95. +19
    -17
      scanner/internal/event/event_test.go
  96. +4
    -4
      scanner/internal/event/update_package_access_stat_amount.go
  97. +7
    -9
      scanner/internal/tickevent/batch_all_agent_check_cache.go
  98. +1
    -1
      scanner/internal/tickevent/batch_check_all_package.go
  99. +1
    -1
      scanner/internal/tickevent/batch_check_all_storage.go
  100. +1
    -1
      scanner/internal/tickevent/batch_check_package_redudancy.go

+ 139
- 177
agent/internal/http/hub_io.go View File

@@ -27,17 +27,16 @@ func (s *Server) IOSvc() *IOService {
}

func (s *IOService) GetStream(ctx *gin.Context) {
var req cdsapi.GetStreamReq
if err := ctx.ShouldBindJSON(&req); err != nil {
logger.Warnf("binding body: %s", err.Error())
log := logger.WithField("HTTP", "HubIO.GetStream")

req, err := serder.JSONToObjectStreamEx[cdsapi.GetStreamReq](ctx.Request.Body)
if err != nil {
log.Warnf("deserializing request: %v", err)
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

logger.
WithField("PlanID", req.PlanID).
WithField("VarID", req.VarID).
Debugf("stream output")
log = log.WithField("PlanID", req.PlanID).WithField("VarID", req.VarID)

// 设置超时
c, cancel := context.WithTimeout(ctx.Request.Context(), time.Second*30)
@@ -45,168 +44,125 @@ func (s *IOService) GetStream(ctx *gin.Context) {

sw := s.svc.swWorker.FindByIDContexted(c, req.PlanID)
if sw == nil {
ctx.JSON(http.StatusNotFound, gin.H{"error": "plan not found"})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "plan not found"))
return
}

signalBytes, err := serder.ObjectToJSON(req.Signal)
if err != nil {
logger.Warnf("serializing SignalVar: %s", err)
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "serializing SignalVar fail"))
return
}
signal, err := serder.JSONToObjectEx[*exec.SignalVar](signalBytes)
if err != nil {
ctx.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("deserializing var: %v", err)})
return
}
sw.PutVar(req.SignalID, req.Signal)

sw.PutVars(signal)

strVar := &exec.StreamVar{
ID: req.VarID,
}
err = sw.BindVars(ctx.Request.Context(), strVar)
strVal, err := exec.BindVar[*exec.StreamValue](sw, ctx.Request.Context(), req.VarID)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("binding vars: %v", err)})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("bind var: %v", err)))
return
}

reader := strVar.Stream
defer reader.Close()
defer strVal.Stream.Close()

ctx.Header("Content-Type", "application/octet-stream")
ctx.Status(http.StatusOK)

buf := make([]byte, 1024*64)
readAllCnt := 0
startTime := time.Now()
for {
readCnt, err := reader.Read(buf)

if readCnt > 0 {
readAllCnt += readCnt
_, err := ctx.Writer.Write(buf[:readCnt])
if err != nil {
logger.
WithField("PlanID", req.PlanID).
WithField("VarID", req.VarID).
Warnf("send stream data failed, err: %s", err.Error())
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("send stream data failed, err: %v", err)})
return
}
// 刷新缓冲区,确保数据立即发送
ctx.Writer.Flush()
}

// 文件读取完毕
if err == io.EOF {
dt := time.Since(startTime)
logger.
WithField("PlanID", req.PlanID).
WithField("VarID", req.VarID).
Debugf("send data size %d in %v, speed %v/s", readAllCnt, dt, bytesize.New(float64(readAllCnt)/dt.Seconds()))
return
}

// io.ErrUnexpectedEOF 没有读满整个 buf 就遇到了 EOF,此时正常发送剩余数据即可
if err != nil && err != io.ErrUnexpectedEOF {
logger.
WithField("PlanID", req.PlanID).
WithField("VarID", req.VarID).
Warnf("reading stream data: %s", err.Error())
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("reading stream data: %v", err)})
return
}
}
}

func (s *IOService) SendStream(ctx *gin.Context) {
var req cdsapi.SendStreamReq
if err := ctx.ShouldBindJSON(&req); err != nil {
logger.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
n, err := cdsapi.WriteStream(ctx.Writer, strVal.Stream)
if err != nil {
log.Warnf("sending stream: %v", err)
return
}
dt := time.Since(startTime)

logger.
WithField("PlanID", req.PlanID).
WithField("VarID", req.VarID).
Debugf("stream input")
log.Debugf("send stream completed, size: %v, time: %v, speed: %v/s", n, dt, bytesize.New(float64(n)/dt.Seconds()))
}

// 超时设置
c, cancel := context.WithTimeout(ctx.Request.Context(), time.Second*30)
defer cancel()
func (s *IOService) SendStream(ctx *gin.Context) {
ctx.JSON(http.StatusBadRequest, Failed(errorcode.OperationFailed, "not implemented"))
return

sw := s.svc.swWorker.FindByIDContexted(c, req.PlanID)
if sw == nil {
ctx.JSON(http.StatusNotFound, gin.H{"error": "plan not found"})
return
}
// var req cdsapi.SendStreamReq
// if err := ctx.ShouldBindJSON(&req); err != nil {
// logger.Warnf("binding body: %s", err.Error())
// ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
// return
// }

pr, pw := io.Pipe()
defer pr.Close()
// logger.
// WithField("PlanID", req.PlanID).
// WithField("VarID", req.VarID).
// Debugf("stream input")

streamVar := &exec.StreamVar{
ID: req.VarID,
Stream: pr,
}
sw.PutVars(streamVar)

var recvSize int64

go func() {
defer pw.Close()
_, err := io.Copy(pw, ctx.Request.Body)
if err != nil {
logger.Warnf("write data to file failed, err: %s", err.Error())
pw.CloseWithError(fmt.Errorf("write data to file failed: %w", err))
}
}()

for {
buf := make([]byte, 1024*64)
n, err := pr.Read(buf)
if err != nil {
if err == io.EOF {
logger.WithField("ReceiveSize", recvSize).
WithField("VarID", req.VarID).
Debugf("file transmission completed")

// 将结果返回给客户端
ctx.JSON(http.StatusOK, gin.H{"message": "file transmission completed"})
return
}
logger.Warnf("read stream failed, err: %s", err.Error())
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("read stream failed: %v", err)})
return
}

if n > 0 {
recvSize += int64(n)
// 处理接收到的数据,例如写入文件或进行其他操作
}
}
// 超时设置
// c, cancel := context.WithTimeout(ctx.Request.Context(), time.Second*30)
// defer cancel()

// sw := s.svc.swWorker.FindByIDContexted(c, req.PlanID)
// if sw == nil {
// ctx.JSON(http.StatusNotFound, gin.H{"error": "plan not found"})
// return
// }

// pr, pw := io.Pipe()
// defer pr.Close()

// streamVar := &exec.StreamVar{
// ID: req.VarID,
// Stream: pr,
// }
// sw.PutVar(streamVar)

// var recvSize int64

// go func() {
// defer pw.Close()
// _, err := io.Copy(pw, ctx.Request.Body)
// if err != nil {
// logger.Warnf("write data to file failed, err: %s", err.Error())
// pw.CloseWithError(fmt.Errorf("write data to file failed: %w", err))
// }
// }()

// for {
// buf := make([]byte, 1024*64)
// n, err := pr.Read(buf)
// if err != nil {
// if err == io.EOF {
// logger.WithField("ReceiveSize", recvSize).
// WithField("VarID", req.VarID).
// Debugf("file transmission completed")

// // 将结果返回给客户端
// ctx.JSON(http.StatusOK, gin.H{"message": "file transmission completed"})
// return
// }
// logger.Warnf("read stream failed, err: %s", err.Error())
// ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("read stream failed: %v", err)})
// return
// }

// if n > 0 {
// recvSize += int64(n)
// // 处理接收到的数据,例如写入文件或进行其他操作
// }
// }
}

func (s *IOService) ExecuteIOPlan(ctx *gin.Context) {
log := logger.WithField("HTTP", "HubIO.ExecuteIOPlan")

data, err := io.ReadAll(ctx.Request.Body)
if err != nil {
logger.Warnf("reading body: %s", err.Error())
ctx.JSON(http.StatusInternalServerError, Failed("400", "internal error"))
log.Warnf("reading body: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "reading body failed"))
return
}

plan, err := serder.JSONToObjectEx[exec.Plan](data)
req, err := serder.JSONToObjectEx[cdsapi.ExecuteIOPlanReq](data)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("deserializing plan: %v", err)})
log.Warnf("deserializing request: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

logger.WithField("PlanID", plan.ID).Infof("begin execute io plan")
defer logger.WithField("PlanID", plan.ID).Infof("plan finished")
log = log.WithField("PlanID", req.Plan.ID)

log.Infof("begin execute io plan")

sw := exec.NewExecutor(plan)
sw := exec.NewExecutor(req.Plan)

s.svc.swWorker.Add(sw)
defer s.svc.swWorker.Remove(sw)
@@ -214,21 +170,29 @@ func (s *IOService) ExecuteIOPlan(ctx *gin.Context) {
execCtx := exec.NewWithContext(ctx.Request.Context())

// TODO 注入依赖

_, err = sw.Run(execCtx)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("running io plan: %v", err)})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("executing plan: %v", err)))
return
}

ctx.JSON(http.StatusOK, gin.H{"message": "plan executed successfully"})
ctx.JSON(http.StatusOK, OK(nil))
}

func (s *IOService) SendVar(ctx *gin.Context) {
var req cdsapi.SendVarReq
if err := ctx.ShouldBindJSON(&req); err != nil {
logger.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
log := logger.WithField("HTTP", "HubIO.SendVar")

data, err := io.ReadAll(ctx.Request.Body)
if err != nil {
log.Warnf("reading body: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "reading body failed"))
return
}

req, err := serder.JSONToObjectEx[cdsapi.SendVarReq](data)
if err != nil {
log.Warnf("deserializing request: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

@@ -237,65 +201,63 @@ func (s *IOService) SendVar(ctx *gin.Context) {

sw := s.svc.swWorker.FindByIDContexted(c, req.PlanID)
if sw == nil {
ctx.JSON(http.StatusNotFound, gin.H{"error": "plan not found"})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "plan not found"))
return
}

VarBytes, err := serder.ObjectToJSON(req.Var)
v, err := serder.JSONToObjectEx[exec.Var](VarBytes)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("deserializing var: %v", err)})
return
}
sw.PutVar(req.VarID, req.VarValue)

sw.PutVars(v)
ctx.JSON(http.StatusOK, gin.H{"message": "var sent successfully"})
ctx.JSON(http.StatusOK, OK(nil))
}

func (s *IOService) GetVar(ctx *gin.Context) {
var req cdsapi.GetVarReq
if err := ctx.ShouldBindJSON(&req); err != nil {
logger.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
log := logger.WithField("HTTP", "HubIO.GetVar")

data, err := io.ReadAll(ctx.Request.Body)
if err != nil {
log.Warnf("reading body: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "reading body failed"))
return
}

req, err := serder.JSONToObjectEx[cdsapi.GetVarReq](data)
if err != nil {
log.Warnf("deserializing request: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

log = log.WithField("PlanID", req.PlanID).WithField("VarID", req.VarID)

c, cancel := context.WithTimeout(ctx.Request.Context(), time.Second*30)
defer cancel()

sw := s.svc.swWorker.FindByIDContexted(c, req.PlanID)
if sw == nil {
ctx.JSON(http.StatusNotFound, gin.H{"error": "plan not found"})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "plan not found"))
return
}

VarBytes, err := serder.ObjectToJSON(req.Var)
v, err := serder.JSONToObjectEx[exec.Var](VarBytes)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("deserializing var: %v", err)})
return
}
sw.PutVar(req.SignalID, req.Signal)

SignalBytes, err := serder.ObjectToJSON(req.Signal)
signal, err := serder.JSONToObjectEx[*exec.SignalVar](SignalBytes)
v, err := sw.BindVar(ctx.Request.Context(), req.VarID)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("deserializing signal: %v", err)})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("bind var: %v", err)))
return
}

sw.PutVars(signal)

err = sw.BindVars(c, v)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("binding vars: %v", err)})
return
resp := Response{
Code: errorcode.OK,
Data: cdsapi.GetVarResp{
Value: v,
},
}

vd, err := serder.ObjectToJSONEx(v)
respData, err := serder.ObjectToJSONEx(resp)
if err != nil {
ctx.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("serializing var: %v", err)})
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, fmt.Sprintf("serializing response: %v", err)))
return
}

ctx.JSON(http.StatusOK, gin.H{"var": string(vd)})
ctx.JSON(http.StatusOK, respData)
}

+ 9
- 9
agent/internal/mq/cache.go View File

@@ -6,15 +6,15 @@ import (

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *mq.CodeMessage) {
store, err := svc.shardStorePool.Get(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("finding shard store: %v", err))
store := svc.shardStorePool.Get(msg.StorageID)
if store == nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("storage %v has no shard store", msg.StorageID))
}

infos, err := store.ListAll()
@@ -22,7 +22,7 @@ func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *m
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("listting file in shard store: %v", err))
}

var fileHashes []types.FileHash
var fileHashes []cdssdk.FileHash
for _, info := range infos {
fileHashes = append(fileHashes, info.Hash)
}
@@ -31,12 +31,12 @@ func (svc *Service) CheckCache(msg *agtmq.CheckCache) (*agtmq.CheckCacheResp, *m
}

func (svc *Service) CacheGC(msg *agtmq.CacheGC) (*agtmq.CacheGCResp, *mq.CodeMessage) {
store, err := svc.shardStorePool.Get(msg.StorageID)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("finding shard store: %v", err))
store := svc.shardStorePool.Get(msg.StorageID)
if store == nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("storage %v has no shard store", msg.StorageID))
}

err = store.Purge(msg.Avaiables)
err := store.Purge(msg.Avaiables)
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, fmt.Sprintf("purging cache: %v", err))
}


+ 29
- 14
agent/internal/mq/storage.go View File

@@ -16,7 +16,7 @@ import (
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
@@ -76,15 +76,21 @@ func (svc *Service) StorageCheck(msg *agtmq.StorageCheck) (*agtmq.StorageCheckRe
defer stgglb.CoordinatorMQPool.Release(coorCli)

// TODO UserID。应该设计两种接口,一种需要UserID,一种不需要。
getStg, err := coorCli.GetStorage(coormq.ReqGetStorage(cdssdk.UserID(1), msg.StorageID))
getStg, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{msg.StorageID}))
if err != nil {
return mq.ReplyOK(agtmq.NewStorageCheckResp(
err.Error(),
nil,
))
}
if getStg.Storages[0] == nil {
return nil, mq.Failed(errorcode.OperationFailed, "storage not found")
}
if getStg.Storages[0].Shared == nil {
return nil, mq.Failed(errorcode.OperationFailed, "storage has no shared storage")
}

entries, err := os.ReadDir(utils.MakeStorageLoadDirectory(getStg.Storage.LocalBase))
entries, err := os.ReadDir(utils.MakeStorageLoadDirectory(getStg.Storages[0].Shared.LoadBase))
if err != nil {
logger.Warnf("list storage directory failed, err: %s", err.Error())
return mq.ReplyOK(agtmq.NewStorageCheckResp(
@@ -103,7 +109,7 @@ func (svc *Service) StorageCheck(msg *agtmq.StorageCheck) (*agtmq.StorageCheckRe
continue
}

pkgDir := filepath.Join(utils.MakeStorageLoadDirectory(getStg.Storage.LocalBase), dir.Name())
pkgDir := filepath.Join(utils.MakeStorageLoadDirectory(getStg.Storages[0].Shared.LoadBase), dir.Name())
pkgDirs, err := os.ReadDir(pkgDir)
if err != nil {
logger.Warnf("reading package dir %s: %s", pkgDir, err.Error())
@@ -136,12 +142,18 @@ func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.C
defer stgglb.CoordinatorMQPool.Release(coorCli)

// TODO UserID。应该设计两种接口,一种需要UserID,一种不需要。
getStg, err := coorCli.GetStorage(coormq.ReqGetStorage(cdssdk.UserID(1), msg.StorageID))
getStg, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{msg.StorageID}))
if err != nil {
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}
if getStg.Storages[0] == nil {
return nil, mq.Failed(errorcode.OperationFailed, "storage not found")
}
if getStg.Storages[0].Shared == nil {
return nil, mq.Failed(errorcode.OperationFailed, "storage has no shared storage")
}

entries, err := os.ReadDir(utils.MakeStorageLoadDirectory(getStg.Storage.LocalBase))
entries, err := os.ReadDir(utils.MakeStorageLoadDirectory(getStg.Storages[0].Shared.LoadBase))
if err != nil {
logger.Warnf("list storage directory failed, err: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "list directory files failed")
@@ -167,7 +179,7 @@ func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.C
pkgMap, ok := userPkgs[dir.Name()]
// 第一级目录名是UserID,先删除UserID在StoragePackage表里没出现过的文件夹
if !ok {
rmPath := filepath.Join(utils.MakeStorageLoadDirectory(getStg.Storage.LocalBase), dir.Name())
rmPath := filepath.Join(utils.MakeStorageLoadDirectory(getStg.Storages[0].Shared.LoadBase), dir.Name())
err := os.RemoveAll(rmPath)
if err != nil {
logger.Warnf("removing user dir %s: %s", rmPath, err.Error())
@@ -177,7 +189,7 @@ func (svc *Service) StorageGC(msg *agtmq.StorageGC) (*agtmq.StorageGCResp, *mq.C
continue
}

pkgDir := filepath.Join(utils.MakeStorageLoadDirectory(getStg.Storage.LocalBase), dir.Name())
pkgDir := filepath.Join(utils.MakeStorageLoadDirectory(getStg.Storages[0].Shared.LoadBase), dir.Name())
// 遍历每个UserID目录的packages目录里的内容
pkgs, err := os.ReadDir(pkgDir)
if err != nil {
@@ -210,15 +222,18 @@ func (svc *Service) StartStorageCreatePackage(msg *agtmq.StartStorageCreatePacka
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

getStgResp, err := coorCli.GetStorage(coormq.ReqGetStorage(msg.UserID, msg.StorageID))
getStg, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{msg.StorageID}))
if err != nil {
logger.WithField("StorageID", msg.StorageID).
Warnf("getting storage info: %s", err.Error())

return nil, mq.Failed(errorcode.OperationFailed, "get storage info failed")
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}
if getStg.Storages[0] == nil {
return nil, mq.Failed(errorcode.OperationFailed, "storage not found")
}
if getStg.Storages[0].Shared == nil {
return nil, mq.Failed(errorcode.OperationFailed, "storage has no shared storage")
}

fullPath := filepath.Clean(filepath.Join(getStgResp.Storage.LocalBase, msg.Path))
fullPath := filepath.Clean(filepath.Join(getStg.Storages[0].Shared.LoadBase, msg.Path))

var uploadFilePathes []string
err = filepath.WalkDir(fullPath, func(fname string, fi os.DirEntry, err error) error {


+ 4
- 4
agent/internal/task/cache_move_package.go View File

@@ -40,14 +40,14 @@ func (t *CacheMovePackage) do(ctx TaskContext) error {
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

store, err := ctx.shardStorePool.Get(t.storageID)
if err != nil {
return fmt.Errorf("getting shard store: %w", err)
store := ctx.shardStorePool.Get(t.storageID)
if store == nil {
return fmt.Errorf("storage has no shard store")
}

mutex, err := reqbuilder.NewBuilder().
// 保护解码出来的Object数据
IPFS().Buzy(*stgglb.Local.NodeID).
Shard().Buzy(t.storageID).
MutexLock(ctx.distlock)
if err != nil {
return fmt.Errorf("acquiring distlock: %w", err)


+ 63
- 67
agent/internal/task/storage_load_package.go View File

@@ -10,7 +10,6 @@ import (

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/bitmap"
"gitlink.org.cn/cloudream/common/pkgs/ipfs"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/task"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
@@ -23,6 +22,7 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ec"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
"gitlink.org.cn/cloudream/storage/common/utils"
)

@@ -71,19 +71,19 @@ func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) e
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

ipfsCli, err := stgglb.IPFSPool.Acquire()
if err != nil {
return fmt.Errorf("new IPFS client: %w", err)
}
defer stgglb.IPFSPool.Release(ipfsCli)

getStgResp, err := coorCli.GetStorage(coormq.ReqGetStorage(t.userID, t.storageID))
getStgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{t.storageID}))
if err != nil {
return fmt.Errorf("request to coordinator: %w", err)
}
if getStgResp.Storages[0] == nil {
return fmt.Errorf("storage not found")
}
if getStgResp.Storages[0].Shared == nil {
return fmt.Errorf("storage has shared storage")
}

t.PackagePath = utils.MakeLoadedPackagePath(t.userID, t.packageID)
fullLocalPath := filepath.Join(getStgResp.Storage.LocalBase, t.PackagePath)
fullLocalPath := filepath.Join(getStgResp.Storages[0].Shared.LoadBase, t.PackagePath)

if err = os.MkdirAll(fullLocalPath, 0755); err != nil {
return fmt.Errorf("creating output directory: %w", err)
@@ -94,13 +94,18 @@ func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) e
return fmt.Errorf("getting package object details: %w", err)
}

shardstore := ctx.shardStorePool.Get(t.storageID)
if shardstore == nil {
return fmt.Errorf("shard store %v not found on this hub", t.storageID)
}

mutex, err := reqbuilder.NewBuilder().
// 提前占位
Metadata().StoragePackage().CreateOne(t.userID, t.storageID, t.packageID).
// 保护在storage目录中下载的文件
Storage().Buzy(t.storageID).
// 保护下载文件时同时保存到IPFS的文件
IPFS().Buzy(getStgResp.Storage.NodeID).
Shard().Buzy(t.storageID).
MutexLock(ctx.distlock)
if err != nil {
return fmt.Errorf("acquire locks failed, err: %w", err)
@@ -108,7 +113,7 @@ func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) e
defer mutex.Unlock()

for _, obj := range getObjectDetails.Objects {
err := t.downloadOne(coorCli, ipfsCli, fullLocalPath, obj)
err := t.downloadOne(coorCli, shardstore, fullLocalPath, obj)
if err != nil {
return err
}
@@ -124,26 +129,26 @@ func (t *StorageLoadPackage) do(task *task.Task[TaskContext], ctx TaskContext) e
return err
}

func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, ipfsCli *ipfs.PoolClient, dir string, obj stgmod.ObjectDetail) error {
func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, shardStore types.ShardStore, dir string, obj stgmod.ObjectDetail) error {
var file io.ReadCloser

switch red := obj.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
reader, err := t.downloadNoneOrRepObject(ipfsCli, obj)
reader, err := t.downloadNoneOrRepObject(shardStore, obj)
if err != nil {
return fmt.Errorf("downloading object: %w", err)
}
file = reader

case *cdssdk.RepRedundancy:
reader, err := t.downloadNoneOrRepObject(ipfsCli, obj)
reader, err := t.downloadNoneOrRepObject(shardStore, obj)
if err != nil {
return fmt.Errorf("downloading rep object: %w", err)
}
file = reader

case *cdssdk.ECRedundancy:
reader, pinnedBlocks, err := t.downloadECObject(coorCli, ipfsCli, obj, red)
reader, pinnedBlocks, err := t.downloadECObject(coorCli, shardStore, obj, red)
if err != nil {
return fmt.Errorf("downloading ec object: %w", err)
}
@@ -175,15 +180,12 @@ func (t *StorageLoadPackage) downloadOne(coorCli *coormq.Client, ipfsCli *ipfs.P
return nil
}

func (t *StorageLoadPackage) downloadNoneOrRepObject(ipfsCli *ipfs.PoolClient, obj stgmod.ObjectDetail) (io.ReadCloser, error) {
func (t *StorageLoadPackage) downloadNoneOrRepObject(shardStore types.ShardStore, obj stgmod.ObjectDetail) (io.ReadCloser, error) {
if len(obj.Blocks) == 0 && len(obj.PinnedAt) == 0 {
return nil, fmt.Errorf("no node has this object")
}

// 不管实际有没有成功
ipfsCli.Pin(obj.Object.FileHash)

file, err := ipfsCli.OpenRead(obj.Object.FileHash)
file, err := shardStore.Open(types.NewOpen(obj.Object.FileHash))
if err != nil {
return nil, err
}
@@ -191,7 +193,7 @@ func (t *StorageLoadPackage) downloadNoneOrRepObject(ipfsCli *ipfs.PoolClient, o
return file, nil
}

func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, ipfsCli *ipfs.PoolClient, obj stgmod.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, []stgmod.ObjectBlock, error) {
func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, shardStore types.ShardStore, obj stgmod.ObjectDetail, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, []stgmod.ObjectBlock, error) {
allNodes, err := t.sortDownloadNodes(coorCli, obj)
if err != nil {
return nil, nil, err
@@ -207,10 +209,7 @@ func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, ipfsCli *i
}

for i := range blocks {
// 不管实际有没有成功
ipfsCli.Pin(blocks[i].Block.FileHash)

str, err := ipfsCli.OpenRead(blocks[i].Block.FileHash)
str, err := shardStore.Open(types.NewOpen(blocks[i].Block.FileHash))
if err != nil {
for i -= 1; i >= 0; i-- {
fileStrs[i].Close()
@@ -224,22 +223,15 @@ func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, ipfsCli *i
fileReaders, filesCloser := io2.ToReaders(fileStrs)

var indexes []int
var pinnedBlocks []stgmod.ObjectBlock
for _, b := range blocks {
indexes = append(indexes, b.Block.Index)
pinnedBlocks = append(pinnedBlocks, stgmod.ObjectBlock{
ObjectID: b.Block.ObjectID,
Index: b.Block.Index,
NodeID: *stgglb.Local.NodeID,
FileHash: b.Block.FileHash,
})
}

outputs, outputsCloser := io2.ToReaders(rs.ReconstructData(fileReaders, indexes))
return io2.AfterReadClosed(io2.Length(io2.ChunkedJoin(outputs, int(ecRed.ChunkSize)), obj.Object.Size), func(c io.ReadCloser) {
filesCloser()
outputsCloser()
}), pinnedBlocks, nil
}), nil, nil
}

// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
@@ -248,42 +240,46 @@ func (t *StorageLoadPackage) downloadECObject(coorCli *coormq.Client, ipfsCli *i
}

// 如果是直接读取的文件,那么就不需要Pin文件块
str, err := ipfsCli.OpenRead(obj.Object.FileHash)
str, err := shardStore.Open(types.NewOpen(obj.Object.FileHash))
return str, nil, err
}

type downloadNodeInfo struct {
Node cdssdk.Node
type downloadStorageInfo struct {
Storage stgmod.StorageDetail
ObjectPinned bool
Blocks []stgmod.ObjectBlock
Distance float64
}

func (t *StorageLoadPackage) sortDownloadNodes(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadNodeInfo, error) {
var nodeIDs []cdssdk.NodeID
func (t *StorageLoadPackage) sortDownloadNodes(coorCli *coormq.Client, obj stgmod.ObjectDetail) ([]*downloadStorageInfo, error) {
var stgIDs []cdssdk.StorageID
for _, id := range obj.PinnedAt {
if !lo.Contains(nodeIDs, id) {
nodeIDs = append(nodeIDs, id)
if !lo.Contains(stgIDs, id) {
stgIDs = append(stgIDs, id)
}
}
for _, b := range obj.Blocks {
if !lo.Contains(nodeIDs, b.NodeID) {
nodeIDs = append(nodeIDs, b.NodeID)
if !lo.Contains(stgIDs, b.StorageID) {
stgIDs = append(stgIDs, b.StorageID)
}
}

getNodes, err := coorCli.GetNodes(coormq.NewGetNodes(nodeIDs))
getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs))
if err != nil {
return nil, fmt.Errorf("getting nodes: %w", err)
return nil, fmt.Errorf("getting storage details: %w", err)
}
allStgs := make(map[cdssdk.StorageID]stgmod.StorageDetail)
for _, stg := range getStgs.Storages {
allStgs[stg.Storage.StorageID] = *stg
}

downloadNodeMap := make(map[cdssdk.NodeID]*downloadNodeInfo)
downloadNodeMap := make(map[cdssdk.StorageID]*downloadStorageInfo)
for _, id := range obj.PinnedAt {
node, ok := downloadNodeMap[id]
if !ok {
mod := *getNodes.GetNode(id)
node = &downloadNodeInfo{
Node: mod,
mod := allStgs[id]
node = &downloadStorageInfo{
Storage: mod,
ObjectPinned: true,
Distance: t.getNodeDistance(mod),
}
@@ -294,30 +290,30 @@ func (t *StorageLoadPackage) sortDownloadNodes(coorCli *coormq.Client, obj stgmo
}

for _, b := range obj.Blocks {
node, ok := downloadNodeMap[b.NodeID]
node, ok := downloadNodeMap[b.StorageID]
if !ok {
mod := *getNodes.GetNode(b.NodeID)
node = &downloadNodeInfo{
Node: mod,
mod := allStgs[b.StorageID]
node = &downloadStorageInfo{
Storage: mod,
Distance: t.getNodeDistance(mod),
}
downloadNodeMap[b.NodeID] = node
downloadNodeMap[b.StorageID] = node
}

node.Blocks = append(node.Blocks, b)
}

return sort2.Sort(lo.Values(downloadNodeMap), func(left, right *downloadNodeInfo) int {
return sort2.Sort(lo.Values(downloadNodeMap), func(left, right *downloadStorageInfo) int {
return sort2.Cmp(left.Distance, right.Distance)
}), nil
}

type downloadBlock struct {
Node cdssdk.Node
Block stgmod.ObjectBlock
Storage stgmod.StorageDetail
Block stgmod.ObjectBlock
}

func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedNodes []*downloadNodeInfo, k int) (float64, []downloadBlock) {
func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedNodes []*downloadStorageInfo, k int) (float64, []downloadBlock) {
gotBlocksMap := bitmap.Bitmap64(0)
var gotBlocks []downloadBlock
dist := float64(0.0)
@@ -325,8 +321,8 @@ func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedNodes []*downloadN
for _, b := range n.Blocks {
if !gotBlocksMap.Get(b.Index) {
gotBlocks = append(gotBlocks, downloadBlock{
Node: n.Node,
Block: b,
Storage: n.Storage,
Block: b,
})
gotBlocksMap.Set(b.Index, true)
dist += n.Distance
@@ -341,28 +337,28 @@ func (t *StorageLoadPackage) getMinReadingBlockSolution(sortedNodes []*downloadN
return math.MaxFloat64, gotBlocks
}

func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedNodes []*downloadNodeInfo, k int) (float64, *cdssdk.Node) {
func (t *StorageLoadPackage) getMinReadingObjectSolution(sortedNodes []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) {
dist := math.MaxFloat64
var downloadNode *cdssdk.Node
var downloadStg *stgmod.StorageDetail
for _, n := range sortedNodes {
if n.ObjectPinned && float64(k)*n.Distance < dist {
dist = float64(k) * n.Distance
node := n.Node
downloadNode = &node
stg := n.Storage
downloadStg = &stg
}
}

return dist, downloadNode
return dist, downloadStg
}

func (t *StorageLoadPackage) getNodeDistance(node cdssdk.Node) float64 {
func (t *StorageLoadPackage) getNodeDistance(stg stgmod.StorageDetail) float64 {
if stgglb.Local.NodeID != nil {
if node.NodeID == *stgglb.Local.NodeID {
if stg.MasterHub.NodeID == *stgglb.Local.NodeID {
return consts.NodeDistanceSameNode
}
}

if node.LocationID == stgglb.Local.LocationID {
if stg.MasterHub.LocationID == stgglb.Local.LocationID {
return consts.NodeDistanceSameLocation
}



+ 2
- 2
client/internal/cmdline/cache.go View File

@@ -34,8 +34,8 @@ func CacheMovePackage(ctx CommandContext, packageID cdssdk.PackageID, stgID cdss
}
}

func CacheRemovePackage(ctx CommandContext, packageID cdssdk.PackageID, nodeID cdssdk.NodeID) error {
return ctx.Cmdline.Svc.CacheSvc().CacheRemovePackage(packageID, nodeID)
func CacheRemovePackage(ctx CommandContext, packageID cdssdk.PackageID, stgID cdssdk.StorageID) error {
return ctx.Cmdline.Svc.CacheSvc().CacheRemovePackage(packageID, stgID)
}

func init() {


+ 7
- 7
client/internal/cmdline/package.go View File

@@ -161,7 +161,7 @@ func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error
return nil
}

// PackageGetCachedNodes 获取指定包裹的缓存节点信息。
// PackageGetCachedStorages 获取指定包裹的缓存节点信息。
//
// 参数:
//
@@ -171,7 +171,7 @@ func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error
// 返回值:
//
// error - 操作过程中发生的任何错误。
func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error {
func PackageGetCachedStorages(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(1)
resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedNodes(userID, packageID)
fmt.Printf("resp: %v\n", resp)
@@ -181,7 +181,7 @@ func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error
return nil
}

// PackageGetLoadedNodes 获取指定包裹的已加载节点信息。
// PackageGetLoadedStorages 获取指定包裹的已加载节点信息。
//
// 参数:
//
@@ -191,9 +191,9 @@ func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error
// 返回值:
//
// error - 操作过程中发生的任何错误。
func PackageGetLoadedNodes(ctx CommandContext, packageID cdssdk.PackageID) error {
func PackageGetLoadedStorages(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(1)
nodeIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedNodes(userID, packageID)
nodeIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedStorages(userID, packageID)
fmt.Printf("nodeIDs: %v\n", nodeIDs)
if err != nil {
return fmt.Errorf("get package %d loaded nodes failed, err: %w", packageID, err)
@@ -212,8 +212,8 @@ func init() {
commands.MustAdd(PackageDeletePackage, "pkg", "delete")

// 查询package缓存到哪些节点
commands.MustAdd(PackageGetCachedNodes, "pkg", "cached")
commands.MustAdd(PackageGetCachedStorages, "pkg", "cached")

// 查询package调度到哪些节点
commands.MustAdd(PackageGetLoadedNodes, "pkg", "loaded")
commands.MustAdd(PackageGetLoadedStorages, "pkg", "loaded")
}

+ 11
- 11
client/internal/http/package.go View File

@@ -130,9 +130,9 @@ func (s *PackageService) ListBucketPackages(ctx *gin.Context) {
}))
}

// GetCachedNodes 处理获取包的缓存节点的HTTP请求。
func (s *PackageService) GetCachedNodes(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetCachedNodes")
// GetCachedStorages 处理获取包的缓存节点的HTTP请求。
func (s *PackageService) GetCachedStorages(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetCachedStorages")

var req cdsapi.PackageGetCachedNodesReq
if err := ctx.ShouldBindQuery(&req); err != nil {
@@ -148,29 +148,29 @@ func (s *PackageService) GetCachedNodes(ctx *gin.Context) {
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetCachedNodesResp{PackageCachingInfo: resp}))
ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetCachedStoragesResp{PackageCachingInfo: resp}))
}

// GetLoadedNodes 处理获取包的加载节点的HTTP请求。
func (s *PackageService) GetLoadedNodes(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetLoadedNodes")
// GetLoadedStorages 处理获取包的加载节点的HTTP请求。
func (s *PackageService) GetLoadedStorages(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.GetLoadedStorages")

var req cdsapi.PackageGetLoadedNodesReq
var req cdsapi.PackageGetLoadedStoragesReq
if err := ctx.ShouldBindQuery(&req); err != nil {
log.Warnf("binding query: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

nodeIDs, err := s.svc.PackageSvc().GetLoadedNodes(req.UserID, req.PackageID)
stgIDs, err := s.svc.PackageSvc().GetLoadedStorages(req.UserID, req.PackageID)
if err != nil {
log.Warnf("get package loaded nodes failed: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "get package loaded nodes failed"))
return
}

ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetLoadedNodesResp{
NodeIDs: nodeIDs,
ctx.JSON(http.StatusOK, OK(cdsapi.PackageGetLoadedStoragesResp{
StorageIDs: stgIDs,
}))
}



+ 3
- 3
client/internal/http/server.go View File

@@ -41,7 +41,7 @@ func (s *Server) Serve() error {
func (s *Server) initRouters() {
rt := s.engine.Use()

initTemp(rt, s)
// initTemp(rt, s)

rt.GET(cdsapi.ObjectDownloadPath, s.Object().Download)
rt.POST(cdsapi.ObjectUploadPath, s.Object().Upload)
@@ -55,8 +55,8 @@ func (s *Server) initRouters() {
rt.POST(cdsapi.PackageCreatePath, s.Package().Create)
rt.POST(cdsapi.PackageDeletePath, s.Package().Delete)
rt.GET(cdsapi.PackageListBucketPackagesPath, s.Package().ListBucketPackages)
rt.GET(cdsapi.PackageGetCachedNodesPath, s.Package().GetCachedNodes)
rt.GET(cdsapi.PackageGetLoadedNodesPath, s.Package().GetLoadedNodes)
rt.GET(cdsapi.PackageGetCachedStoragesPath, s.Package().GetCachedStorages)
rt.GET(cdsapi.PackageGetLoadedStoragesPath, s.Package().GetLoadedStorages)

rt.POST(cdsapi.StorageLoadPackagePath, s.Storage().LoadPackage)
rt.POST(cdsapi.StorageCreatePackagePath, s.Storage().CreatePackage)


+ 13
- 11
client/internal/http/temp.go View File

@@ -1,5 +1,6 @@
package http

/*
import (
"net/http"

@@ -132,7 +133,7 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) {
var allNodeIDs []cdssdk.NodeID
allNodeIDs = append(allNodeIDs, details.PinnedAt...)
for _, b := range details.Blocks {
allNodeIDs = append(allNodeIDs, b.NodeID)
allNodeIDs = append(allNodeIDs, b.StorageID)
}
allNodeIDs = append(allNodeIDs, loadedNodeIDs...)

@@ -165,23 +166,23 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) {
switch details.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
for _, blk := range details.Blocks {
if !lo.Contains(details.PinnedAt, blk.NodeID) {
if !lo.Contains(details.PinnedAt, blk.StorageID) {
blocks = append(blocks, ObjectBlockDetail{
Type: "Rep",
FileHash: blk.FileHash,
LocationType: "Agent",
LocationName: allNodes[blk.NodeID].Name,
LocationName: allNodes[blk.StorageID].Name,
})
}
}
case *cdssdk.RepRedundancy:
for _, blk := range details.Blocks {
if !lo.Contains(details.PinnedAt, blk.NodeID) {
if !lo.Contains(details.PinnedAt, blk.StorageID) {
blocks = append(blocks, ObjectBlockDetail{
Type: "Rep",
FileHash: blk.FileHash,
LocationType: "Agent",
LocationName: allNodes[blk.NodeID].Name,
LocationName: allNodes[blk.StorageID].Name,
})
}
}
@@ -192,7 +193,7 @@ func (s *TempService) GetObjectDetail(ctx *gin.Context) {
Type: "Block",
FileHash: blk.FileHash,
LocationType: "Agent",
LocationName: allNodes[blk.NodeID].Name,
LocationName: allNodes[blk.StorageID].Name,
})
}
}
@@ -322,25 +323,25 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) {
switch obj.Object.Redundancy.(type) {
case *cdssdk.NoneRedundancy:
for _, blk := range obj.Blocks {
if !lo.Contains(obj.PinnedAt, blk.NodeID) {
if !lo.Contains(obj.PinnedAt, blk.StorageID) {
blocks = append(blocks, ObjectBlockDetail{
ObjectID: obj.Object.ObjectID,
Type: "Rep",
FileHash: blk.FileHash,
LocationType: "Agent",
LocationName: allNodes[blk.NodeID].Name,
LocationName: allNodes[blk.StorageID].Name,
})
}
}
case *cdssdk.RepRedundancy:
for _, blk := range obj.Blocks {
if !lo.Contains(obj.PinnedAt, blk.NodeID) {
if !lo.Contains(obj.PinnedAt, blk.StorageID) {
blocks = append(blocks, ObjectBlockDetail{
ObjectID: obj.Object.ObjectID,
Type: "Rep",
FileHash: blk.FileHash,
LocationType: "Agent",
LocationName: allNodes[blk.NodeID].Name,
LocationName: allNodes[blk.StorageID].Name,
})
}
}
@@ -352,7 +353,7 @@ func (s *TempService) GetDatabaseAll(ctx *gin.Context) {
Type: "Block",
FileHash: blk.FileHash,
LocationType: "Agent",
LocationName: allNodes[blk.NodeID].Name,
LocationName: allNodes[blk.StorageID].Name,
})
}
}
@@ -389,3 +390,4 @@ func auth(ctx *gin.Context) {
ctx.AbortWithStatus(http.StatusUnauthorized)
}
}
*/

+ 1
- 1
client/internal/services/bucket.go View File

@@ -5,7 +5,7 @@ import (

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)



+ 6
- 6
client/internal/services/cache.go View File

@@ -26,16 +26,16 @@ func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID c
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

getStg, err := coorCli.GetStorageDetail(coormq.ReqGetStorageDetail(stgID))
getStg, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{stgID}))
if err != nil {
return 0, "", fmt.Errorf("get storage detail: %w", err)
}

if getStg.Storage.Shard == nil {
if getStg.Storages[0].Shard == nil {
return 0, "", fmt.Errorf("shard storage is not enabled")
}

agentCli, err := stgglb.AgentMQPool.Acquire(getStg.Storage.Shard.MasterHub)
agentCli, err := stgglb.AgentMQPool.Acquire(getStg.Storages[0].MasterHub.NodeID)
if err != nil {
return 0, "", fmt.Errorf("new agent client: %w", err)
}
@@ -46,7 +46,7 @@ func (svc *CacheService) StartCacheMovePackage(userID cdssdk.UserID, packageID c
return 0, "", fmt.Errorf("start cache move package: %w", err)
}

return getStg.Storage.Shard.MasterHub, startResp.TaskID, nil
return getStg.Storages[0].MasterHub.NodeID, startResp.TaskID, nil
}

func (svc *CacheService) WaitCacheMovePackage(hubID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, error) {
@@ -72,14 +72,14 @@ func (svc *CacheService) WaitCacheMovePackage(hubID cdssdk.NodeID, taskID string
return true, nil
}

func (svc *CacheService) CacheRemovePackage(packageID cdssdk.PackageID, nodeID cdssdk.NodeID) error {
func (svc *CacheService) CacheRemovePackage(packageID cdssdk.PackageID, stgID cdssdk.StorageID) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new agent client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

_, err = coorCli.CacheRemovePackage(coormq.ReqCacheRemoveMovedPackage(packageID, nodeID))
_, err = coorCli.CacheRemovePackage(coormq.ReqCacheRemoveMovedPackage(packageID, stgID))
if err != nil {
return fmt.Errorf("requesting to coordinator: %w", err)
}


+ 1
- 1
client/internal/services/object.go View File

@@ -9,7 +9,7 @@ import (
mytask "gitlink.org.cn/cloudream/storage/client/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/downloader"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"


+ 7
- 7
client/internal/services/package.go View File

@@ -116,21 +116,21 @@ func (svc *PackageService) GetCachedNodes(userID cdssdk.UserID, packageID cdssdk
defer stgglb.CoordinatorMQPool.Release(coorCli)

// 向协调器请求获取包的缓存节点信息
resp, err := coorCli.GetPackageCachedNodes(coormq.NewGetPackageCachedNodes(userID, packageID))
resp, err := coorCli.GetPackageCachedStorages(coormq.ReqGetPackageCachedStorages(userID, packageID))
if err != nil {
return cdssdk.PackageCachingInfo{}, fmt.Errorf("get package cached nodes: %w", err)
}

// 构造并返回缓存信息
tmp := cdssdk.PackageCachingInfo{
NodeInfos: resp.NodeInfos,
PackageSize: resp.PackageSize,
StorageInfos: resp.StorageInfos,
PackageSize: resp.PackageSize,
}
return tmp, nil
}

// GetLoadedNodes 获取指定包加载的节点列表
func (svc *PackageService) GetLoadedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]cdssdk.NodeID, error) {
// GetLoadedStorages 获取指定包加载的节点列表
func (svc *PackageService) GetLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) ([]cdssdk.StorageID, error) {
// 从协调器MQ池中获取客户端
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
@@ -139,9 +139,9 @@ func (svc *PackageService) GetLoadedNodes(userID cdssdk.UserID, packageID cdssdk
defer stgglb.CoordinatorMQPool.Release(coorCli)

// 向协调器请求获取加载指定包的节点ID列表
resp, err := coorCli.GetPackageLoadedNodes(coormq.NewGetPackageLoadedNodes(userID, packageID))
resp, err := coorCli.GetPackageLoadedStorages(coormq.ReqGetPackageLoadedStorages(userID, packageID))
if err != nil {
return nil, fmt.Errorf("get package loaded nodes: %w", err)
}
return resp.NodeIDs, nil
return resp.StorageIDs, nil
}

+ 9
- 9
client/internal/services/storage.go View File

@@ -7,7 +7,7 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)
@@ -57,16 +57,16 @@ func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, package
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgResp, err := coorCli.GetStorageDetail(coormq.ReqGetStorageDetail(storageID))
stgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{storageID}))
if err != nil {
return 0, "", fmt.Errorf("getting storage info: %w", err)
}

if stgResp.Storage.Shard == nil {
if stgResp.Storages[0].Shard == nil {
return 0, "", fmt.Errorf("shard storage is not enabled")
}

agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storage.Shard.MasterHub)
agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.NodeID)
if err != nil {
return 0, "", fmt.Errorf("new agent client: %w", err)
}
@@ -77,7 +77,7 @@ func (svc *StorageService) StartStorageLoadPackage(userID cdssdk.UserID, package
return 0, "", fmt.Errorf("start storage load package: %w", err)
}

return stgResp.Storage.Shard.MasterHub, startResp.TaskID, nil
return stgResp.Storages[0].MasterHub.NodeID, startResp.TaskID, nil
}

type StorageLoadPackageResult struct {
@@ -128,16 +128,16 @@ func (svc *StorageService) StartStorageCreatePackage(userID cdssdk.UserID, bucke
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

stgResp, err := coorCli.GetStorageDetail(coormq.ReqGetStorageDetail(storageID))
stgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails([]cdssdk.StorageID{storageID}))
if err != nil {
return 0, "", fmt.Errorf("getting storage info: %w", err)
}

if stgResp.Storage.Shard == nil {
if stgResp.Storages[0].Shard == nil {
return 0, "", fmt.Errorf("shard storage is not enabled")
}

agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storage.Shard.MasterHub)
agentCli, err := stgglb.AgentMQPool.Acquire(stgResp.Storages[0].MasterHub.NodeID)
if err != nil {
return 0, "", fmt.Errorf("new agent client: %w", err)
}
@@ -148,7 +148,7 @@ func (svc *StorageService) StartStorageCreatePackage(userID cdssdk.UserID, bucke
return 0, "", fmt.Errorf("start storage upload package: %w", err)
}

return stgResp.Storage.Shard.MasterHub, startResp.TaskID, nil
return stgResp.Storages[0].MasterHub.NodeID, startResp.TaskID, nil
}

func (svc *StorageService) WaitStorageCreatePackage(nodeID cdssdk.NodeID, taskID string, waitTimeout time.Duration) (bool, cdssdk.PackageID, error) {


+ 3
- 3
common/globals/utils.go View File

@@ -3,10 +3,10 @@ package stgglb
import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

// 根据当前节点与目标地址的距离关系,选择合适的地址
func SelectGRPCAddress(node *cdssdk.Node) (string, int) {
func SelectGRPCAddress(node cdssdk.Node, addr cdssdk.GRPCAddressInfo) (string, int) {
if Local != nil && Local.LocationID == node.LocationID {
return node.LocalIP, node.LocalGRPCPort
return addr.LocalIP, addr.LocalGRPCPort
}

return node.ExternalIP, node.ExternalGRPCPort
return addr.ExternalIP, addr.ExternalGRPCPort
}

+ 25
- 24
common/models/models.go View File

@@ -7,19 +7,19 @@ import (
)

type ObjectBlock struct {
ObjectID cdssdk.ObjectID `db:"ObjectID" json:"objectID"`
Index int `db:"Index" json:"index"`
NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"` // 这个块应该在哪个节点上
FileHash string `db:"FileHash" json:"fileHash"`
ObjectID cdssdk.ObjectID `gorm:"colunm:ObjectID; primaryKey" json:"objectID"`
Index int `gorm:"colunm:Index; primaryKey" json:"index"`
StorageID cdssdk.StorageID `gorm:"colunm:StorageID; primaryKey" json:"storageID"` // 这个块应该在哪个节点上
FileHash cdssdk.FileHash `gorm:"colunm:FileHash" json:"fileHash"`
}

type ObjectDetail struct {
Object cdssdk.Object `json:"object"`
PinnedAt []cdssdk.NodeID `json:"pinnedAt"`
Blocks []ObjectBlock `json:"blocks"`
Object cdssdk.Object `json:"object"`
PinnedAt []cdssdk.StorageID `json:"pinnedAt"`
Blocks []ObjectBlock `json:"blocks"`
}

func NewObjectDetail(object cdssdk.Object, pinnedAt []cdssdk.NodeID, blocks []ObjectBlock) ObjectDetail {
func NewObjectDetail(object cdssdk.Object, pinnedAt []cdssdk.StorageID, blocks []ObjectBlock) ObjectDetail {
return ObjectDetail{
Object: object,
PinnedAt: pinnedAt,
@@ -70,10 +70,10 @@ func DetailsFillPinnedAt(objs []ObjectDetail, pinnedAt []cdssdk.PinnedObject) {
}

type GrouppedObjectBlock struct {
ObjectID cdssdk.ObjectID
Index int
FileHash string
NodeIDs []cdssdk.NodeID
ObjectID cdssdk.ObjectID
Index int
FileHash cdssdk.FileHash
StorageIDs []cdssdk.StorageID
}

func (o *ObjectDetail) GroupBlocks() []GrouppedObjectBlock {
@@ -87,7 +87,7 @@ func (o *ObjectDetail) GroupBlocks() []GrouppedObjectBlock {
FileHash: block.FileHash,
}
}
grp.NodeIDs = append(grp.NodeIDs, block.NodeID)
grp.StorageIDs = append(grp.StorageIDs, block.StorageID)
grps[block.Index] = grp
}

@@ -102,21 +102,22 @@ type LocalMachineInfo struct {
}

type PackageAccessStat struct {
PackageID cdssdk.PackageID `db:"PackageID" json:"packageID"`
NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"`
Amount float64 `db:"Amount" json:"Amount"` // 前一日的读取量的滑动平均值
Counter float64 `db:"Counter" json:"counter"` // 当日的读取量
PackageID cdssdk.PackageID `gorm:"colunm:PackageID; primaryKey" json:"packageID"`
StorageID cdssdk.StorageID `gorm:"colunm:StorageID; primaryKey" json:"storageID"`
Amount float64 `gorm:"colunm:Amount" json:"amount"` // 前一日的读取量的滑动平均值
Counter float64 `gorm:"colunm:Counter" json:"counter"` // 当日的读取量
}

type ObjectAccessStat struct {
ObjectID cdssdk.ObjectID `db:"ObjectID" json:"objectID"`
NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"`
Amount float64 `db:"Amount" json:"Amount"` // 前一日的读取量的滑动平均值
Counter float64 `db:"Counter" json:"counter"` // 当日的读取量
ObjectID cdssdk.ObjectID `gorm:"colunm:ObjectID; primaryKey" json:"objectID"`
StorageID cdssdk.StorageID `gorm:"colunm:StorageID; primaryKey" json:"storageID"`
Amount float64 `gorm:"colunm:Amount" json:"amount"` // 前一日的读取量的滑动平均值
Counter float64 `gorm:"colunm:Counter" json:"counter"` // 当日的读取量
}

type StorageDetail struct {
Storage cdssdk.Storage `json:"storage"`
Shard *cdssdk.ShardStorage `json:"shard"`
Shared *cdssdk.SharedStorage `json:"shared"`
Storage cdssdk.Storage `json:"storage"`
MasterHub *cdssdk.Node `json:"masterHub"`
Shard *cdssdk.ShardStorage `json:"shard"`
Shared *cdssdk.SharedStorage `json:"shared"`
}

+ 0
- 110
common/pkgs/cmd/download_package.go View File

@@ -1,110 +0,0 @@
package cmd

import (
"fmt"
"io"
"os"
"path/filepath"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

"gitlink.org.cn/cloudream/common/pkgs/distlock"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// 下载包结构体,存储用户ID、包ID和输出路径。
type DownloadPackage struct {
userID cdssdk.UserID
packageID cdssdk.PackageID
outputPath string
}

// 下载包执行上下文,包含分布式锁服务。
type DownloadPackageContext struct {
Distlock *distlock.Service
}

// 新建一个下载包实例。
// userID: 用户标识。
// packageID: 包标识。
// outputPath: 输出路径。
func NewDownloadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, outputPath string) *DownloadPackage {
return &DownloadPackage{
userID: userID,
packageID: packageID,
outputPath: outputPath,
}
}

// 执行下载包操作。
// ctx: 下载包执行上下文。
// 返回值: 执行过程中可能出现的错误。
func (t *DownloadPackage) Execute(ctx *DownloadPackageContext) error {
// 获取协调器MQ客户端
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli) // 确保释放客户端资源

// 获取包内对象详情
getObjectDetails, err := coorCli.GetPackageObjectDetails(coormq.NewGetPackageObjectDetails(t.packageID))
if err != nil {
return fmt.Errorf("getting package object details: %w", err)
}

// 创建下载对象迭代器
objIter := iterator.NewDownloadObjectIterator(getObjectDetails.Objects, &iterator.DownloadContext{
Distlock: ctx.Distlock,
})
defer objIter.Close() // 确保迭代器关闭

// 写入对象数据到本地
return t.writeObjects(objIter)
}

// 将下载的对象写入本地文件系统。
// objIter: 下载中的对象迭代器。
// 返回值: 写入过程中可能出现的错误。
func (t *DownloadPackage) writeObjects(objIter iterator.DownloadingObjectIterator) error {
for {
objInfo, err := objIter.MoveNext()
if err == iterator.ErrNoMoreItem {
break // 没有更多对象时结束循环
}
if err != nil {
return err
}

err = func() error {
defer objInfo.File.Close() // 确保文件资源被释放

fullPath := filepath.Join(t.outputPath, objInfo.Object.Path) // 计算文件完整路径

dirPath := filepath.Dir(fullPath) // 获取文件所在目录路径
if err := os.MkdirAll(dirPath, 0755); err != nil { // 创建目录,如果不存在
return fmt.Errorf("creating object dir: %w", err)
}

outputFile, err := os.Create(fullPath) // 创建本地文件
if err != nil {
return fmt.Errorf("creating object file: %w", err)
}
defer outputFile.Close() // 确保文件关闭

_, err = io.Copy(outputFile, objInfo.File) // 将对象数据写入本地文件
if err != nil {
return fmt.Errorf("copy object data to local file failed, err: %w", err)
}

return nil
}()
if err != nil {
return err // 如果写入过程中出现错误,返回该错误
}
}

return nil // 没有错误,返回nil
}

+ 31
- 31
common/pkgs/cmd/upload_objects.go View File

@@ -16,9 +16,11 @@ import (
"gitlink.org.cn/cloudream/common/utils/sort2"

stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/ops2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
@@ -41,8 +43,8 @@ type ObjectUploadResult struct {
Object cdssdk.Object
}

type UploadNodeInfo struct {
Node cdssdk.Node
type UploadStorageInfo struct {
Storage stgmod.StorageDetail
Delay time.Duration
IsSameLocation bool
}
@@ -69,42 +71,40 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult
return nil, fmt.Errorf("new coordinator client: %w", err)
}

getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
getUserStgsResp, err := coorCli.GetUserStorageDetails(coormq.ReqGetUserStorageDetails(t.userID))
if err != nil {
return nil, fmt.Errorf("getting user nodes: %w", err)
}

cons := ctx.Connectivity.GetAll()
userNodes := lo.Map(getUserNodesResp.Nodes, func(node cdssdk.Node, index int) UploadNodeInfo {
var userStgs []UploadStorageInfo
for _, stg := range getUserStgsResp.Storages {
if stg.MasterHub == nil {
continue
}

delay := time.Duration(math.MaxInt64)

con, ok := cons[node.NodeID]
con, ok := cons[stg.MasterHub.NodeID]
if ok && con.Delay != nil {
delay = *con.Delay
}

return UploadNodeInfo{
Node: node,
userStgs = append(userStgs, UploadStorageInfo{
Storage: stg,
Delay: delay,
IsSameLocation: node.LocationID == stgglb.Local.LocationID,
}
})
if len(userNodes) == 0 {
IsSameLocation: stg.MasterHub.LocationID == stgglb.Local.LocationID,
})
}

if len(userStgs) == 0 {
return nil, fmt.Errorf("user no available nodes")
}

// 给上传节点的IPFS加锁
ipfsReqBlder := reqbuilder.NewBuilder()
// 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁
if stgglb.Local.NodeID != nil {
ipfsReqBlder.IPFS().Buzy(*stgglb.Local.NodeID)
}
for _, node := range userNodes {
if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID {
continue
}

ipfsReqBlder.IPFS().Buzy(node.Node.NodeID)
for _, us := range userStgs {
ipfsReqBlder.Shard().Buzy(us.Storage.Storage.StorageID)
}
// TODO 考虑加Object的Create锁
// 防止上传的副本被清除
@@ -114,7 +114,7 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult
}
defer ipfsMutex.Unlock()

rets, err := uploadAndUpdatePackage(t.packageID, t.objectIter, userNodes, t.nodeAffinity)
rets, err := uploadAndUpdatePackage(t.packageID, t.objectIter, userStgs, t.nodeAffinity)
if err != nil {
return nil, err
}
@@ -128,26 +128,26 @@ func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult
// 1. 选择设置了亲和性的节点
// 2. 从与当前客户端相同地域的节点中随机选一个
// 3. 没有的话从所有节点选择延迟最低的节点
func chooseUploadNode(nodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) UploadNodeInfo {
func chooseUploadNode(nodes []UploadStorageInfo, nodeAffinity *cdssdk.NodeID) UploadStorageInfo {
if nodeAffinity != nil {
aff, ok := lo.Find(nodes, func(node UploadNodeInfo) bool { return node.Node.NodeID == *nodeAffinity })
aff, ok := lo.Find(nodes, func(node UploadStorageInfo) bool { return node.Storage.MasterHub.NodeID == *nodeAffinity })
if ok {
return aff
}
}

sameLocationNodes := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation })
sameLocationNodes := lo.Filter(nodes, func(e UploadStorageInfo, i int) bool { return e.IsSameLocation })
if len(sameLocationNodes) > 0 {
return sameLocationNodes[rand.Intn(len(sameLocationNodes))]
}

// 选择延迟最低的节点
nodes = sort2.Sort(nodes, func(e1, e2 UploadNodeInfo) int { return sort2.Cmp(e1.Delay, e2.Delay) })
nodes = sort2.Sort(nodes, func(e1, e2 UploadStorageInfo) int { return sort2.Cmp(e1.Delay, e2.Delay) })

return nodes[0]
}

func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userNodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) ([]ObjectUploadResult, error) {
func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userNodes []UploadStorageInfo, nodeAffinity *cdssdk.NodeID) ([]ObjectUploadResult, error) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return nil, fmt.Errorf("new coordinator client: %w", err)
@@ -182,7 +182,7 @@ func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.Uplo
Error: err,
})

adds = append(adds, coormq.NewAddObjectEntry(objInfo.Path, objInfo.Size, fileHash, uploadTime, uploadNode.Node.NodeID))
adds = append(adds, coormq.NewAddObjectEntry(objInfo.Path, objInfo.Size, fileHash, uploadTime, uploadNode.Storage.Storage.StorageID))
return nil
}()
if err != nil {
@@ -213,10 +213,10 @@ func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.Uplo
return uploadRets, nil
}

func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) {
func uploadFile(file io.Reader, uploadStg UploadStorageInfo) (cdssdk.FileHash, error) {
ft := ioswitch2.NewFromTo()
fromExec, hd := ioswitch2.NewFromDriver(-1)
ft.AddFrom(fromExec).AddTo(ioswitch2.NewToNode(uploadNode.Node, -1, "fileHash"))
ft.AddFrom(fromExec).AddTo(ioswitch2.NewToShardStore(*uploadStg.Storage.MasterHub, uploadStg.Storage.Storage, -1, "fileHash"))

parser := parser.NewParser(cdssdk.DefaultECRedundancy)
plans := exec.NewPlanBuilder()
@@ -235,5 +235,5 @@ func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) {
return "", err
}

return ret["fileHash"].(string), nil
return ret["fileHash"].(*ops2.FileHashValue).Hash, nil
}

+ 21
- 5
common/pkgs/connectivity/collector.go View File

@@ -172,11 +172,27 @@ func (r *Collector) testing() {
func (r *Collector) ping(node cdssdk.Node) Connectivity {
log := logger.WithType[Collector]("").WithField("NodeID", node.NodeID)

ip := node.ExternalIP
port := node.ExternalGRPCPort
if node.LocationID == stgglb.Local.LocationID {
ip = node.LocalIP
port = node.LocalGRPCPort
var ip string
var port int
switch addr := node.Address.(type) {
case *cdssdk.GRPCAddressInfo:
if node.LocationID == stgglb.Local.LocationID {
ip = addr.LocalIP
port = addr.LocalGRPCPort
} else {
ip = addr.ExternalIP
port = addr.ExternalGRPCPort
}
default:
// TODO 增加对HTTP模式的agent的支持

log.Warnf("unsupported address type: %v", addr)

return Connectivity{
ToNodeID: node.NodeID,
Delay: nil,
TestTime: time.Now(),
}
}

agtCli, err := stgglb.AgentRPCPool.Acquire(ip, port)


+ 2
- 0
common/pkgs/db/bucket.go View File

@@ -1,5 +1,6 @@
package db

/*
import (
"database/sql"
"errors"
@@ -136,3 +137,4 @@ func (db *BucketDB) Delete(ctx SQLContext, bucketID cdssdk.BucketID) error {
}
return nil
}
*/

+ 2
- 0
common/pkgs/db/object.go View File

@@ -1,5 +1,6 @@
package db

/*
import (
"fmt"
"strings"
@@ -372,3 +373,4 @@ func (*ObjectDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.PackageID) err
_, err := ctx.Exec("delete from Object where PackageID = ?", packageID)
return err
}
*/

+ 2
- 0
common/pkgs/db/package.go View File

@@ -1,5 +1,6 @@
package db

/*
import (
"database/sql"
"errors"
@@ -203,3 +204,4 @@ func (*PackageDB) ChangeState(ctx SQLContext, packageID cdssdk.PackageID, state
_, err := ctx.Exec("update Package set State = ? where PackageID = ?", state, packageID)
return err
}
*/

+ 2
- 1
common/pkgs/db2/bucket.go View File

@@ -3,10 +3,11 @@ package db2
import (
"errors"
"fmt"

"gorm.io/gorm"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type BucketDB struct {


+ 36
- 45
common/pkgs/db2/cache.go View File

@@ -4,7 +4,8 @@ import (
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gorm.io/gorm/clause"
)

type CacheDB struct {
@@ -15,9 +16,9 @@ func (db *DB) Cache() *CacheDB {
return &CacheDB{DB: db}
}

func (*CacheDB) Get(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID) (model.Cache, error) {
func (*CacheDB) Get(ctx SQLContext, fileHash cdssdk.FileHash, stgID cdssdk.StorageID) (model.Cache, error) {
var ret model.Cache
err := ctx.Table("Cache").Where("FileHash = ? AND NodeID = ?", fileHash, nodeID).First(&ret).Error
err := ctx.Table("Cache").Where("FileHash = ? AND StorageID = ?", fileHash, stgID).First(&ret).Error
return ret, err
}

@@ -27,15 +28,15 @@ func (*CacheDB) BatchGetAllFileHashes(ctx SQLContext, start int, count int) ([]s
return ret, err
}

func (*CacheDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]model.Cache, error) {
func (*CacheDB) GetByStorageID(ctx SQLContext, stgID cdssdk.StorageID) ([]model.Cache, error) {
var ret []model.Cache
err := ctx.Table("Cache").Where("NodeID = ?", nodeID).Find(&ret).Error
err := ctx.Table("Cache").Where("StorageID = ?", stgID).Find(&ret).Error
return ret, err
}

// Create 创建一条缓存记录,如果已有则不进行操作
func (*CacheDB) Create(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID, priority int) error {
cache := model.Cache{FileHash: fileHash, NodeID: nodeID, CreateTime: time.Now(), Priority: priority}
func (*CacheDB) Create(ctx SQLContext, fileHash cdssdk.FileHash, stgID cdssdk.StorageID, priority int) error {
cache := model.Cache{FileHash: fileHash, StorageID: stgID, CreateTime: time.Now(), Priority: priority}
return ctx.Where(cache).Attrs(cache).FirstOrCreate(&cache).Error
}

@@ -44,17 +45,14 @@ func (*CacheDB) BatchCreate(ctx SQLContext, caches []model.Cache) error {
if len(caches) == 0 {
return nil
}
return BatchNamedExec(
ctx,
"insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+
" on duplicate key update CreateTime=values(CreateTime), Priority=values(Priority)",
4,
caches,
nil,
)

return ctx.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "FileHash"}, {Name: "StorageID"}},
DoUpdates: clause.AssignmentColumns([]string{"CreateTime", "Priority"}),
}).Create(&caches).Error
}

func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error {
func (db *CacheDB) BatchCreateOnSameStorage(ctx SQLContext, fileHashes []cdssdk.FileHash, stgID cdssdk.StorageID, priority int) error {
if len(fileHashes) == 0 {
return nil
}
@@ -64,51 +62,44 @@ func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeI
for _, hash := range fileHashes {
caches = append(caches, model.Cache{
FileHash: hash,
NodeID: nodeID,
StorageID: stgID,
CreateTime: nowTime,
Priority: priority,
})
}

return BatchNamedExec(ctx,
"insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+
" on duplicate key update CreateTime=values(CreateTime), Priority=values(Priority)",
4,
caches,
nil,
)
return db.BatchCreate(ctx, caches)
}

func (*CacheDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
func (*CacheDB) StorageBatchDelete(ctx SQLContext, stgID cdssdk.StorageID, fileHashes []cdssdk.FileHash) error {
if len(fileHashes) == 0 {
return nil
}

return ctx.Table("Cache").Where("NodeID = ? AND FileHash IN (?)", nodeID, fileHashes).Delete(&model.Cache{}).Error
return ctx.Table("Cache").Where("StorageID = ? AND FileHash IN (?)", stgID, fileHashes).Delete(&model.Cache{}).Error
}

// GetCachingFileNodes 查找缓存了指定文件的节点
func (*CacheDB) GetCachingFileNodes(ctx SQLContext, fileHash string) ([]cdssdk.Node, error) {
var nodes []cdssdk.Node
err := ctx.Table("Cache").Select("Node.*").
Joins("JOIN Node ON Cache.NodeID = Node.NodeID").
// GetCachingFileStorages 查找缓存了指定文件的存储服务
func (*CacheDB) GetCachingFileStorages(ctx SQLContext, fileHash cdssdk.FileHash) ([]cdssdk.Storage, error) {
var stgs []cdssdk.Storage
err := ctx.Table("Cache").Select("Storage.*").
Joins("JOIN Storage ON Cache.StorageID = Storage.StorageID").
Where("Cache.FileHash = ?", fileHash).
Find(&nodes).Error
return nodes, err
Find(&stgs).Error
return stgs, err
}

// DeleteNodeAll 删除一个节点所有的记录
func (*CacheDB) DeleteNodeAll(ctx SQLContext, nodeID cdssdk.NodeID) error {
return ctx.Where("NodeID = ?", nodeID).Delete(&model.Cache{}).Error
// DeleteStorageAll 删除一个存储服务所有的记录
func (*CacheDB) DeleteStorageAll(ctx SQLContext, StorageID cdssdk.StorageID) error {
return ctx.Where("StorageID = ?", StorageID).Delete(&model.Cache{}).Error
}

// FindCachingFileUserNodes 在缓存表中查询指定数据所在的节点
func (*CacheDB) FindCachingFileUserNodes(ctx SQLContext, userID cdssdk.NodeID, fileHash string) ([]cdssdk.Node, error) {
var nodes []cdssdk.Node
err := ctx.Table("Cache").Select("Node.*").
Joins("JOIN UserNode ON Cache.NodeID = UserNode.NodeID").
Joins("JOIN Node ON UserNode.NodeID = Node.NodeID").
Where("Cache.FileHash = ? AND UserNode.UserID = ?", fileHash, userID).
Find(&nodes).Error
return nodes, err
// FindCachingFileUserStorages 在缓存表中查询指定数据所在的节点
func (*CacheDB) FindCachingFileUserStorages(ctx SQLContext, userID cdssdk.UserID, fileHash string) ([]cdssdk.Storage, error) {
var stgs []cdssdk.Storage
err := ctx.Table("Cache").Select("Storage.*").
Joins("JOIN UserStorage ON Cache.StorageID = UserStorage.StorageID").
Where("Cache.FileHash = ? AND UserStorage.UserID = ?", fileHash, userID).
Find(&stgs).Error
return stgs, err
}

common/pkgs/db/config/config.go → common/pkgs/db2/config/config.go View File


+ 1
- 1
common/pkgs/db2/db2.go View File

@@ -3,7 +3,7 @@ package db2
import (
_ "github.com/go-sql-driver/mysql"
"github.com/sirupsen/logrus"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/config"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/config"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)


+ 2
- 1
common/pkgs/db2/location.go View File

@@ -2,7 +2,8 @@ package db2

import (
"fmt"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type LocationDB struct {


common/pkgs/db/model/model.go → common/pkgs/db2/model/model.go View File

@@ -14,23 +14,23 @@ import (
type Storage = cdssdk.Storage

type User struct {
UserID cdssdk.UserID `db:"UserID" json:"userID"`
Password string `db:"Password" json:"password"`
	UserID   cdssdk.UserID `gorm:"column:UserID" json:"userID"`
	Password string        `gorm:"column:Password" json:"password"`
}

type UserBucket struct {
UserID cdssdk.UserID `db:"UserID" json:"userID"`
BucketID cdssdk.BucketID `db:"BucketID" json:"bucketID"`
UserID cdssdk.UserID `gorm:"column:UserID; primaryKey" json:"userID"`
BucketID cdssdk.BucketID `gorm:"column:BucketID; primaryKey" json:"bucketID"`
}

type UserNode struct {
UserID cdssdk.UserID `db:"UserID" json:"userID"`
NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"`
UserID cdssdk.UserID `gorm:"column:UserID; primaryKey" json:"userID"`
NodeID cdssdk.NodeID `gorm:"column:NodeID; primaryKey" json:"nodeID"`
}

type UserStorage struct {
UserID cdssdk.UserID `db:"UserID" json:"userID"`
StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"`
UserID cdssdk.UserID `gorm:"column:UserID; primaryKey" json:"userID"`
StorageID cdssdk.StorageID `gorm:"column:StorageID; primaryKey" json:"storageID"`
}

type Bucket = cdssdk.Bucket
@@ -76,10 +76,10 @@ func (o *RedundancyWarpper) Scan(src interface{}) error {
type ObjectBlock = stgmod.ObjectBlock

type Cache struct {
FileHash string `db:"FileHash" json:"fileHash"`
NodeID cdssdk.NodeID `db:"NodeID" json:"nodeID"`
CreateTime time.Time `db:"CreateTime" json:"createTime"`
Priority int `db:"Priority" json:"priority"`
	FileHash   cdssdk.FileHash  `gorm:"column:FileHash; primaryKey" json:"fileHash"`
	StorageID  cdssdk.StorageID `gorm:"column:StorageID; primaryKey" json:"storageID"`
	CreateTime time.Time        `gorm:"column:CreateTime" json:"createTime"`
	Priority   int              `gorm:"column:Priority" json:"priority"`
}

const (
@@ -90,13 +90,13 @@ const (

// Storage当前加载的Package
type StoragePackage struct {
StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"`
PackageID cdssdk.PackageID `db:"PackageID" json:"packageID"`
UserID cdssdk.UserID `db:"UserID" json:"userID"`
State string `db:"State" json:"state"`
StorageID cdssdk.StorageID `gorm:"column:StorageID; primaryKey" json:"storageID"`
PackageID cdssdk.PackageID `gorm:"column:PackageID; primaryKey" json:"packageID"`
UserID cdssdk.UserID `gorm:"column:UserID; primaryKey" json:"userID"`
State string `gorm:"column:State" json:"state"`
}

type Location struct {
LocationID cdssdk.LocationID `db:"LocationID" json:"locationID"`
Name string `db:"Name" json:"name"`
	LocationID cdssdk.LocationID `gorm:"column:LocationID; primaryKey; autoIncrement" json:"locationID"`
	Name       string            `gorm:"column:Name" json:"name"`
}

+ 7
- 0
common/pkgs/db2/node.go View File

@@ -28,6 +28,13 @@ func (*NodeDB) GetByID(ctx SQLContext, nodeID cdssdk.NodeID) (cdssdk.Node, error
return ret, err
}

func (*NodeDB) BatchGetByID(ctx SQLContext, nodeIDs []cdssdk.NodeID) ([]cdssdk.Node, error) {
	var ret []cdssdk.Node
	err := ctx.Table("Node").Where("NodeID IN (?)", nodeIDs).Find(&ret).Error

	return ret, err
}

// GetUserNodes 根据用户id查询可用node
func (*NodeDB) GetUserNodes(ctx SQLContext, userID cdssdk.UserID) ([]cdssdk.Node, error) {
var nodes []cdssdk.Node


+ 1
- 1
common/pkgs/db2/node_connectivity.go View File

@@ -2,7 +2,7 @@ package db2

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gorm.io/gorm/clause"
)



+ 21
- 24
common/pkgs/db2/object.go View File

@@ -2,14 +2,16 @@ package db2

import (
"fmt"
"gitlink.org.cn/cloudream/common/utils/sort2"
"strings"
"time"

"gitlink.org.cn/cloudream/common/utils/sort2"
"gorm.io/gorm/clause"

"github.com/samber/lo"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

@@ -101,13 +103,13 @@ func (db *ObjectDB) BatchUpert(ctx SQLContext, objs []cdssdk.Object) error {
}

func (db *ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Object, error) {
var ret []model.TempObject
var ret []cdssdk.Object
err := ctx.Table("Object").Where("PackageID = ?", packageID).Order("ObjectID ASC").Find(&ret).Error
return lo.Map(ret, func(o model.TempObject, idx int) model.Object { return o.ToObject() }), err
return ret, err
}

func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID cdssdk.PackageID) ([]stgmod.ObjectDetail, error) {
var objs []model.TempObject
var objs []cdssdk.Object
err := ctx.Table("Object").Where("PackageID = ?", packageID).Order("ObjectID ASC").Find(&objs).Error
if err != nil {
return nil, fmt.Errorf("getting objects: %w", err)
@@ -140,7 +142,7 @@ func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID cdssdk.Pac
details := make([]stgmod.ObjectDetail, len(objs))
for i, obj := range objs {
details[i] = stgmod.ObjectDetail{
Object: obj.ToObject(),
Object: obj,
}
}

@@ -149,18 +151,13 @@ func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID cdssdk.Pac
return details, nil
}

func (db *ObjectDB) GetObjectsIfAnyBlockOnNode(ctx SQLContext, nodeID cdssdk.NodeID) ([]cdssdk.Object, error) {
var temps []model.TempObject
err := ctx.Table("Object").Where("ObjectID IN (SELECT ObjectID FROM ObjectBlock WHERE NodeID = ?)", nodeID).Order("ObjectID ASC").Find(&temps).Error
func (db *ObjectDB) GetObjectsIfAnyBlockOnStorage(ctx SQLContext, stgID cdssdk.StorageID) ([]cdssdk.Object, error) {
var objs []cdssdk.Object
err := ctx.Table("Object").Where("ObjectID IN (SELECT ObjectID FROM ObjectBlock WHERE StorageID = ?)", stgID).Order("ObjectID ASC").Find(&objs).Error
if err != nil {
return nil, fmt.Errorf("getting objects: %w", err)
}

objs := make([]cdssdk.Object, len(temps))
for i := range temps {
objs[i] = temps[i].ToObject()
}

return objs, nil
}

@@ -224,10 +221,10 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
objBlocks := make([]stgmod.ObjectBlock, len(adds))
for i, add := range adds {
objBlocks[i] = stgmod.ObjectBlock{
ObjectID: addedObjIDs[i],
Index: 0,
NodeID: add.NodeID,
FileHash: add.FileHash,
ObjectID: addedObjIDs[i],
Index: 0,
StorageID: add.StorageID,
FileHash: add.FileHash,
}
}
if err := ctx.Table("ObjectBlock").Create(&objBlocks).Error; err != nil {
@@ -239,7 +236,7 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
for _, add := range adds {
caches = append(caches, model.Cache{
FileHash: add.FileHash,
NodeID: add.NodeID,
StorageID: add.StorageID,
CreateTime: time.Now(),
Priority: 0,
})
@@ -270,10 +267,10 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.Updating
}

// 目前只能使用这种方式来同时更新大量数据
err := BatchNamedExec(ctx,
"insert into Object(ObjectID, PackageID, Path, Size, FileHash, Redundancy, CreateTime, UpdateTime)"+
" values(:ObjectID, :PackageID, :Path, :Size, :FileHash, :Redundancy, :CreateTime, :UpdateTime) as new"+
" on duplicate key update Redundancy=new.Redundancy", 8, dummyObjs, nil)
err := ctx.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "ObjectID"}},
DoUpdates: clause.AssignmentColumns([]string{"Redundancy", "UpdateTime"})},
).Create(&dummyObjs).Error
if err != nil {
return fmt.Errorf("batch update object redundancy: %w", err)
}
@@ -304,7 +301,7 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.Updating
for _, blk := range obj.Blocks {
caches = append(caches, model.Cache{
FileHash: blk.FileHash,
NodeID: blk.NodeID,
StorageID: blk.StorageID,
CreateTime: time.Now(),
Priority: 0,
})


+ 4
- 4
common/pkgs/db2/object_access_stat.go View File

@@ -15,10 +15,10 @@ func (db *DB) ObjectAccessStat() *ObjectAccessStatDB {
return &ObjectAccessStatDB{db}
}

func (*ObjectAccessStatDB) Get(ctx SQLContext, objID cdssdk.ObjectID, nodeID cdssdk.NodeID) (stgmod.ObjectAccessStat, error) {
func (*ObjectAccessStatDB) Get(ctx SQLContext, objID cdssdk.ObjectID, stgID cdssdk.StorageID) (stgmod.ObjectAccessStat, error) {
var ret stgmod.ObjectAccessStat
err := ctx.Table("ObjectAccessStat").
Where("ObjectID = ? AND NodeID = ?", objID, nodeID).
Where("ObjectID = ? AND StorageID = ?", objID, stgID).
First(&ret).Error
return ret, err
}
@@ -43,14 +43,14 @@ func (*ObjectAccessStatDB) BatchGetByObjectID(ctx SQLContext, objIDs []cdssdk.Ob
return ret, err
}

func (*ObjectAccessStatDB) BatchGetByObjectIDOnNode(ctx SQLContext, objIDs []cdssdk.ObjectID, nodeID cdssdk.NodeID) ([]stgmod.ObjectAccessStat, error) {
func (*ObjectAccessStatDB) BatchGetByObjectIDOnStorage(ctx SQLContext, objIDs []cdssdk.ObjectID, stgID cdssdk.StorageID) ([]stgmod.ObjectAccessStat, error) {
if len(objIDs) == 0 {
return nil, nil
}

var ret []stgmod.ObjectAccessStat
err := ctx.Table("ObjectAccessStat").
Where("ObjectID IN ? AND NodeID = ?", objIDs, nodeID).
Where("ObjectID IN ? AND StorageID = ?", objIDs, stgID).
Find(&ret).Error
return ret, err
}


+ 6
- 6
common/pkgs/db2/object_block.go View File

@@ -16,9 +16,9 @@ func (db *DB) ObjectBlock() *ObjectBlockDB {
return &ObjectBlockDB{DB: db}
}

func (db *ObjectBlockDB) GetByNodeID(ctx SQLContext, nodeID cdssdk.NodeID) ([]stgmod.ObjectBlock, error) {
func (db *ObjectBlockDB) GetByStorageID(ctx SQLContext, stgID cdssdk.StorageID) ([]stgmod.ObjectBlock, error) {
var rets []stgmod.ObjectBlock
err := ctx.Table("ObjectBlock").Where("NodeID = ?", nodeID).Find(&rets).Error
err := ctx.Table("ObjectBlock").Where("StorageID = ?", stgID).Find(&rets).Error
return rets, err
}

@@ -32,8 +32,8 @@ func (db *ObjectBlockDB) BatchGetByObjectID(ctx SQLContext, objectIDs []cdssdk.O
return blocks, err
}

func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, nodeID cdssdk.NodeID, fileHash string) error {
block := stgmod.ObjectBlock{ObjectID: objectID, Index: index, NodeID: nodeID, FileHash: fileHash}
func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index int, stgID cdssdk.StorageID, fileHash cdssdk.FileHash) error {
block := stgmod.ObjectBlock{ObjectID: objectID, Index: index, StorageID: stgID, FileHash: fileHash}
return ctx.Table("ObjectBlock").Create(&block).Error
}

@@ -61,12 +61,12 @@ func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.Packag
return ctx.Table("ObjectBlock").Where("ObjectID IN (SELECT ObjectID FROM Object WHERE PackageID = ?)", packageID).Delete(&stgmod.ObjectBlock{}).Error
}

func (db *ObjectBlockDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
func (db *ObjectBlockDB) StorageBatchDelete(ctx SQLContext, stgID cdssdk.StorageID, fileHashes []cdssdk.FileHash) error {
if len(fileHashes) == 0 {
return nil
}

return ctx.Table("ObjectBlock").Where("NodeID = ? AND FileHash IN (?)", nodeID, fileHashes).Delete(&stgmod.ObjectBlock{}).Error
return ctx.Table("ObjectBlock").Where("StorageID = ? AND FileHash IN (?)", stgID, fileHashes).Delete(&stgmod.ObjectBlock{}).Error
}

func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (int, error) {


+ 2
- 1
common/pkgs/db2/package.go View File

@@ -3,10 +3,11 @@ package db2
import (
"errors"
"fmt"

"gorm.io/gorm"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type PackageDB struct {


+ 4
- 4
common/pkgs/db2/package_access_stat.go View File

@@ -14,9 +14,9 @@ func (db *DB) PackageAccessStat() *PackageAccessStatDB {
return &PackageAccessStatDB{db}
}

func (*PackageAccessStatDB) Get(ctx SQLContext, pkgID cdssdk.PackageID, nodeID cdssdk.NodeID) (stgmod.PackageAccessStat, error) {
func (*PackageAccessStatDB) Get(ctx SQLContext, pkgID cdssdk.PackageID, stgID cdssdk.StorageID) (stgmod.PackageAccessStat, error) {
var ret stgmod.PackageAccessStat
err := ctx.Table("PackageAccessStat").Where("PackageID = ? AND NodeID = ?", pkgID, nodeID).First(&ret).Error
err := ctx.Table("PackageAccessStat").Where("PackageID = ? AND StorageID = ?", pkgID, stgID).First(&ret).Error
return ret, err
}

@@ -41,8 +41,8 @@ func (*PackageAccessStatDB) BatchAddCounter(ctx SQLContext, entries []coormq.Add
return nil
}

sql := "INSERT INTO PackageAccessStat(PackageID, NodeID, Counter, Amount) " +
"VALUES(:PackageID, :NodeID, :Counter, 0) ON DUPLICATE KEY UPDATE Counter = Counter + VALUES(Counter)"
sql := "INSERT INTO PackageAccessStat(PackageID, StorageID, Counter, Amount) " +
"VALUES(:PackageID, :StorageID, :Counter, 0) ON DUPLICATE KEY UPDATE Counter = Counter + VALUES(Counter)"

return ctx.Exec(sql, entries).Error
}


+ 6
- 0
common/pkgs/db2/shard_storage.go View File

@@ -17,3 +17,9 @@ func (*ShardStorageDB) GetByStorageID(ctx SQLContext, stgID cdssdk.StorageID) (c
err := ctx.Table("ShardStorage").First(&ret, stgID).Error
return ret, err
}

func (*ShardStorageDB) BatchGetByStorageIDs(ctx SQLContext, stgIDs []cdssdk.StorageID) ([]cdssdk.ShardStorage, error) {
var ret []cdssdk.ShardStorage
err := ctx.Table("ShardStorage").Find(&ret, "StorageID IN (?)", stgIDs).Error
return ret, err
}

+ 6
- 0
common/pkgs/db2/shared_storage.go View File

@@ -17,3 +17,9 @@ func (*SharedStorageDB) GetByStorageID(ctx SQLContext, stgID cdssdk.StorageID) (
err := ctx.Table("SharedStorage").First(&ret, stgID).Error
return ret, err
}

func (*SharedStorageDB) BatchGetByStorageIDs(ctx SQLContext, stgIDs []cdssdk.StorageID) ([]cdssdk.SharedStorage, error) {
var ret []cdssdk.SharedStorage
err := ctx.Table("SharedStorage").Find(&ret, "StorageID IN (?)", stgIDs).Error
return ret, err
}

+ 21
- 5
common/pkgs/db2/storage.go View File

@@ -4,7 +4,7 @@ import (
"fmt"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StorageDB struct {
@@ -21,6 +21,26 @@ func (db *StorageDB) GetByID(ctx SQLContext, stgID cdssdk.StorageID) (model.Stor
return stg, err
}

func (StorageDB) GetAllIDs(ctx SQLContext) ([]cdssdk.StorageID, error) {
var stgs []cdssdk.StorageID
err := ctx.Table("Storage").Select("StorageID").Find(&stgs).Error
return stgs, err
}

func (db *StorageDB) BatchGetByID(ctx SQLContext, stgIDs []cdssdk.StorageID) ([]model.Storage, error) {
var stgs []model.Storage
err := ctx.Table("Storage").Find(&stgs, "StorageID IN (?)", stgIDs).Error
return stgs, err
}

func (db *StorageDB) GetUserStorages(ctx SQLContext, userID cdssdk.UserID) ([]model.Storage, error) {
var stgs []model.Storage
err := ctx.Table("Storage").Select("Storage.*").
Joins("inner join UserStorage on Storage.StorageID = UserStorage.StorageID").
Where("UserID = ?", userID).Find(&stgs).Error
return stgs, err
}

func (db *StorageDB) BatchGetAllStorageIDs(ctx SQLContext, start int, count int) ([]cdssdk.StorageID, error) {
var ret []cdssdk.StorageID
	err := ctx.Table("Storage").Select("StorageID").Limit(count).Offset(start).Find(&ret).Error
@@ -56,7 +76,3 @@ func (db *StorageDB) GetUserStorageByName(ctx SQLContext, userID cdssdk.UserID,

return stg, err
}

// func (db *StorageDB) ChangeState(ctx SQLContext, storageID cdssdk.StorageID, state string) error {
// return ctx.Table("Storage").Where("StorageID = ?", storageID).Update("State", state).Error
// }

+ 1
- 1
common/pkgs/db2/storage_package.go View File

@@ -2,7 +2,7 @@ package db2

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StoragePackageDB struct {


+ 1
- 1
common/pkgs/db2/user.go View File

@@ -2,7 +2,7 @@ package db2

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type UserDB struct {


+ 1
- 1
common/pkgs/db2/user_bucket.go View File

@@ -2,7 +2,7 @@ package db2

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type UserBucketDB struct {


+ 8
- 8
common/pkgs/distlock/reqbuilder/ipfs.go View File

@@ -8,31 +8,31 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/lockprovider"
)

type IPFSLockReqBuilder struct {
type ShardStoreLockReqBuilder struct {
*LockRequestBuilder
}

func (b *LockRequestBuilder) IPFS() *IPFSLockReqBuilder {
return &IPFSLockReqBuilder{LockRequestBuilder: b}
func (b *LockRequestBuilder) Shard() *ShardStoreLockReqBuilder {
return &ShardStoreLockReqBuilder{LockRequestBuilder: b}
}
func (b *IPFSLockReqBuilder) Buzy(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
func (b *ShardStoreLockReqBuilder) Buzy(stgID cdssdk.StorageID) *ShardStoreLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Path: b.makePath(stgID),
Name: lockprovider.IPFSBuzyLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *IPFSLockReqBuilder) GC(nodeID cdssdk.NodeID) *IPFSLockReqBuilder {
func (b *ShardStoreLockReqBuilder) GC(stgID cdssdk.StorageID) *ShardStoreLockReqBuilder {
b.locks = append(b.locks, distlock.Lock{
Path: b.makePath(nodeID),
Path: b.makePath(stgID),
Name: lockprovider.IPFSGCLock,
Target: *lockprovider.NewStringLockTarget(),
})
return b
}

func (b *IPFSLockReqBuilder) makePath(nodeID cdssdk.NodeID) []string {
func (b *ShardStoreLockReqBuilder) makePath(stgID cdssdk.StorageID) []string {
	return []string{lockprovider.IPFSLockPathPrefix, strconv.FormatInt(int64(stgID), 10)}
}

+ 1
- 1
common/pkgs/downloader/downloader.go View File

@@ -117,7 +117,7 @@ func (d *Downloader) DownloadPackage(pkgID cdssdk.PackageID) DownloadIterator {

type ObjectECStrip struct {
Data []byte
ObjectFileHash string // 添加这条缓存时,Object的FileHash
ObjectFileHash cdssdk.FileHash // 添加这条缓存时,Object的FileHash
}

type ECStripKey struct {


+ 66
- 57
common/pkgs/downloader/iterator.go View File

@@ -28,8 +28,8 @@ import (
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

type DownloadNodeInfo struct {
Node cdssdk.Node
type downloadStorageInfo struct {
Storage stgmod.StorageDetail
ObjectPinned bool
Blocks []stgmod.ObjectBlock
Distance float64
@@ -46,8 +46,8 @@ type DownloadObjectIterator struct {
currentIndex int
inited bool

coorCli *coormq.Client
allNodes map[cdssdk.NodeID]cdssdk.Node
coorCli *coormq.Client
allStorages map[cdssdk.StorageID]stgmod.StorageDetail
}

func NewDownloadObjectIterator(downloader *Downloader, downloadObjs []downloadReqeust2) *DownloadObjectIterator {
@@ -82,29 +82,37 @@ func (i *DownloadObjectIterator) init() error {
}
i.coorCli = coorCli

allNodeIDs := make(map[cdssdk.NodeID]bool)
allStgIDsMp := make(map[cdssdk.StorageID]bool)
for _, obj := range i.reqs {
if obj.Detail == nil {
continue
}

for _, p := range obj.Detail.PinnedAt {
allNodeIDs[p] = true
allStgIDsMp[p] = true
}

for _, b := range obj.Detail.Blocks {
allNodeIDs[b.NodeID] = true
allStgIDsMp[b.StorageID] = true
}
}

getNodes, err := coorCli.GetNodes(coormq.NewGetNodes(lo.Keys(allNodeIDs)))
stgIDs := lo.Keys(allStgIDsMp)
getStgs, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(stgIDs))
if err != nil {
return fmt.Errorf("getting nodes: %w", err)
return fmt.Errorf("getting storage details: %w", err)
}

i.allNodes = make(map[cdssdk.NodeID]cdssdk.Node)
for _, n := range getNodes.Nodes {
i.allNodes[n.NodeID] = n
i.allStorages = make(map[cdssdk.StorageID]stgmod.StorageDetail)
for idx, s := range getStgs.Storages {
if s == nil {
return fmt.Errorf("storage %v not found", stgIDs[idx])
}
if s.Shard == nil {
return fmt.Errorf("storage %v has no shard store", stgIDs[idx])
}

i.allStorages[s.Storage.StorageID] = *s
}

return nil
@@ -180,35 +188,35 @@ func (i *DownloadObjectIterator) Close() {
}

func (iter *DownloadObjectIterator) downloadNoneOrRepObject(obj downloadReqeust2) (io.ReadCloser, error) {
allNodes, err := iter.sortDownloadNodes(obj)
allStgs, err := iter.sortDownloadStorages(obj)
if err != nil {
return nil, err
}

bsc, blocks := iter.getMinReadingBlockSolution(allNodes, 1)
osc, node := iter.getMinReadingObjectSolution(allNodes, 1)
bsc, blocks := iter.getMinReadingBlockSolution(allStgs, 1)
osc, stg := iter.getMinReadingObjectSolution(allStgs, 1)
if bsc < osc {
logger.Debugf("downloading object %v from node %v(%v)", obj.Raw.ObjectID, blocks[0].Node.Name, blocks[0].Node.NodeID)
return iter.downloadFromNode(&blocks[0].Node, obj)
logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, blocks[0].Storage.Storage)
return iter.downloadFromStorage(&blocks[0].Storage, obj)
}

if osc == math.MaxFloat64 {
// bsc >= osc,如果osc是MaxFloat64,那么bsc也一定是,也就意味着没有足够块来恢复文件
return nil, fmt.Errorf("no node has this object")
return nil, fmt.Errorf("no storage has this object")
}

logger.Debugf("downloading object %v from node %v(%v)", obj.Raw.ObjectID, node.Name, node.NodeID)
return iter.downloadFromNode(node, obj)
	logger.Debugf("downloading object %v from storage %v", obj.Raw.ObjectID, stg.Storage.Storage)
return iter.downloadFromStorage(stg, obj)
}

func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed *cdssdk.ECRedundancy) (io.ReadCloser, error) {
allNodes, err := iter.sortDownloadNodes(req)
allNodes, err := iter.sortDownloadStorages(req)
if err != nil {
return nil, err
}

bsc, blocks := iter.getMinReadingBlockSolution(allNodes, ecRed.K)
osc, node := iter.getMinReadingObjectSolution(allNodes, ecRed.K)
osc, stg := iter.getMinReadingObjectSolution(allNodes, ecRed.K)

if bsc < osc {
var logStrs []any = []any{fmt.Sprintf("downloading ec object %v from blocks: ", req.Raw.ObjectID)}
@@ -216,7 +224,7 @@ func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed
if i > 0 {
logStrs = append(logStrs, ", ")
}
logStrs = append(logStrs, fmt.Sprintf("%v@%v(%v)", b.Block.Index, b.Node.Name, b.Node.NodeID))
			logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage.Storage))
}
logger.Debug(logStrs...)

@@ -266,30 +274,30 @@ func (iter *DownloadObjectIterator) downloadECObject(req downloadReqeust2, ecRed
return nil, fmt.Errorf("no enough blocks to reconstruct the object %v , want %d, get only %d", req.Raw.ObjectID, ecRed.K, len(blocks))
}

logger.Debugf("downloading ec object %v from node %v(%v)", req.Raw.ObjectID, node.Name, node.NodeID)
return iter.downloadFromNode(node, req)
	logger.Debugf("downloading ec object %v from storage %v", req.Raw.ObjectID, stg.Storage.Storage)
return iter.downloadFromStorage(stg, req)
}

func (iter *DownloadObjectIterator) sortDownloadNodes(req downloadReqeust2) ([]*DownloadNodeInfo, error) {
var nodeIDs []cdssdk.NodeID
func (iter *DownloadObjectIterator) sortDownloadStorages(req downloadReqeust2) ([]*downloadStorageInfo, error) {
var stgIDs []cdssdk.StorageID
for _, id := range req.Detail.PinnedAt {
if !lo.Contains(nodeIDs, id) {
nodeIDs = append(nodeIDs, id)
if !lo.Contains(stgIDs, id) {
stgIDs = append(stgIDs, id)
}
}
for _, b := range req.Detail.Blocks {
if !lo.Contains(nodeIDs, b.NodeID) {
nodeIDs = append(nodeIDs, b.NodeID)
if !lo.Contains(stgIDs, b.StorageID) {
stgIDs = append(stgIDs, b.StorageID)
}
}

downloadNodeMap := make(map[cdssdk.NodeID]*DownloadNodeInfo)
downloadNodeMap := make(map[cdssdk.StorageID]*downloadStorageInfo)
for _, id := range req.Detail.PinnedAt {
node, ok := downloadNodeMap[id]
if !ok {
mod := iter.allNodes[id]
node = &DownloadNodeInfo{
Node: mod,
mod := iter.allStorages[id]
node = &downloadStorageInfo{
Storage: mod,
ObjectPinned: true,
Distance: iter.getNodeDistance(mod),
}
@@ -300,34 +308,34 @@ func (iter *DownloadObjectIterator) sortDownloadNodes(req downloadReqeust2) ([]*
}

for _, b := range req.Detail.Blocks {
node, ok := downloadNodeMap[b.NodeID]
node, ok := downloadNodeMap[b.StorageID]
if !ok {
mod := iter.allNodes[b.NodeID]
node = &DownloadNodeInfo{
Node: mod,
mod := iter.allStorages[b.StorageID]
node = &downloadStorageInfo{
Storage: mod,
Distance: iter.getNodeDistance(mod),
}
downloadNodeMap[b.NodeID] = node
downloadNodeMap[b.StorageID] = node
}

node.Blocks = append(node.Blocks, b)
}

return sort2.Sort(lo.Values(downloadNodeMap), func(left, right *DownloadNodeInfo) int {
return sort2.Sort(lo.Values(downloadNodeMap), func(left, right *downloadStorageInfo) int {
return sort2.Cmp(left.Distance, right.Distance)
}), nil
}

func (iter *DownloadObjectIterator) getMinReadingBlockSolution(sortedNodes []*DownloadNodeInfo, k int) (float64, []downloadBlock) {
func (iter *DownloadObjectIterator) getMinReadingBlockSolution(sortedStgs []*downloadStorageInfo, k int) (float64, []downloadBlock) {
gotBlocksMap := bitmap.Bitmap64(0)
var gotBlocks []downloadBlock
dist := float64(0.0)
for _, n := range sortedNodes {
for _, n := range sortedStgs {
for _, b := range n.Blocks {
if !gotBlocksMap.Get(b.Index) {
gotBlocks = append(gotBlocks, downloadBlock{
Node: n.Node,
Block: b,
Storage: n.Storage,
Block: b,
})
gotBlocksMap.Set(b.Index, true)
dist += n.Distance
@@ -342,32 +350,32 @@ func (iter *DownloadObjectIterator) getMinReadingBlockSolution(sortedNodes []*Do
return math.MaxFloat64, gotBlocks
}

func (iter *DownloadObjectIterator) getMinReadingObjectSolution(sortedNodes []*DownloadNodeInfo, k int) (float64, *cdssdk.Node) {
func (iter *DownloadObjectIterator) getMinReadingObjectSolution(sortedStgs []*downloadStorageInfo, k int) (float64, *stgmod.StorageDetail) {
dist := math.MaxFloat64
var downloadNode *cdssdk.Node
for _, n := range sortedNodes {
var downloadStg *stgmod.StorageDetail
for _, n := range sortedStgs {
if n.ObjectPinned && float64(k)*n.Distance < dist {
dist = float64(k) * n.Distance
node := n.Node
downloadNode = &node
stg := n.Storage
downloadStg = &stg
}
}

return dist, downloadNode
return dist, downloadStg
}

func (iter *DownloadObjectIterator) getNodeDistance(node cdssdk.Node) float64 {
func (iter *DownloadObjectIterator) getNodeDistance(stg stgmod.StorageDetail) float64 {
if stgglb.Local.NodeID != nil {
if node.NodeID == *stgglb.Local.NodeID {
if stg.MasterHub.NodeID == *stgglb.Local.NodeID {
return consts.NodeDistanceSameNode
}
}

if node.LocationID == stgglb.Local.LocationID {
if stg.MasterHub.LocationID == stgglb.Local.LocationID {
return consts.NodeDistanceSameLocation
}

c := iter.downloader.conn.Get(node.NodeID)
c := iter.downloader.conn.Get(stg.MasterHub.NodeID)
if c == nil || c.Delay == nil || *c.Delay > time.Duration(float64(time.Millisecond)*iter.downloader.cfg.HighLatencyNodeMs) {
return consts.NodeDistanceHighLatencyNode
}
@@ -375,7 +383,7 @@ func (iter *DownloadObjectIterator) getNodeDistance(node cdssdk.Node) float64 {
return consts.NodeDistanceOther
}

func (iter *DownloadObjectIterator) downloadFromNode(node *cdssdk.Node, req downloadReqeust2) (io.ReadCloser, error) {
func (iter *DownloadObjectIterator) downloadFromStorage(stg *stgmod.StorageDetail, req downloadReqeust2) (io.ReadCloser, error) {
var strHandle *exec.DriverReadStream
ft := ioswitch2.NewFromTo()

@@ -387,7 +395,8 @@ func (iter *DownloadObjectIterator) downloadFromNode(node *cdssdk.Node, req down
len := req.Raw.Length
toExec.Range.Length = &len
}
ft.AddFrom(ioswitch2.NewFromNode(req.Detail.Object.FileHash, node, -1)).AddTo(toExec)
// TODO FileHash应该是FileHash类型
ft.AddFrom(ioswitch2.NewFromShardstore(req.Detail.Object.FileHash, *stg.MasterHub, stg.Storage, -1)).AddTo(toExec)
strHandle = handle

parser := parser.NewParser(cdssdk.DefaultECRedundancy)


+ 6
- 6
common/pkgs/downloader/lrc.go View File

@@ -12,21 +12,21 @@ import (
)

func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red *cdssdk.LRCRedundancy) (io.ReadCloser, error) {
allNodes, err := iter.sortDownloadNodes(req)
allStgs, err := iter.sortDownloadStorages(req)
if err != nil {
return nil, err
}

var blocks []downloadBlock
selectedBlkIdx := make(map[int]bool)
for _, node := range allNodes {
for _, b := range node.Blocks {
for _, stg := range allStgs {
for _, b := range stg.Blocks {
if b.Index >= red.M() || selectedBlkIdx[b.Index] {
continue
}
blocks = append(blocks, downloadBlock{
Node: node.Node,
Block: b,
Storage: stg.Storage,
Block: b,
})
selectedBlkIdx[b.Index] = true
}
@@ -40,7 +40,7 @@ func (iter *DownloadObjectIterator) downloadLRCObject(req downloadReqeust2, red
if i > 0 {
logStrs = append(logStrs, ", ")
}
logStrs = append(logStrs, fmt.Sprintf("%v@%v(%v)", b.Block.Index, b.Node.Name, b.Node.NodeID))
logStrs = append(logStrs, fmt.Sprintf("%v@%v", b.Block.Index, b.Storage))
}
logger.Debug(logStrs...)



+ 2
- 2
common/pkgs/downloader/lrc_strip_iterator.go View File

@@ -95,8 +95,8 @@ func (s *LRCStripIterator) Close() {
func (s *LRCStripIterator) downloading() {
var froms []ioswitchlrc.From
for _, b := range s.blocks {
node := b.Node
froms = append(froms, ioswitchlrc.NewFromNode(b.Block.FileHash, &node, b.Block.Index))
stg := b.Storage
froms = append(froms, ioswitchlrc.NewFromNode(b.Block.FileHash, *stg.MasterHub, stg.Storage, b.Block.Index))
}

toExec, hd := ioswitchlrc.NewToDriverWithRange(-1, exec.Range{


+ 4
- 4
common/pkgs/downloader/strip_iterator.go View File

@@ -15,8 +15,8 @@ import (
)

type downloadBlock struct {
Node cdssdk.Node
Block stgmod.ObjectBlock
Storage stgmod.StorageDetail
Block stgmod.ObjectBlock
}

type Strip struct {
@@ -194,8 +194,8 @@ func (s *StripIterator) readStrip(stripIndex int64, buf []byte) (int, error) {

ft := ioswitch2.NewFromTo()
for _, b := range s.blocks {
node := b.Node
ft.AddFrom(ioswitch2.NewFromNode(b.Block.FileHash, &node, b.Block.Index))
stg := b.Storage
ft.AddFrom(ioswitch2.NewFromShardstore(b.Block.FileHash, *stg.MasterHub, stg.Storage, b.Block.Index))
}

toExec, hd := ioswitch2.NewToDriverWithRange(-1, exec.Range{


+ 3
- 2
common/pkgs/ioswitch2/agent_worker.go View File

@@ -17,11 +17,12 @@ var _ = serder.UseTypeUnionExternallyTagged(types.Ref(types.NewTypeUnion[exec.Wo
)))

type AgentWorker struct {
Node cdssdk.Node
Node cdssdk.Node
Address cdssdk.GRPCAddressInfo
}

func (w *AgentWorker) NewClient() (exec.WorkerClient, error) {
cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&w.Node))
cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Node, w.Address))
if err != nil {
return nil, err
}


+ 20
- 18
common/pkgs/ioswitch2/fromto.go View File

@@ -3,7 +3,6 @@ package ioswitch2
import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

type From interface {
@@ -58,22 +57,22 @@ func (f *FromDriver) GetDataIndex() int {
return f.DataIndex
}

type FromNode struct {
FileHash types.FileHash
Node cdssdk.Node
type FromShardstore struct {
FileHash cdssdk.FileHash
Hub cdssdk.Node
Storage cdssdk.Storage
DataIndex int
}

func NewFromNode(fileHash types.FileHash, node cdssdk.Node, storage cdssdk.Storage, dataIndex int) *FromNode {
return &FromNode{
func NewFromShardstore(fileHash cdssdk.FileHash, hub cdssdk.Node, storage cdssdk.Storage, dataIndex int) *FromShardstore {
return &FromShardstore{
FileHash: fileHash,
Node: node,
Hub: hub,
DataIndex: dataIndex,
}
}

func (f *FromNode) GetDataIndex() int {
func (f *FromShardstore) GetDataIndex() int {
return f.DataIndex
}

@@ -108,35 +107,38 @@ func (t *ToDriver) GetRange() exec.Range {
return t.Range
}

type ToNode struct {
Node cdssdk.Node
type ToShardStore struct {
Hub cdssdk.Node
Storage cdssdk.Storage
DataIndex int
Range exec.Range
FileHashStoreKey string
}

func NewToNode(node cdssdk.Node, dataIndex int, fileHashStoreKey string) *ToNode {
return &ToNode{
Node: node,
func NewToShardStore(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string) *ToShardStore {
return &ToShardStore{
Hub: hub,
Storage: stg,
DataIndex: dataIndex,
FileHashStoreKey: fileHashStoreKey,
}
}

func NewToNodeWithRange(node cdssdk.Node, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode {
return &ToNode{
Node: node,
func NewToShardStoreWithRange(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToShardStore {
return &ToShardStore{
Hub: hub,
Storage: stg,
DataIndex: dataIndex,
FileHashStoreKey: fileHashStoreKey,
Range: rng,
}
}

func (t *ToNode) GetDataIndex() int {
func (t *ToShardStore) GetDataIndex() int {
return t.DataIndex
}

func (t *ToNode) GetRange() exec.Range {
func (t *ToShardStore) GetRange() exec.Range {
return t.Range
}



+ 32
- 5
common/pkgs/ioswitch2/http_hub_worker.go View File

@@ -54,19 +54,46 @@ type HttpHubWorkerClient struct {
}

func (c *HttpHubWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error {
return c.cli.ExecuteIOPlan(plan)
return c.cli.ExecuteIOPlan(cdsapi.ExecuteIOPlanReq{
Plan: plan,
})
}
func (c *HttpHubWorkerClient) SendStream(ctx context.Context, planID exec.PlanID, id exec.VarID, stream io.ReadCloser) error {
return c.cli.SendStream(planID, id, stream)
return c.cli.SendStream(cdsapi.SendStreamReq{
SendStreamInfo: cdsapi.SendStreamInfo{
PlanID: planID,
VarID: id,
},
Stream: stream,
})
}
func (c *HttpHubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error {
return c.cli.SendVar(planID, id, value)
return c.cli.SendVar(cdsapi.SendVarReq{
PlanID: planID,
VarID: id,
VarValue: value,
})
}
func (c *HttpHubWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) {
return c.cli.GetStream(planID, streamID, signalID, signal)
return c.cli.GetStream(cdsapi.GetStreamReq{
PlanID: planID,
VarID: streamID,
SignalID: signalID,
Signal: signal,
})
}
func (c *HttpHubWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) {
return c.cli.GetVar(planID, varID, signalID, signal)
resp, err := c.cli.GetVar(cdsapi.GetVarReq{
PlanID: planID,
VarID: varID,
SignalID: signalID,
Signal: signal,
})
if err != nil {
return nil, err
}

return resp.Value, err
}
func (c *HttpHubWorkerClient) Close() error {
//stgglb.AgentRPCPool.Release(c.cli)


+ 13
- 4
common/pkgs/ioswitch2/ops2/multipart.go View File

@@ -2,6 +2,7 @@ package ops2

import (
"encoding/json"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/sdks/cloudstorage"
@@ -11,12 +12,21 @@ import (
func init() {
exec.UseOp[*MultipartManage]()
exec.UseOp[*MultipartUpload]()
exec.UseVarValue[*InitUploadValue]()
}

// InitUploadValue carries the upload session ID produced by MultipartManage
// between plan ops.
type InitUploadValue struct {
	UploadID string `json:"uploadID"`
}

// Clone returns an independent copy of the value.
// BUG FIX: `return &*v` is pointer-identical to v in Go (&*p == p), so the
// previous version shared the same object instead of cloning it.
func (v *InitUploadValue) Clone() exec.VarValue {
	c := *v
	return &c
}

type MultipartManage struct {
Address cdssdk.StorageAddress `json:"address"`
UploadID *exec.StringVar `json:"uploadID"`
ObjectID *exec.StringVar `json:"objectID"`
UploadID exec.VarID `json:"uploadID"`
ObjectID exec.VarID `json:"objectID"`
}

func (o *MultipartManage) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
@@ -41,8 +51,7 @@ func (o *MultipartManage) Execute(ctx *exec.ExecContext, e *exec.Executor) error
if err != nil {
return err
}
o.UploadID.Value = uploadID
e.PutVars(o.UploadID)
e.PutVar(o.UploadID, &InitUploadValue{UploadID: uploadID})

objectID, err := client.CompleteMultipartUpload()
if err != nil {


+ 12
- 10
common/pkgs/ioswitch2/ops2/shard_store.go View File

@@ -21,7 +21,7 @@ func init() {
}

type FileHashValue struct {
Hash types.FileHash `json:"hash"`
Hash cdssdk.FileHash `json:"hash"`
}

func (v *FileHashValue) Clone() exec.VarValue {
@@ -45,9 +45,9 @@ func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return fmt.Errorf("getting shard store pool: %w", err)
}

store, err := pool.Get(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store %v: %w", o.StorageID, err)
store := pool.Get(o.StorageID)
if store == nil {
return fmt.Errorf("shard store %v not found", o.StorageID)
}

file, err := store.Open(o.Open)
@@ -87,9 +87,9 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return fmt.Errorf("getting shard store pool: %w", err)
}

store, err := pool.Get(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store %v: %w", o.StorageID, err)
store := pool.Get(o.StorageID)
if store == nil {
return fmt.Errorf("shard store %v not found", o.StorageID)
}

input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input)
@@ -158,10 +158,11 @@ func (t *ShardReadNode) GenerateOp() (exec.Op, error) {

type ShardWriteNode struct {
dag.NodeBase
StorageID cdssdk.StorageID
FileHashStoreKey string
}

func (b *GraphNodeBuilder) NewShardWrite(fileHashStoreKey string) *ShardWriteNode {
func (b *GraphNodeBuilder) NewShardWrite(stgID cdssdk.StorageID, fileHashStoreKey string) *ShardWriteNode {
node := &ShardWriteNode{
FileHashStoreKey: fileHashStoreKey,
}
@@ -188,8 +189,9 @@ func (t *ShardWriteNode) FileHashVar() *dag.Var {

func (t *ShardWriteNode) GenerateOp() (exec.Op, error) {
return &ShardWrite{
Input: t.InputStreams().Get(0).VarID,
FileHash: t.OutputValues().Get(0).VarID,
Input: t.InputStreams().Get(0).VarID,
FileHash: t.OutputValues().Get(0).VarID,
StorageID: t.StorageID,
}, nil
}



+ 8
- 8
common/pkgs/ioswitch2/parser/parser.go View File

@@ -234,7 +234,7 @@ func (p *DefaultParser) buildFromNode(ctx *ParseContext, f ioswitch2.From) (ops2
}

switch f := f.(type) {
case *ioswitch2.FromNode:
case *ioswitch2.FromShardstore:
t := ctx.DAG.NewShardRead(f.Storage.StorageID, types.NewOpen(f.FileHash))

if f.DataIndex == -1 {
@@ -243,17 +243,17 @@ func (p *DefaultParser) buildFromNode(ctx *ParseContext, f ioswitch2.From) (ops2
t.Open.WithNullableLength(blkRange.Offset, blkRange.Length)
}

switch typeInfo := f.Node.Address.(type) {
switch addr := f.Hub.Address.(type) {
case *cdssdk.HttpAddressInfo:
t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Node: f.Node})
t.Env().ToEnvWorker(&ioswitch2.HttpHubWorker{Node: f.Hub})
t.Env().Pinned = true

case *cdssdk.GRPCAddressInfo:
t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: f.Node})
t.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: f.Hub, Address: *addr})
t.Env().Pinned = true

default:
return nil, fmt.Errorf("unsupported node address type %T", typeInfo)
return nil, fmt.Errorf("unsupported node address type %T", addr)
}

return t, nil
@@ -280,9 +280,9 @@ func (p *DefaultParser) buildFromNode(ctx *ParseContext, f ioswitch2.From) (ops2

// buildToNode converts a logical To into a DAG sink node.
func (p *DefaultParser) buildToNode(ctx *ParseContext, t ioswitch2.To) (ops2.ToNode, error) {
	switch t := t.(type) {
	case *ioswitch2.ToShardStore:
		// AgentWorker.NewClient dials w.Address, so it must be populated here —
		// the previous version left it zero-valued, while the From side already
		// fills it. Guard the downcast instead of panicking on non-gRPC hubs,
		// mirroring the address switch in buildFromNode.
		grpcAddr, ok := t.Hub.Address.(*cdssdk.GRPCAddressInfo)
		if !ok {
			return nil, fmt.Errorf("unsupported node address type %T", t.Hub.Address)
		}

		n := ctx.DAG.NewShardWrite(t.Storage.StorageID, t.FileHashStoreKey)
		n.Env().ToEnvWorker(&ioswitch2.AgentWorker{Node: t.Hub, Address: *grpcAddr})
		n.Env().Pinned = true

		return n, nil


+ 3
- 2
common/pkgs/ioswitchlrc/agent_worker.go View File

@@ -15,11 +15,12 @@ import (
// )))

type AgentWorker struct {
Node cdssdk.Node
Node cdssdk.Node
Address cdssdk.GRPCAddressInfo
}

func (w *AgentWorker) NewClient() (exec.WorkerClient, error) {
cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&w.Node))
cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(w.Node, w.Address))
if err != nil {
return nil, err
}


+ 10
- 8
common/pkgs/ioswitchlrc/fromto.go View File

@@ -3,7 +3,6 @@ package ioswitchlrc
import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

type From interface {
@@ -38,13 +37,13 @@ func (f *FromDriver) GetDataIndex() int {
}

type FromNode struct {
FileHash types.FileHash
FileHash cdssdk.FileHash
Node cdssdk.Node
Storage cdssdk.Storage
DataIndex int
}

func NewFromNode(fileHash types.FileHash, node cdssdk.Node, storage cdssdk.Storage, dataIndex int) *FromNode {
func NewFromNode(fileHash cdssdk.FileHash, node cdssdk.Node, storage cdssdk.Storage, dataIndex int) *FromNode {
return &FromNode{
FileHash: fileHash,
Node: node,
@@ -88,23 +87,26 @@ func (t *ToDriver) GetRange() exec.Range {
}

type ToNode struct {
Node cdssdk.Node
Hub cdssdk.Node
Storage cdssdk.Storage
DataIndex int
Range exec.Range
FileHashStoreKey string
}

func NewToNode(node cdssdk.Node, dataIndex int, fileHashStoreKey string) *ToNode {
func NewToStorage(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string) *ToNode {
return &ToNode{
Node: node,
Hub: hub,
Storage: stg,
DataIndex: dataIndex,
FileHashStoreKey: fileHashStoreKey,
}
}

func NewToNodeWithRange(node cdssdk.Node, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode {
func NewToStorageWithRange(hub cdssdk.Node, stg cdssdk.Storage, dataIndex int, fileHashStoreKey string, rng exec.Range) *ToNode {
return &ToNode{
Node: node,
Hub: hub,
Storage: stg,
DataIndex: dataIndex,
FileHashStoreKey: fileHashStoreKey,
Range: rng,


+ 7
- 7
common/pkgs/ioswitchlrc/ops2/shard_store.go View File

@@ -21,7 +21,7 @@ func init() {
}

type FileHashValue struct {
Hash types.FileHash `json:"hash"`
Hash cdssdk.FileHash `json:"hash"`
}

func (v *FileHashValue) Clone() exec.VarValue {
@@ -45,9 +45,9 @@ func (o *ShardRead) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return fmt.Errorf("getting shard store pool: %w", err)
}

store, err := pool.Get(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store %v: %w", o.StorageID, err)
store := pool.Get(o.StorageID)
if store == nil {
return fmt.Errorf("shard store %v not found", o.StorageID)
}

file, err := store.Open(o.Open)
@@ -87,9 +87,9 @@ func (o *ShardWrite) Execute(ctx *exec.ExecContext, e *exec.Executor) error {
return fmt.Errorf("getting shard store pool: %w", err)
}

store, err := pool.Get(o.StorageID)
if err != nil {
return fmt.Errorf("getting shard store %v: %w", o.StorageID, err)
store := pool.Get(o.StorageID)
if store == nil {
return fmt.Errorf("shard store %v not found", o.StorageID)
}

input, err := exec.BindVar[*exec.StreamValue](e, ctx.Context, o.Input)


+ 3
- 2
common/pkgs/ioswitchlrc/parser/passes.go View File

@@ -6,6 +6,7 @@ import (

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/dag"
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/ops2"
@@ -71,7 +72,7 @@ func buildFromNode(ctx *GenerateContext, f ioswitchlrc.From) (ops2.FromNode, err
t.Open.WithNullableLength(blkRange.Offset, blkRange.Length)
}

t.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: f.Node})
t.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: f.Node, Address: *f.Node.Address.(*cdssdk.GRPCAddressInfo)})
t.Env().Pinned = true

return t, nil
@@ -100,7 +101,7 @@ func buildToNode(ctx *GenerateContext, t ioswitchlrc.To) (ops2.ToNode, error) {
switch t := t.(type) {
case *ioswitchlrc.ToNode:
n := ctx.DAG.NewShardWrite(t.FileHashStoreKey)
n.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: t.Node})
n.Env().ToEnvWorker(&ioswitchlrc.AgentWorker{Node: t.Hub})
n.Env().Pinned = true

return n, nil


+ 5
- 6
common/pkgs/mq/agent/cache.go View File

@@ -3,7 +3,6 @@ package agent
import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

type CacheService interface {
@@ -24,13 +23,13 @@ type CheckCache struct {
}
type CheckCacheResp struct {
mq.MessageBodyBase
FileHashes []types.FileHash `json:"fileHashes"`
FileHashes []cdssdk.FileHash `json:"fileHashes"`
}

func NewCheckCache(stgID cdssdk.StorageID) *CheckCache {
return &CheckCache{StorageID: stgID}
}
func NewCheckCacheResp(fileHashes []types.FileHash) *CheckCacheResp {
func NewCheckCacheResp(fileHashes []cdssdk.FileHash) *CheckCacheResp {
return &CheckCacheResp{
FileHashes: fileHashes,
}
@@ -44,14 +43,14 @@ var _ = Register(Service.CacheGC)

type CacheGC struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
Avaiables []types.FileHash `json:"avaiables"`
StorageID cdssdk.StorageID `json:"storageID"`
Avaiables []cdssdk.FileHash `json:"avaiables"`
}
type CacheGCResp struct {
mq.MessageBodyBase
}

func ReqCacheGC(stgID cdssdk.StorageID, avaiables []types.FileHash) *CacheGC {
func ReqCacheGC(stgID cdssdk.StorageID, avaiables []cdssdk.FileHash) *CacheGC {
return &CacheGC{
StorageID: stgID,
Avaiables: avaiables,


+ 1
- 1
common/pkgs/mq/agent/storage.go View File

@@ -4,7 +4,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StorageService interface {


+ 1
- 1
common/pkgs/mq/coordinator/bucket.go View File

@@ -3,7 +3,7 @@ package coordinator
import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type BucketService interface {


+ 3
- 3
common/pkgs/mq/coordinator/cache.go View File

@@ -42,16 +42,16 @@ var _ = Register(Service.CacheRemovePackage)
type CacheRemovePackage struct {
mq.MessageBodyBase
PackageID cdssdk.PackageID `json:"packageID"`
NodeID cdssdk.NodeID `json:"nodeID"`
StorageID cdssdk.StorageID `json:"storageID"`
}
type CacheRemovePackageResp struct {
mq.MessageBodyBase
}

func ReqCacheRemoveMovedPackage(packageID cdssdk.PackageID, nodeID cdssdk.NodeID) *CacheRemovePackage {
func ReqCacheRemoveMovedPackage(packageID cdssdk.PackageID, stgID cdssdk.StorageID) *CacheRemovePackage {
return &CacheRemovePackage{
PackageID: packageID,
NodeID: nodeID,
StorageID: stgID,
}
}
func RespCacheRemovePackage() *CacheRemovePackageResp {


+ 2
- 2
common/pkgs/mq/coordinator/object.go View File

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"

stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type ObjectService interface {
@@ -122,7 +122,7 @@ type UpdateObjectRedundancyResp struct {
type UpdatingObjectRedundancy struct {
ObjectID cdssdk.ObjectID `json:"objectID" db:"ObjectID"`
Redundancy cdssdk.Redundancy `json:"redundancy" db:"Redundancy"`
PinnedAt []cdssdk.NodeID `json:"pinnedAt"`
PinnedAt []cdssdk.StorageID `json:"pinnedAt"`
Blocks []stgmod.ObjectBlock `json:"blocks"`
}



+ 34
- 34
common/pkgs/mq/coordinator/package.go View File

@@ -6,7 +6,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type PackageService interface {
@@ -20,9 +20,9 @@ type PackageService interface {

DeletePackage(msg *DeletePackage) (*DeletePackageResp, *mq.CodeMessage)

GetPackageCachedNodes(msg *GetPackageCachedNodes) (*GetPackageCachedNodesResp, *mq.CodeMessage)
GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, *mq.CodeMessage)

GetPackageLoadedNodes(msg *GetPackageLoadedNodes) (*GetPackageLoadedNodesResp, *mq.CodeMessage)
GetPackageLoadedStorages(msg *GetPackageLoadedStorages) (*GetPackageLoadedStoragesResp, *mq.CodeMessage)
}

// 获取Package基本信息
@@ -129,11 +129,11 @@ type UpdatePackageResp struct {
Added []cdssdk.Object `json:"added"`
}
type AddObjectEntry struct {
Path string `json:"path"`
Size int64 `json:"size,string"`
FileHash string `json:"fileHash"`
UploadTime time.Time `json:"uploadTime"` // 开始上传文件的时间
NodeID cdssdk.NodeID `json:"nodeID"`
Path string `json:"path"`
Size int64 `json:"size,string"`
FileHash cdssdk.FileHash `json:"fileHash"`
UploadTime time.Time `json:"uploadTime"` // 开始上传文件的时间
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectEntry, deletes []cdssdk.ObjectID) *UpdatePackage {
@@ -148,13 +148,13 @@ func NewUpdatePackageResp(added []cdssdk.Object) *UpdatePackageResp {
Added: added,
}
}
func NewAddObjectEntry(path string, size int64, fileHash string, uploadTime time.Time, nodeID cdssdk.NodeID) AddObjectEntry {
func NewAddObjectEntry(path string, size int64, fileHash cdssdk.FileHash, uploadTime time.Time, stgID cdssdk.StorageID) AddObjectEntry {
return AddObjectEntry{
Path: path,
Size: size,
FileHash: fileHash,
UploadTime: uploadTime,
NodeID: nodeID,
StorageID: stgID,
}
}
func (client *Client) UpdatePackage(msg *UpdatePackage) (*UpdatePackageResp, error) {
@@ -187,72 +187,72 @@ func (client *Client) DeletePackage(msg *DeletePackage) (*DeletePackageResp, err
}

// 根据PackageID获取object分布情况
var _ = Register(Service.GetPackageCachedNodes)
var _ = Register(Service.GetPackageCachedStorages)

type GetPackageCachedNodes struct {
type GetPackageCachedStorages struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
}

type PackageCachedNodeInfo struct {
NodeID int64 `json:"nodeID"`
type PackageCachedStorageInfo struct {
StorageID int64 `json:"storageID"`
FileSize int64 `json:"fileSize"`
ObjectCount int64 `json:"objectCount"`
}

type GetPackageCachedNodesResp struct {
type GetPackageCachedStoragesResp struct {
mq.MessageBodyBase
cdssdk.PackageCachingInfo
}

func NewGetPackageCachedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageCachedNodes {
return &GetPackageCachedNodes{
func ReqGetPackageCachedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageCachedStorages {
return &GetPackageCachedStorages{
UserID: userID,
PackageID: packageID,
}
}

func NewGetPackageCachedNodesResp(nodeInfos []cdssdk.NodePackageCachingInfo, packageSize int64) *GetPackageCachedNodesResp {
return &GetPackageCachedNodesResp{
func ReqGetPackageCachedStoragesResp(nodeInfos []cdssdk.StoragePackageCachingInfo, packageSize int64) *GetPackageCachedStoragesResp {
return &GetPackageCachedStoragesResp{
PackageCachingInfo: cdssdk.PackageCachingInfo{
NodeInfos: nodeInfos,
PackageSize: packageSize,
StorageInfos: nodeInfos,
PackageSize: packageSize,
},
}
}

func (client *Client) GetPackageCachedNodes(msg *GetPackageCachedNodes) (*GetPackageCachedNodesResp, error) {
return mq.Request(Service.GetPackageCachedNodes, client.rabbitCli, msg)
func (client *Client) GetPackageCachedStorages(msg *GetPackageCachedStorages) (*GetPackageCachedStoragesResp, error) {
return mq.Request(Service.GetPackageCachedStorages, client.rabbitCli, msg)
}

// 根据PackageID获取storage分布情况
var _ = Register(Service.GetPackageLoadedNodes)
var _ = Register(Service.GetPackageLoadedStorages)

type GetPackageLoadedNodes struct {
type GetPackageLoadedStorages struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
PackageID cdssdk.PackageID `json:"packageID"`
}

type GetPackageLoadedNodesResp struct {
type GetPackageLoadedStoragesResp struct {
mq.MessageBodyBase
NodeIDs []cdssdk.NodeID `json:"nodeIDs"`
StorageIDs []cdssdk.StorageID `json:"storageIDs"`
}

func NewGetPackageLoadedNodes(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageLoadedNodes {
return &GetPackageLoadedNodes{
func ReqGetPackageLoadedStorages(userID cdssdk.UserID, packageID cdssdk.PackageID) *GetPackageLoadedStorages {
return &GetPackageLoadedStorages{
UserID: userID,
PackageID: packageID,
}
}

func NewGetPackageLoadedNodesResp(nodeIDs []cdssdk.NodeID) *GetPackageLoadedNodesResp {
return &GetPackageLoadedNodesResp{
NodeIDs: nodeIDs,
func NewGetPackageLoadedStoragesResp(stgIDs []cdssdk.StorageID) *GetPackageLoadedStoragesResp {
return &GetPackageLoadedStoragesResp{
StorageIDs: stgIDs,
}
}

func (client *Client) GetPackageLoadedNodes(msg *GetPackageLoadedNodes) (*GetPackageLoadedNodesResp, error) {
return mq.Request(Service.GetPackageLoadedNodes, client.rabbitCli, msg)
func (client *Client) GetPackageLoadedStorages(msg *GetPackageLoadedStorages) (*GetPackageLoadedStoragesResp, error) {
return mq.Request(Service.GetPackageLoadedStorages, client.rabbitCli, msg)
}

+ 43
- 15
common/pkgs/mq/coordinator/storage.go View File

@@ -4,13 +4,15 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
)

type StorageService interface {
GetStorage(msg *GetStorage) (*GetStorageResp, *mq.CodeMessage)

GetStorageDetail(msg *GetStorageDetail) (*GetStorageDetailResp, *mq.CodeMessage)
GetStorageDetails(msg *GetStorageDetails) (*GetStorageDetailsResp, *mq.CodeMessage)

GetUserStorageDetails(msg *GetUserStorageDetails) (*GetUserStorageDetailsResp, *mq.CodeMessage)

GetStorageByName(msg *GetStorageByName) (*GetStorageByNameResp, *mq.CodeMessage)

@@ -46,29 +48,29 @@ func (client *Client) GetStorage(msg *GetStorage) (*GetStorageResp, error) {
}

// 获取Storage的详细信息
var _ = Register(Service.GetStorageDetail)
var _ = Register(Service.GetStorageDetails)

type GetStorageDetail struct {
type GetStorageDetails struct {
mq.MessageBodyBase
StorageID cdssdk.StorageID `json:"storageID"`
StorageIDs []cdssdk.StorageID `json:"storageIDs"`
}
type GetStorageDetailResp struct {
type GetStorageDetailsResp struct {
mq.MessageBodyBase
Storage stgmod.StorageDetail `json:"storage"`
Storages []*stgmod.StorageDetail `json:"storages"`
}

func ReqGetStorageDetail(storageID cdssdk.StorageID) *GetStorageDetail {
return &GetStorageDetail{
StorageID: storageID,
func ReqGetStorageDetails(storageIDs []cdssdk.StorageID) *GetStorageDetails {
return &GetStorageDetails{
StorageIDs: storageIDs,
}
}
func RespGetStorageDetail(stg stgmod.StorageDetail) *GetStorageDetailResp {
return &GetStorageDetailResp{
Storage: stg,
func RespGetStorageDetails(stgs []*stgmod.StorageDetail) *GetStorageDetailsResp {
return &GetStorageDetailsResp{
Storages: stgs,
}
}
func (client *Client) GetStorageDetail(msg *GetStorageDetail) (*GetStorageDetailResp, error) {
return mq.Request(Service.GetStorageDetail, client.rabbitCli, msg)
func (client *Client) GetStorageDetails(msg *GetStorageDetails) (*GetStorageDetailsResp, error) {
return mq.Request(Service.GetStorageDetails, client.rabbitCli, msg)
}

var _ = Register(Service.GetStorageByName)
@@ -98,6 +100,32 @@ func (client *Client) GetStorageByName(msg *GetStorageByName) (*GetStorageByName
return mq.Request(Service.GetStorageByName, client.rabbitCli, msg)
}

// 获取用户的Storage信息
var _ = Register(Service.GetUserStorageDetails)

type GetUserStorageDetails struct {
mq.MessageBodyBase
UserID cdssdk.UserID `json:"userID"`
}
type GetUserStorageDetailsResp struct {
mq.MessageBodyBase
Storages []stgmod.StorageDetail `json:"storages"`
}

func ReqGetUserStorageDetails(userID cdssdk.UserID) *GetUserStorageDetails {
return &GetUserStorageDetails{
UserID: userID,
}
}
func RespGetUserStorageDetails(stgs []stgmod.StorageDetail) *GetUserStorageDetailsResp {
return &GetUserStorageDetailsResp{
Storages: stgs,
}
}
func (client *Client) GetUserStorageDetails(msg *GetUserStorageDetails) (*GetUserStorageDetailsResp, error) {
return mq.Request(Service.GetUserStorageDetails, client.rabbitCli, msg)
}

// 提交调度记录
var _ = Register(Service.StoragePackageLoaded)



+ 3
- 3
common/pkgs/mq/scanner/event/agent_cache_gc.go View File

@@ -4,12 +4,12 @@ import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type AgentCacheGC struct {
EventBase
NodeID cdssdk.NodeID `json:"nodeID"`
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewAgentCacheGC(nodeID cdssdk.NodeID) *AgentCacheGC {
func NewAgentCacheGC(stgID cdssdk.StorageID) *AgentCacheGC {
return &AgentCacheGC{
NodeID: nodeID,
StorageID: stgID,
}
}



+ 3
- 3
common/pkgs/mq/scanner/event/agent_check_cache.go View File

@@ -4,12 +4,12 @@ import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

type AgentCheckCache struct {
EventBase
NodeID cdssdk.NodeID `json:"nodeID"`
StorageID cdssdk.StorageID `json:"storageID"`
}

func NewAgentCheckCache(nodeID cdssdk.NodeID) *AgentCheckCache {
func NewAgentCheckCache(stgID cdssdk.StorageID) *AgentCheckCache {
return &AgentCheckCache{
NodeID: nodeID,
StorageID: stgID,
}
}



+ 49
- 2
common/pkgs/storage/shard/pool/pool.go View File

@@ -1,21 +1,68 @@
package pool

import (
"fmt"
"sync"

"gitlink.org.cn/cloudream/common/pkgs/async"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/storages/local"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

// ShardStorePool holds one running shard store per storage ID.
type ShardStorePool struct {
	// stores maps a storage ID to its running store; guarded by lock.
	stores map[cdssdk.StorageID]*shardStore
	lock sync.Mutex
}

// New creates an empty ShardStorePool.
func New() *ShardStorePool {
	pool := ShardStorePool{}
	pool.stores = make(map[cdssdk.StorageID]*shardStore)
	return &pool
}

// PutNew creates a shard store for the given storage according to its config
// and registers it in the pool. It fails if a store for this storage already
// exists or if the config type is not supported.
func (p *ShardStorePool) PutNew(stg cdssdk.Storage, config cdssdk.ShardStoreConfig) error {
	p.lock.Lock()
	defer p.lock.Unlock()

	switch cfg := config.(type) {
	case *cdssdk.LocalShardStorage:
		if _, ok := p.stores[stg.StorageID]; ok {
			// %v, not %s: StorageID is a numeric type, %s would print %!s(...).
			return fmt.Errorf("storage %v already exists", stg.StorageID)
		}

		store, err := local.New(stg, *cfg)
		if err != nil {
			return fmt.Errorf("new local shard store: %v", err)
		}

		// Start launches the store's background loop; keep the event channel
		// so pool users can consume store events later.
		ch := store.Start()

		p.stores[stg.StorageID] = &shardStore{
			Store:     store,
			EventChan: ch,
		}
		return nil

	default:
		return fmt.Errorf("unsupported shard store type: %T", cfg)
	}
}

func (p *ShardStorePool) Get(stgID cdssdk.StorageID) (types.ShardStore, error) {
// 不存在时返回nil
func (p *ShardStorePool) Get(stgID cdssdk.StorageID) types.ShardStore {
p.lock.Lock()
defer p.lock.Unlock()

store, ok := p.stores[stgID]
if !ok {
return nil
}

return store.Store
}

// shardStore pairs a running store with the event channel returned by Start.
type shardStore struct {
	Store types.ShardStore
	// EventChan receives events emitted by the store's background loop.
	EventChan *async.UnboundChannel[types.StoreEvent]
}

+ 138
- 8
common/pkgs/storage/shard/storages/local/local.go View File

@@ -1,44 +1,174 @@
package local

import (
"crypto/sha256"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"

"gitlink.org.cn/cloudream/common/pkgs/async"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/storages/utils"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

const (
TempDir = "tmp"
BlocksDir = "blocks"
)

// Local is a shard store backed by a directory on the local file system.
type Local struct {
	// cfg holds the root directory and other local-store settings.
	cfg cdssdk.LocalShardStorage
}

func New(stg cdssdk.Storage, cfg cdssdk.LocalShardStorage) *Local {
func New(stg cdssdk.Storage, cfg cdssdk.LocalShardStorage) (*Local, error) {
_, ok := stg.Address.(*cdssdk.LocalStorageAddress)
if !ok {
return nil, fmt.Errorf("storage address(%T) is not local", stg)
}

return &Local{
cfg: cfg,
}
}, nil
}

// Start launches the store's background work and returns its event channel.
func (s *Local) Start() *async.UnboundChannel[types.StoreEvent] {
	// TODO: no background tasks are needed yet, so the channel stays idle.
	return async.NewUnboundChannel[types.StoreEvent]()
}

func (s *Local) New() io.Writer {
func (s *Local) New() types.Writer {
file, err := os.CreateTemp(filepath.Join(s.cfg.Root, "tmp"), "tmp-*")
if err != nil {
return utils.ErrorWriter(err)
}

return &Writer{
path: filepath.Join(s.cfg.Root, "tmp", file.Name()),
file: file,
hasher: sha256.New(),
owner: s,
}
}

// 使用F函数创建Option对象
func (s *Local) Open(opt types.OpenOption) (io.ReadCloser, error) {
fileName := string(opt.FileHash)
if len(fileName) < 2 {
return nil, fmt.Errorf("invalid file name")
}

}
filePath := filepath.Join(s.cfg.Root, BlocksDir, fileName)
file, err := os.Open(filePath)
if err != nil {
return nil, err
}

func (s *Local) Remove(hash types.FileHash) error {
if opt.Offset > 0 {
_, err = file.Seek(opt.Offset, io.SeekStart)
if err != nil {
file.Close()
return nil, err
}
}

if opt.Length >= 0 {
return io2.Length(file, opt.Length), nil
}

return file, nil
}

// 遍历所有文件,callback返回false则停止遍历
func (s *Local) ListAll() ([]types.FileInfo, error) {
var infos []types.FileInfo

blockDir := filepath.Join(s.cfg.Root, BlocksDir)
err := filepath.WalkDir(blockDir, func(path string, d fs.DirEntry, err error) error {
if d.IsDir() {
return nil
}

info, ok := d.(fs.FileInfo)
if !ok {
return nil
}

// TODO 简单检查一下文件名是否合法

infos = append(infos, types.FileInfo{
Hash: cdssdk.FileHash(info.Name()),
Size: info.Size(),
Description: filepath.Join(blockDir, path),
})
return nil
})
if err != nil {
return nil, err
}

return infos, nil
}

// Purge deletes the given files from the blocks directory. Deletion is not
// atomic, so individual failures are only logged, never returned.
func (s *Local) Purge(removes []cdssdk.FileHash) error {
	for _, hash := range removes {
		fileName := string(hash)
		// Guard the [:2] shard-prefix slice against malformed short hashes.
		if len(fileName) < 2 {
			logger.Warnf("invalid file hash %q, skipped", fileName)
			continue
		}

		// Files are sharded into subdirectories by the first two hash chars.
		path := filepath.Join(s.cfg.Root, BlocksDir, fileName[:2], fileName)
		if err := os.Remove(path); err != nil {
			logger.Warnf("remove file %v: %v", path, err)
		}
	}

	return nil
}

func (s *Local) Purge(availables []types.FileHash) error {
// Stats reports the current status of the local store.
func (s *Local) Stats() types.Stats {
	// TODO: collect real statistics about the local storage.
	return types.Stats{
		Status: types.StatusOK,
	}
}

// onWritterAbort is called by Writer.Abort; it deletes the partial temp file.
func (s *Local) onWritterAbort(w *Writer) {
	logger.Debugf("writing file %v aborted", w.path)
	s.removeTempFile(w.path)
}

func (s *Local) Stats() (types.Stats, error) {
// onWritterFinish moves the finished temp file into the blocks directory,
// named after its hash, and returns the stored file's info. On any failure it
// removes the temp file itself, so callers must not call onWritterAbort too.
func (s *Local) onWritterFinish(w *Writer, hash cdssdk.FileHash) (types.FileInfo, error) {
	logger.Debugf("write file %v finished, size: %v, hash: %v", w.path, w.size, hash)

	// Files are sharded into subdirectories by the first two hash characters.
	blockDir := filepath.Join(s.cfg.Root, BlocksDir, string(hash)[:2])
	err := os.MkdirAll(blockDir, 0755)
	if err != nil {
		s.removeTempFile(w.path)
		logger.Warnf("make block dir %v: %v", blockDir, err)
		return types.FileInfo{}, fmt.Errorf("making block dir: %w", err)
	}

	name := filepath.Join(blockDir, string(hash))
	err = os.Rename(w.path, name)
	if err != nil {
		s.removeTempFile(w.path)
		logger.Warnf("rename %v to %v: %v", w.path, name, err)
		return types.FileInfo{}, fmt.Errorf("rename file: %w", err)
	}

	return types.FileInfo{
		Hash: hash,
		Size: w.size,
		// Report the final path: the temp path no longer exists after rename.
		Description: name,
	}, nil
}

// removeTempFile deletes a leftover temp file, logging (not returning) failures.
func (s *Local) removeTempFile(path string) {
	if err := os.Remove(path); err != nil {
		logger.Warnf("removing temp file %v: %v", path, err)
	}
}

+ 48
- 2
common/pkgs/storage/shard/storages/local/writer.go View File

@@ -1,20 +1,66 @@
package local

import "gitlink.org.cn/cloudream/storage/common/pkgs/shardstore/types"
import (
"encoding/hex"
"fmt"
"hash"
"os"
"strings"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"
)

// Writer streams data into a temp file while hashing it. Finish moves the
// file into the block directory under its hash; Abort discards it.
type Writer struct {
	path string // absolute path of the temp file
	file *os.File // open handle to the temp file
	hasher hash.Hash // running SHA256 of all bytes written so far
	size int64 // total bytes written so far
	closed bool // set once Abort or Finish has been called
	owner *Local // store that created this writer
}

func (w *Writer) Write(data []byte) error {
func (w *Writer) Write(data []byte) (int, error) {
n, err := w.file.Write(data)
if err != nil {
return 0, err
}

w.hasher.Write(data[:n])
w.size += int64(n)
return n, nil
}

// Abort cancels the write and deletes the temp file. Calling it after the
// stream is already closed (e.g. after Finish) is a harmless no-op, which
// makes it safe to use with defer.
func (w *Writer) Abort() error {
	if w.closed {
		return nil
	}
	w.closed = true

	closeErr := w.file.Close()
	w.owner.onWritterAbort(w)
	return closeErr
}

// Finish closes the stream and registers the file under its SHA256 hash,
// reported as an upper-case hex string.
func (w *Writer) Finish() (types.FileInfo, error) {
	if w.closed {
		return types.FileInfo{}, fmt.Errorf("stream closed")
	}
	w.closed = true

	if err := w.file.Close(); err != nil {
		w.owner.onWritterAbort(w)
		return types.FileInfo{}, err
	}

	digest := hex.EncodeToString(w.hasher.Sum(nil))
	// No onWritterAbort on failure here: onWritterFinish cleans up itself.
	return w.owner.onWritterFinish(w, cdssdk.FileHash(strings.ToUpper(digest)))
}

+ 26
- 0
common/pkgs/storage/shard/storages/utils/utils.go View File

@@ -0,0 +1,26 @@
package utils

import "gitlink.org.cn/cloudream/storage/common/pkgs/storage/shard/types"

// errorWriter is a types.Writer whose every operation fails with a fixed
// error, letting callers return a usable Writer even when creation failed.
type errorWriter struct {
	err error
}

func (w *errorWriter) Write(data []byte) (int, error) {
	return 0, w.err
}

// Abort cancels the write. It must remain callable even after Finish has been
// called, with no further effect — this keeps defer-based cleanup simple.
func (w *errorWriter) Abort() error {
	return w.err
}

// Finish ends the write; here it always fails with the stored error.
func (w *errorWriter) Finish() (types.FileInfo, error) {
	return types.FileInfo{}, w.err
}

// ErrorWriter wraps err into a types.Writer that fails every call with it.
func ErrorWriter(err error) types.Writer {
	return &errorWriter{err: err}
}

+ 7
- 3
common/pkgs/storage/shard/types/option.go View File

@@ -1,14 +1,18 @@
package types

import "fmt"
import (
"fmt"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

type OpenOption struct {
FileHash FileHash
FileHash cdssdk.FileHash
Offset int64
Length int64
}

func NewOpen(fileHash FileHash) OpenOption {
func NewOpen(fileHash cdssdk.FileHash) OpenOption {
return OpenOption{
FileHash: fileHash,
Offset: 0,


+ 13
- 7
common/pkgs/storage/shard/types/shardstore.go View File

@@ -1,8 +1,11 @@
package types

import "io"
import (
"io"

type FileHash string
"gitlink.org.cn/cloudream/common/pkgs/async"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

type Status interface {
String() string
@@ -16,17 +19,20 @@ func (s *OKStatus) String() string {

var StatusOK = &OKStatus{}

type StoreEvent interface {
}

type ShardStore interface {
// 启动服务
Start() *async.UnboundChannel[StoreEvent]
// 准备写入一个新文件,写入后获得FileHash
New() Writer
// 使用F函数创建Option对象
Open(opt OpenOption) (io.ReadCloser, error)
// 删除文件
Remove(hash FileHash) error
// 获取所有文件信息,尽量保证操作是原子的
ListAll() ([]FileInfo, error)
// 清除其他文件,只保留给定的文件,尽量保证操作是原子的
Purge(availables []FileHash) error
// 删除指定的文件
Purge(removes []cdssdk.FileHash) error
// 获得存储系统信息
Stats() Stats
}
@@ -37,7 +43,7 @@ type Config interface {

type FileInfo struct {
// 文件的SHA256哈希值,全大写的16进制字符串格式
Hash FileHash
Hash cdssdk.FileHash
Size int64
// 文件描述信息,比如文件名,用于调试
Description string


+ 26
- 20
coordinator/internal/cmd/migrate.go View File

@@ -6,6 +6,8 @@ import (

"github.com/spf13/cobra"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gitlink.org.cn/cloudream/storage/coordinator/internal/config"
"gorm.io/driver/mysql"
"gorm.io/gorm"
@@ -39,29 +41,33 @@ func migrate(configPath string) {
os.Exit(1)
}

err = db.AutoMigrate(&cdssdk.Node{})
if err != nil {
fmt.Printf("migratting model Node: %v\n", err)
os.Exit(1)
}

err = db.AutoMigrate(&cdssdk.Storage{})
if err != nil {
fmt.Printf("migratting model Storage: %v\n", err)
os.Exit(1)
}
migrateOne(db, cdssdk.Bucket{})
migrateOne(db, model.Cache{})
migrateOne(db, model.Location{})
migrateOne(db, model.NodeConnectivity{})
migrateOne(db, cdssdk.Node{})
migrateOne(db, stgmod.ObjectAccessStat{})
migrateOne(db, stgmod.ObjectBlock{})
migrateOne(db, cdssdk.Object{})
migrateOne(db, stgmod.PackageAccessStat{})
migrateOne(db, cdssdk.Package{})
migrateOne(db, cdssdk.PinnedObject{})
migrateOne(db, cdssdk.ShardStorage{})
migrateOne(db, cdssdk.SharedStorage{})
migrateOne(db, model.StoragePackage{})
migrateOne(db, cdssdk.Storage{})
migrateOne(db, model.UserStorage{})
migrateOne(db, model.UserBucket{})
migrateOne(db, model.User{})
migrateOne(db, model.UserNode{})

err = db.AutoMigrate(&cdssdk.ShardStorage{})
if err != nil {
fmt.Printf("migratting model ShardStorage: %v\n", err)
os.Exit(1)
}
fmt.Println("migrate success")
}

err = db.AutoMigrate(&cdssdk.SharedStorage{})
func migrateOne[T any](db *gorm.DB, model T) {
err := db.AutoMigrate(model)
if err != nil {
fmt.Printf("migratting model SharedStorage: %v\n", err)
fmt.Printf("migratting model %T: %v\n", model, err)
os.Exit(1)
}

fmt.Println("migrate success")
}

+ 1
- 7
coordinator/internal/cmd/serve.go View File

@@ -5,7 +5,6 @@ import (
"os"

"gitlink.org.cn/cloudream/common/pkgs/logger"
mydb "gitlink.org.cn/cloudream/storage/common/pkgs/db"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/coordinator/internal/config"
@@ -25,17 +24,12 @@ func serve(configPath string) {
os.Exit(1)
}

db, err := mydb.NewDB(&config.Cfg().DB)
if err != nil {
logger.Fatalf("new db failed, err: %s", err.Error())
}

db2, err := db2.NewDB(&config.Cfg().DB)
if err != nil {
logger.Fatalf("new db2 failed, err: %s", err.Error())
}

coorSvr, err := coormq.NewServer(mq.NewService(db, db2), &config.Cfg().RabbitMQ)
coorSvr, err := coormq.NewServer(mq.NewService(db2), &config.Cfg().RabbitMQ)
if err != nil {
logger.Fatalf("new coordinator server failed, err: %s", err.Error())
}


+ 1
- 1
coordinator/internal/config/config.go View File

@@ -3,7 +3,7 @@ package config
import (
log "gitlink.org.cn/cloudream/common/pkgs/logger"
c "gitlink.org.cn/cloudream/common/utils/config"
db "gitlink.org.cn/cloudream/storage/common/pkgs/db/config"
db "gitlink.org.cn/cloudream/storage/common/pkgs/db2/config"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
)



+ 2
- 1
coordinator/internal/mq/bucket.go View File

@@ -2,13 +2,14 @@ package mq

import (
"fmt"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2"

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)



+ 7
- 6
coordinator/internal/mq/cache.go View File

@@ -2,6 +2,7 @@ package mq

import (
"fmt"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2"

"gitlink.org.cn/cloudream/common/consts/errorcode"
@@ -17,9 +18,9 @@ func (svc *Service) CachePackageMoved(msg *coormq.CachePackageMoved) (*coormq.Ca
return fmt.Errorf("getting package by id: %w", err)
}

_, err = svc.db2.Node().GetByID(tx, msg.StorageID)
_, err = svc.db2.Storage().GetByID(tx, msg.StorageID)
if err != nil {
return fmt.Errorf("getting node by id: %w", err)
return fmt.Errorf("getting storage by id: %w", err)
}

err = svc.db2.PinnedObject().CreateFromPackage(tx, msg.PackageID, msg.StorageID)
@@ -44,12 +45,12 @@ func (svc *Service) CacheRemovePackage(msg *coormq.CacheRemovePackage) (*coormq.
return fmt.Errorf("getting package by id: %w", err)
}

_, err = svc.db2.Node().GetByID(tx, msg.NodeID)
_, err = svc.db2.Storage().GetByID(tx, msg.StorageID)
if err != nil {
return fmt.Errorf("getting node by id: %w", err)
return fmt.Errorf("getting storage by id: %w", err)
}

err = svc.db2.PinnedObject().DeleteInPackageAtNode(tx, msg.PackageID, msg.NodeID)
err = svc.db2.PinnedObject().DeleteInPackageAtStorage(tx, msg.PackageID, msg.StorageID)
if err != nil {
return fmt.Errorf("delete pinned objects in package at node: %w", err)
}
@@ -57,7 +58,7 @@ func (svc *Service) CacheRemovePackage(msg *coormq.CacheRemovePackage) (*coormq.
return nil
})
if err != nil {
logger.WithField("PackageID", msg.PackageID).WithField("NodeID", msg.NodeID).Warn(err.Error())
logger.WithField("PackageID", msg.PackageID).WithField("NodeID", msg.StorageID).Warn(err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "remove pinned package failed")
}



+ 18
- 48
coordinator/internal/mq/object.go View File

@@ -3,6 +3,7 @@ package mq
import (
"database/sql"
"fmt"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2"

"github.com/samber/lo"
@@ -68,7 +69,8 @@ func (svc *Service) GetPackageObjectDetails(msg *coormq.GetPackageObjectDetails)
}

func (svc *Service) GetObjectDetails(msg *coormq.GetObjectDetails) (*coormq.GetObjectDetailsResp, *mq.CodeMessage) {
details := make([]*stgmod.ObjectDetail, len(msg.ObjectIDs))
detailsMp := make(map[cdssdk.ObjectID]*stgmod.ObjectDetail)

err := svc.db2.DoTx(func(tx db2.SQLContext) error {
var err error

@@ -79,22 +81,10 @@ func (svc *Service) GetObjectDetails(msg *coormq.GetObjectDetails) (*coormq.GetO
if err != nil {
return fmt.Errorf("batch get objects: %w", err)
}

objIDIdx := 0
objIdx := 0
for objIDIdx < len(msg.ObjectIDs) && objIdx < len(objs) {
if msg.ObjectIDs[objIDIdx] < objs[objIdx].ObjectID {
objIDIdx++
continue
}

// 由于是使用msg.ObjectIDs去查询Object,因此不存在msg.ObjectIDs > Object.ObjectID的情况,
// 下面同理
obj := stgmod.ObjectDetail{
Object: objs[objIDIdx],
for _, obj := range objs {
detailsMp[obj.ObjectID] = &stgmod.ObjectDetail{
Object: obj,
}
details[objIDIdx] = &obj
objIdx++
}

// 查询合并
@@ -102,22 +92,9 @@ func (svc *Service) GetObjectDetails(msg *coormq.GetObjectDetails) (*coormq.GetO
if err != nil {
return fmt.Errorf("batch get object blocks: %w", err)
}

objIDIdx = 0
blkIdx := 0
for objIDIdx < len(msg.ObjectIDs) && blkIdx < len(blocks) {
if details[objIDIdx] == nil {
objIDIdx++
continue
}

if msg.ObjectIDs[objIDIdx] < blocks[blkIdx].ObjectID {
objIDIdx++
continue
}

details[objIDIdx].Blocks = append(details[objIDIdx].Blocks, blocks[blkIdx])
blkIdx++
for _, block := range blocks {
d := detailsMp[block.ObjectID]
d.Blocks = append(d.Blocks, block)
}

// 查询合并
@@ -125,23 +102,11 @@ func (svc *Service) GetObjectDetails(msg *coormq.GetObjectDetails) (*coormq.GetO
if err != nil {
return fmt.Errorf("batch get pinned objects: %w", err)
}

objIDIdx = 0
pinIdx := 0
for objIDIdx < len(msg.ObjectIDs) && pinIdx < len(pinneds) {
if details[objIDIdx] == nil {
objIDIdx++
continue
}

if msg.ObjectIDs[objIDIdx] < pinneds[pinIdx].ObjectID {
objIDIdx++
continue
}

details[objIDIdx].PinnedAt = append(details[objIDIdx].PinnedAt, pinneds[pinIdx].NodeID)
pinIdx++
for _, pinned := range pinneds {
d := detailsMp[pinned.ObjectID]
d.PinnedAt = append(d.PinnedAt, pinned.StorageID)
}

return nil
})

@@ -150,6 +115,11 @@ func (svc *Service) GetObjectDetails(msg *coormq.GetObjectDetails) (*coormq.GetO
return nil, mq.Failed(errorcode.OperationFailed, "get object details failed")
}

details := make([]*stgmod.ObjectDetail, len(msg.ObjectIDs))
for i, objID := range msg.ObjectIDs {
details[i] = detailsMp[objID]
}

return mq.ReplyOK(coormq.RespGetObjectDetails(details))
}



+ 19
- 18
coordinator/internal/mq/package.go View File

@@ -3,9 +3,10 @@ package mq
import (
"database/sql"
"fmt"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"sort"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2"

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
@@ -147,7 +148,7 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack
return mq.ReplyOK(coormq.NewDeletePackageResp())
}

func (svc *Service) GetPackageCachedNodes(msg *coormq.GetPackageCachedNodes) (*coormq.GetPackageCachedNodesResp, *mq.CodeMessage) {
func (svc *Service) GetPackageCachedStorages(msg *coormq.GetPackageCachedStorages) (*coormq.GetPackageCachedStoragesResp, *mq.CodeMessage) {
isAva, err := svc.db2.Package().IsAvailable(svc.db2.DefCtx(), msg.UserID, msg.PackageID)
if err != nil {
logger.WithField("UserID", msg.UserID).
@@ -172,16 +173,16 @@ func (svc *Service) GetPackageCachedNodes(msg *coormq.GetPackageCachedNodes) (*c
}

var packageSize int64
nodeInfoMap := make(map[cdssdk.NodeID]*cdssdk.NodePackageCachingInfo)
stgInfoMap := make(map[cdssdk.StorageID]*cdssdk.StoragePackageCachingInfo)
for _, obj := range objDetails {
// 只要存了文件的一个块,就认为此节点存了整个文件
for _, block := range obj.Blocks {
info, ok := nodeInfoMap[block.NodeID]
info, ok := stgInfoMap[block.StorageID]
if !ok {
info = &cdssdk.NodePackageCachingInfo{
NodeID: block.NodeID,
info = &cdssdk.StoragePackageCachingInfo{
StorageID: block.StorageID,
}
nodeInfoMap[block.NodeID] = info
stgInfoMap[block.StorageID] = info

}

@@ -190,18 +191,18 @@ func (svc *Service) GetPackageCachedNodes(msg *coormq.GetPackageCachedNodes) (*c
}
}

var nodeInfos []cdssdk.NodePackageCachingInfo
for _, nodeInfo := range nodeInfoMap {
var nodeInfos []cdssdk.StoragePackageCachingInfo
for _, nodeInfo := range stgInfoMap {
nodeInfos = append(nodeInfos, *nodeInfo)
}

sort.Slice(nodeInfos, func(i, j int) bool {
return nodeInfos[i].NodeID < nodeInfos[j].NodeID
return nodeInfos[i].StorageID < nodeInfos[j].StorageID
})
return mq.ReplyOK(coormq.NewGetPackageCachedNodesResp(nodeInfos, packageSize))
return mq.ReplyOK(coormq.ReqGetPackageCachedStoragesResp(nodeInfos, packageSize))
}

func (svc *Service) GetPackageLoadedNodes(msg *coormq.GetPackageLoadedNodes) (*coormq.GetPackageLoadedNodesResp, *mq.CodeMessage) {
func (svc *Service) GetPackageLoadedStorages(msg *coormq.GetPackageLoadedStorages) (*coormq.GetPackageLoadedStoragesResp, *mq.CodeMessage) {
storages, err := svc.db2.StoragePackage().FindPackageStorages(svc.db2.DefCtx(), msg.PackageID)
if err != nil {
logger.WithField("PackageID", msg.PackageID).
@@ -209,16 +210,16 @@ func (svc *Service) GetPackageLoadedNodes(msg *coormq.GetPackageLoadedNodes) (*c
return nil, mq.Failed(errorcode.OperationFailed, "get storages by packageID failed")
}

uniqueNodeIDs := make(map[cdssdk.NodeID]bool)
var nodeIDs []cdssdk.NodeID
uniqueStgIDs := make(map[cdssdk.StorageID]bool)
var stgIDs []cdssdk.StorageID
for _, stg := range storages {
if !uniqueNodeIDs[stg.NodeID] {
uniqueNodeIDs[stg.NodeID] = true
nodeIDs = append(nodeIDs, stg.NodeID)
if !uniqueStgIDs[stg.StorageID] {
uniqueStgIDs[stg.StorageID] = true
stgIDs = append(stgIDs, stg.StorageID)
}
}

return mq.ReplyOK(coormq.NewGetPackageLoadedNodesResp(nodeIDs))
return mq.ReplyOK(coormq.NewGetPackageLoadedStoragesResp(stgIDs))
}

func (svc *Service) AddAccessStat(msg *coormq.AddAccessStat) {


+ 3
- 6
coordinator/internal/mq/service.go View File

@@ -1,18 +1,15 @@
package mq

import (
mydb "gitlink.org.cn/cloudream/storage/common/pkgs/db"
mydb2 "gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
)

type Service struct {
db *mydb.DB
db2 *mydb2.DB
db2 *db2.DB
}

func NewService(db *mydb.DB, db2 *mydb2.DB) *Service {
func NewService(db2 *db2.DB) *Service {
return &Service{
db: db,
db2: db2,
}
}

+ 118
- 15
coordinator/internal/mq/storage.go View File

@@ -4,8 +4,10 @@ import (
"database/sql"
"fmt"

"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gorm.io/gorm"

"gitlink.org.cn/cloudream/common/pkgs/mq"
@@ -24,33 +26,134 @@ func (svc *Service) GetStorage(msg *coormq.GetStorage) (*coormq.GetStorageResp,
return mq.ReplyOK(coormq.RespGetStorage(stg))
}

func (svc *Service) GetStorageDetail(msg *coormq.GetStorageDetail) (*coormq.GetStorageDetailResp, *mq.CodeMessage) {
var ret stgmod.StorageDetail
func (svc *Service) GetStorageDetails(msg *coormq.GetStorageDetails) (*coormq.GetStorageDetailsResp, *mq.CodeMessage) {
stgsMp := make(map[cdssdk.StorageID]*stgmod.StorageDetail)

svc.db2.DoTx(func(tx db2.SQLContext) error {
stg, err := svc.db2.Storage().GetByID(tx, msg.StorageID)
if err != nil {
stgs, err := svc.db2.Storage().BatchGetByID(tx, msg.StorageIDs)
if err != nil && err != gorm.ErrRecordNotFound {
return fmt.Errorf("getting storage: %w", err)
}
ret.Storage = stg
var masterHubIDs []cdssdk.NodeID
for _, stg := range stgs {
stgsMp[stg.StorageID] = &stgmod.StorageDetail{
Storage: stg,
}
masterHubIDs = append(masterHubIDs, stg.MasterHub)
}

shard, err := svc.db2.ShardStorage().GetByStorageID(tx, msg.StorageID)
if err == nil {
ret.Shard = &shard
} else if err != gorm.ErrRecordNotFound {
// 获取监护Hub信息
masterHubs, err := svc.db2.Node().BatchGetByID(tx, masterHubIDs)
if err != nil && err != gorm.ErrRecordNotFound {
return fmt.Errorf("getting master hub: %w", err)
}
masterHubMap := make(map[cdssdk.NodeID]cdssdk.Node)
for _, hub := range masterHubs {
masterHubMap[hub.NodeID] = hub
}

// 获取分片存储
shards, err := svc.db2.ShardStorage().BatchGetByStorageIDs(tx, msg.StorageIDs)
if err != nil && err != gorm.ErrRecordNotFound {
return fmt.Errorf("getting shard storage: %w", err)
}
for _, shard := range shards {
stgsMp[shard.StorageID].Shard = &shard
}

for _, stg := range stgsMp {
if stg.Shard != nil {
hub := masterHubMap[stg.MasterHub.NodeID]
stg.MasterHub = &hub
}
}

shared, err := svc.db2.SharedStorage().GetByStorageID(tx, msg.StorageID)
if err == nil {
ret.Shared = &shared
} else if err != gorm.ErrRecordNotFound {
// 获取共享存储的相关信息
shareds, err := svc.db2.SharedStorage().BatchGetByStorageIDs(tx, msg.StorageIDs)
if err != nil && err != gorm.ErrRecordNotFound {
return fmt.Errorf("getting shared storage: %w", err)
}
for _, shared := range shareds {
stgsMp[shared.StorageID].Shared = &shared
}

return nil
})

return mq.ReplyOK(coormq.RespGetStorageDetail(ret))
ret := make([]*stgmod.StorageDetail, len(msg.StorageIDs))
for i, id := range msg.StorageIDs {
stg, ok := stgsMp[id]
if !ok {
ret[i] = nil
continue
}
ret[i] = stg
}

return mq.ReplyOK(coormq.RespGetStorageDetails(ret))
}

// GetUserStorageDetails returns the full detail (base info, master hub,
// shard/shared sub-configs) of every storage owned by the given user.
func (svc *Service) GetUserStorageDetails(msg *coormq.GetUserStorageDetails) (*coormq.GetUserStorageDetailsResp, *mq.CodeMessage) {
	stgsMp := make(map[cdssdk.StorageID]*stgmod.StorageDetail)

	err := svc.db2.DoTx(func(tx db2.SQLContext) error {
		stgs, err := svc.db2.Storage().GetUserStorages(tx, msg.UserID)
		if err != nil && err != gorm.ErrRecordNotFound {
			return fmt.Errorf("getting user storages: %w", err)
		}

		var masterHubIDs []cdssdk.NodeID
		for _, stg := range stgs {
			stgsMp[stg.StorageID] = &stgmod.StorageDetail{
				Storage: stg,
			}
			masterHubIDs = append(masterHubIDs, stg.MasterHub)
		}

		// Info of each storage's master hub.
		masterHubs, err := svc.db2.Node().BatchGetByID(tx, masterHubIDs)
		if err != nil && err != gorm.ErrRecordNotFound {
			return fmt.Errorf("getting master hub: %w", err)
		}
		masterHubMap := make(map[cdssdk.NodeID]cdssdk.Node)
		for _, hub := range masterHubs {
			masterHubMap[hub.NodeID] = hub
		}

		stgIDs := lo.Map(stgs, func(stg cdssdk.Storage, i int) cdssdk.StorageID { return stg.StorageID })

		// Shard storage configs.
		shards, err := svc.db2.ShardStorage().BatchGetByStorageIDs(tx, stgIDs)
		if err != nil && err != gorm.ErrRecordNotFound {
			return fmt.Errorf("getting shard storage: %w", err)
		}
		for i := range shards {
			// Address of the slice element, not of a range variable:
			// &shard would make every detail alias one shared variable.
			stgsMp[shards[i].StorageID].Shard = &shards[i]
		}
		for _, detail := range stgsMp {
			if detail.Shard != nil {
				// Look the hub up by the storage's MasterHub ID;
				// detail.MasterHub itself is still nil at this point.
				hub := masterHubMap[detail.Storage.MasterHub]
				detail.MasterHub = &hub
			}
		}

		// Shared storage configs.
		shareds, err := svc.db2.SharedStorage().BatchGetByStorageIDs(tx, stgIDs)
		if err != nil && err != gorm.ErrRecordNotFound {
			return fmt.Errorf("getting shared storage: %w", err)
		}
		for i := range shareds {
			stgsMp[shareds[i].StorageID].Shared = &shareds[i]
		}

		return nil
	})
	if err != nil {
		// Report transaction failures instead of replying OK with partial data.
		logger.WithField("UserID", msg.UserID).Warn(err.Error())
		return nil, mq.Failed(errorcode.OperationFailed, "get user storage details failed")
	}

	ret := make([]stgmod.StorageDetail, 0, len(stgsMp))
	for _, detail := range stgsMp {
		ret = append(ret, *detail)
	}

	return mq.ReplyOK(coormq.RespGetUserStorageDetails(ret))
}

func (svc *Service) GetStorageByName(msg *coormq.GetStorageByName) (*coormq.GetStorageByNameResp, *mq.CodeMessage) {
@@ -89,7 +192,7 @@ func (svc *Service) StoragePackageLoaded(msg *coormq.StoragePackageLoaded) (*coo
return fmt.Errorf("getting storage: %w", err)
}

err = svc.db2.PinnedObject().CreateFromPackage(tx, msg.PackageID, stg.NodeID)
err = svc.db2.PinnedObject().CreateFromPackage(tx, msg.PackageID, stg.StorageID)
if err != nil {
return fmt.Errorf("creating pinned object from package: %w", err)
}


+ 5
- 6
coordinator/internal/mq/temp.go View File

@@ -1,15 +1,14 @@
package mq

import (
"database/sql"
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

@@ -18,15 +17,15 @@ func (svc *Service) GetDatabaseAll(msg *coormq.GetDatabaseAll) (*coormq.GetDatab
var pkgs []cdssdk.Package
var objs []stgmod.ObjectDetail

err := svc.db.DoTx(sql.LevelSerializable, func(tx *sqlx.Tx) error {
err := svc.db2.DoTx(func(tx db2.SQLContext) error {
var err error
bkts, err = svc.db.Bucket().GetUserBuckets(tx, msg.UserID)
bkts, err = svc.db2.Bucket().GetUserBuckets(tx, msg.UserID)
if err != nil {
return fmt.Errorf("get user buckets: %w", err)
}

for _, bkt := range bkts {
ps, err := svc.db.Package().GetBucketPackages(tx, msg.UserID, bkt.BucketID)
ps, err := svc.db2.Package().GetBucketPackages(tx, msg.UserID, bkt.BucketID)
if err != nil {
return fmt.Errorf("get bucket packages: %w", err)
}
@@ -34,7 +33,7 @@ func (svc *Service) GetDatabaseAll(msg *coormq.GetDatabaseAll) (*coormq.GetDatab
}

for _, pkg := range pkgs {
os, err := svc.db.Object().GetPackageObjectDetails(tx, pkg.PackageID)
os, err := svc.db2.Object().GetPackageObjectDetails(tx, pkg.PackageID)
if err != nil {
return fmt.Errorf("get package object details: %w", err)
}


+ 1
- 1
scanner/internal/config/config.go View File

@@ -4,7 +4,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
log "gitlink.org.cn/cloudream/common/pkgs/logger"
c "gitlink.org.cn/cloudream/common/utils/config"
db "gitlink.org.cn/cloudream/storage/common/pkgs/db/config"
db "gitlink.org.cn/cloudream/storage/common/pkgs/db2/config"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
)



+ 24
- 13
scanner/internal/event/agent_cache_gc.go View File

@@ -1,14 +1,14 @@
package event

import (
"database/sql"
"fmt"
"time"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"

agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
@@ -37,7 +37,7 @@ func (t *AgentCacheGC) TryMerge(other Event) bool {
return false
}

if event.NodeID != t.NodeID {
if event.StorageID != t.StorageID {
return false
}

@@ -57,7 +57,7 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) {
// 使用分布式锁进行资源锁定
mutex, err := reqbuilder.NewBuilder().
// 执行IPFS垃圾回收
IPFS().GC(t.NodeID).
Shard().GC(t.StorageID).
MutexLock(execCtx.Args.DistLock)
if err != nil {
log.Warnf("acquire locks failed, err: %s", err.Error())
@@ -66,9 +66,20 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) {
defer mutex.Unlock()

// 收集需要进行垃圾回收的文件哈希值
var allFileHashes []string
err = execCtx.Args.DB.DoTx(sql.LevelSerializable, func(tx *sqlx.Tx) error {
blocks, err := execCtx.Args.DB.ObjectBlock().GetByNodeID(tx, t.NodeID)
var allFileHashes []cdssdk.FileHash
var masterHub cdssdk.Node
err = execCtx.Args.DB.DoTx(func(tx db2.SQLContext) error {
stg, err := execCtx.Args.DB.Storage().GetByID(tx, t.StorageID)
if err != nil {
return fmt.Errorf("getting storage by id: %w", err)
}

masterHub, err = execCtx.Args.DB.Node().GetByID(tx, stg.MasterHub)
if err != nil {
return fmt.Errorf("getting master hub by id: %w", err)
}

blocks, err := execCtx.Args.DB.ObjectBlock().GetByStorageID(tx, t.StorageID)
if err != nil {
return fmt.Errorf("getting object blocks by node id: %w", err)
}
@@ -76,7 +87,7 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) {
allFileHashes = append(allFileHashes, c.FileHash)
}

objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByNodeID(tx, t.NodeID)
objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByStorageID(tx, t.StorageID)
if err != nil {
return fmt.Errorf("getting pinned objects by node id: %w", err)
}
@@ -87,22 +98,22 @@ func (t *AgentCacheGC) Execute(execCtx ExecuteContext) {
return nil
})
if err != nil {
log.WithField("NodeID", t.NodeID).Warn(err.Error())
log.WithField("NodeID", t.StorageID).Warn(err.Error())
return
}

// 获取与节点通信的代理客户端
agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID)
agtCli, err := stgglb.AgentMQPool.Acquire(masterHub.NodeID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

// 向代理发送垃圾回收请求
_, err = agtCli.CacheGC(agtmq.ReqCacheGC(allFileHashes), mq.RequestOption{Timeout: time.Minute})
_, err = agtCli.CacheGC(agtmq.ReqCacheGC(t.StorageID, allFileHashes), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("ipfs gc: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("ipfs gc: %s", err.Error())
return
}
}


+ 30
- 25
scanner/internal/event/agent_check_cache.go View File

@@ -1,16 +1,15 @@
package event

import (
"database/sql"
"time"

"github.com/jmoiron/sqlx"
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)
@@ -35,7 +34,7 @@ func (t *AgentCheckCache) TryMerge(other Event) bool {
return false
}

if event.NodeID != t.NodeID {
if event.StorageID != t.StorageID {
return false
}

@@ -51,23 +50,29 @@ func (t *AgentCheckCache) Execute(execCtx ExecuteContext) {
log.Debugf("end, time: %v", time.Since(startTime))
}()

agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID)
stg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("create agent client failed, err: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("getting shard storage by storage id: %s", err.Error())
return
}

agtCli, err := stgglb.AgentMQPool.Acquire(stg.MasterHub)
if err != nil {
log.WithField("NodeID", t.StorageID).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

checkResp, err := agtCli.CheckCache(agtmq.NewCheckCache(), mq.RequestOption{Timeout: time.Minute})
checkResp, err := agtCli.CheckCache(agtmq.NewCheckCache(t.StorageID), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("checking ipfs: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("checking ipfs: %s", err.Error())
return
}

realFileHashes := lo.SliceToMap(checkResp.FileHashes, func(hash string) (string, bool) { return hash, true })
realFileHashes := lo.SliceToMap(checkResp.FileHashes, func(hash cdssdk.FileHash) (cdssdk.FileHash, bool) { return hash, true })

// 在事务中执行缓存更新操作
execCtx.Args.DB.DoTx(sql.LevelSerializable, func(tx *sqlx.Tx) error {
execCtx.Args.DB.DoTx(func(tx db2.SQLContext) error {
t.checkCache(execCtx, tx, realFileHashes)

t.checkPinnedObject(execCtx, tx, realFileHashes)
@@ -78,21 +83,21 @@ func (t *AgentCheckCache) Execute(execCtx ExecuteContext) {
}

// checkCache 对比Cache表中的记录,根据实际存在的文件哈希值,进行增加或删除操作
func (t *AgentCheckCache) checkCache(execCtx ExecuteContext, tx *sqlx.Tx, realFileHashes map[string]bool) {
func (t *AgentCheckCache) checkCache(execCtx ExecuteContext, tx db2.SQLContext, realFileHashes map[cdssdk.FileHash]bool) {
log := logger.WithType[AgentCheckCache]("Event")

caches, err := execCtx.Args.DB.Cache().GetByNodeID(tx, t.NodeID)
caches, err := execCtx.Args.DB.Cache().GetByStorageID(tx, t.StorageID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting caches by node id: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("getting caches by node id: %s", err.Error())
return
}

realFileHashesCp := make(map[string]bool)
realFileHashesCp := make(map[cdssdk.FileHash]bool)
for k, v := range realFileHashes {
realFileHashesCp[k] = v
}

var rms []string
var rms []cdssdk.FileHash
for _, c := range caches {
if realFileHashesCp[c.FileHash] {
delete(realFileHashesCp, c.FileHash)
@@ -102,14 +107,14 @@ func (t *AgentCheckCache) checkCache(execCtx ExecuteContext, tx *sqlx.Tx, realFi
}

if len(rms) > 0 {
err = execCtx.Args.DB.Cache().NodeBatchDelete(tx, t.NodeID, rms)
err = execCtx.Args.DB.Cache().StorageBatchDelete(tx, t.StorageID, rms)
if err != nil {
log.Warnf("batch delete node caches: %w", err.Error())
}
}

if len(realFileHashesCp) > 0 {
err = execCtx.Args.DB.Cache().BatchCreateOnSameNode(tx, lo.Keys(realFileHashesCp), t.NodeID, 0)
err = execCtx.Args.DB.Cache().BatchCreateOnSameStorage(tx, lo.Keys(realFileHashesCp), t.StorageID, 0)
if err != nil {
log.Warnf("batch create node caches: %w", err)
return
@@ -118,12 +123,12 @@ func (t *AgentCheckCache) checkCache(execCtx ExecuteContext, tx *sqlx.Tx, realFi
}

// checkPinnedObject 对比PinnedObject表,若实际文件不存在,则进行删除操作
func (t *AgentCheckCache) checkPinnedObject(execCtx ExecuteContext, tx *sqlx.Tx, realFileHashes map[string]bool) {
func (t *AgentCheckCache) checkPinnedObject(execCtx ExecuteContext, tx db2.SQLContext, realFileHashes map[cdssdk.FileHash]bool) {
log := logger.WithType[AgentCheckCache]("Event")

objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByNodeID(tx, t.NodeID)
objs, err := execCtx.Args.DB.PinnedObject().GetObjectsByStorageID(tx, t.StorageID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting pinned objects by node id: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("getting pinned objects by node id: %s", err.Error())
return
}

@@ -136,7 +141,7 @@ func (t *AgentCheckCache) checkPinnedObject(execCtx ExecuteContext, tx *sqlx.Tx,
}

if len(rms) > 0 {
err = execCtx.Args.DB.PinnedObject().NodeBatchDelete(tx, t.NodeID, rms)
err = execCtx.Args.DB.PinnedObject().StorageBatchDelete(tx, t.StorageID, rms)
if err != nil {
log.Warnf("batch delete node pinned objects: %s", err.Error())
}
@@ -144,16 +149,16 @@ func (t *AgentCheckCache) checkPinnedObject(execCtx ExecuteContext, tx *sqlx.Tx,
}

// checkObjectBlock 对比ObjectBlock表,若实际文件不存在,则进行删除操作
func (t *AgentCheckCache) checkObjectBlock(execCtx ExecuteContext, tx *sqlx.Tx, realFileHashes map[string]bool) {
func (t *AgentCheckCache) checkObjectBlock(execCtx ExecuteContext, tx db2.SQLContext, realFileHashes map[cdssdk.FileHash]bool) {
log := logger.WithType[AgentCheckCache]("Event")

blocks, err := execCtx.Args.DB.ObjectBlock().GetByNodeID(tx, t.NodeID)
blocks, err := execCtx.Args.DB.ObjectBlock().GetByStorageID(tx, t.StorageID)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("getting object blocks by node id: %s", err.Error())
log.WithField("NodeID", t.StorageID).Warnf("getting object blocks by node id: %s", err.Error())
return
}

var rms []string
var rms []cdssdk.FileHash
for _, b := range blocks {
if realFileHashes[b.FileHash] {
continue
@@ -162,7 +167,7 @@ func (t *AgentCheckCache) checkObjectBlock(execCtx ExecuteContext, tx *sqlx.Tx,
}

if len(rms) > 0 {
err = execCtx.Args.DB.ObjectBlock().NodeBatchDelete(tx, t.NodeID, rms)
err = execCtx.Args.DB.ObjectBlock().StorageBatchDelete(tx, t.StorageID, rms)
if err != nil {
log.Warnf("batch delete node object blocks: %s", err.Error())
}


+ 3
- 3
scanner/internal/event/agent_check_state.go View File

@@ -37,7 +37,7 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckState))
defer log.Debugf("end")

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), t.NodeID)
node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.DefCtx(), t.NodeID)
if err == sql.ErrNoRows {
return
}
@@ -61,7 +61,7 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
// 检查上次上报时间,超时的设置为不可用
// TODO 没有上报过是否要特殊处理?
if node.LastReportTime != nil && time.Since(*node.LastReportTime) > time.Duration(config.Cfg().NodeUnavailableSeconds)*time.Second {
err := execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.SQLCtx(), t.NodeID, consts.NodeStateUnavailable)
err := execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.DefCtx(), t.NodeID, consts.NodeStateUnavailable)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("set node state failed, err: %s", err.Error())
}
@@ -70,7 +70,7 @@ func (t *AgentCheckState) Execute(execCtx ExecuteContext) {
}

// TODO 如果以后还有其他的状态,要判断哪些状态下能设置Normal
err = execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.SQLCtx(), t.NodeID, consts.NodeStateNormal)
err = execCtx.Args.DB.Node().UpdateState(execCtx.Args.DB.DefCtx(), t.NodeID, consts.NodeStateNormal)
if err != nil {
log.WithField("NodeID", t.NodeID).Warnf("change node state failed, err: %s", err.Error())
}


+ 8
- 8
scanner/internal/event/agent_check_storage.go View File

@@ -4,13 +4,13 @@ import (
"database/sql"
"time"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/consts"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)
@@ -45,7 +45,7 @@ func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {

// 读取数据的地方就不加锁了,因为check任务会反复执行,单次失败问题不大

stg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.SQLCtx(), t.StorageID)
stg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID)
if err != nil {
if err != sql.ErrNoRows {
log.WithField("StorageID", t.StorageID).Warnf("get storage failed, err: %s", err.Error())
@@ -53,7 +53,7 @@ func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {
return
}

node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.SQLCtx(), stg.NodeID)
node, err := execCtx.Args.DB.Node().GetByID(execCtx.Args.DB.DefCtx(), stg.MasterHub)
if err != nil {
if err != sql.ErrNoRows {
log.WithField("StorageID", t.StorageID).Warnf("get storage node failed, err: %s", err.Error())
@@ -65,16 +65,16 @@ func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {
return
}

agtCli, err := stgglb.AgentMQPool.Acquire(stg.NodeID)
agtCli, err := stgglb.AgentMQPool.Acquire(stg.MasterHub)
if err != nil {
log.WithField("NodeID", stg.NodeID).Warnf("create agent client failed, err: %s", err.Error())
log.WithField("MasterHub", stg.MasterHub).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

checkResp, err := agtCli.StorageCheck(agtmq.NewStorageCheck(stg.StorageID), mq.RequestOption{Timeout: time.Minute})
if err != nil {
log.WithField("NodeID", stg.NodeID).Warnf("checking storage: %s", err.Error())
log.WithField("MasterHub", stg.MasterHub).Warnf("checking storage: %s", err.Error())
return
}
realPkgs := make(map[cdssdk.UserID]map[cdssdk.PackageID]bool)
@@ -88,7 +88,7 @@ func (t *AgentCheckStorage) Execute(execCtx ExecuteContext) {
pkgs[pkg.PackageID] = true
}

execCtx.Args.DB.DoTx(sql.LevelSerializable, func(tx *sqlx.Tx) error {
execCtx.Args.DB.DoTx(func(tx db2.SQLContext) error {
packages, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(tx, t.StorageID)
if err != nil {
log.Warnf("getting storage package: %s", err.Error())


+ 4
- 4
scanner/internal/event/agent_storage_gc.go View File

@@ -55,21 +55,21 @@ func (t *AgentStorageGC) Execute(execCtx ExecuteContext) {
}
defer mutex.Unlock()

getStg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.SQLCtx(), t.StorageID)
getStg, err := execCtx.Args.DB.Storage().GetByID(execCtx.Args.DB.DefCtx(), t.StorageID)
if err != nil {
log.WithField("StorageID", t.StorageID).Warnf("getting storage: %s", err.Error())
return
}

stgPkgs, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.SQLCtx(), t.StorageID)
stgPkgs, err := execCtx.Args.DB.StoragePackage().GetAllByStorageID(execCtx.Args.DB.DefCtx(), t.StorageID)
if err != nil {
log.WithField("StorageID", t.StorageID).Warnf("getting storage packages: %s", err.Error())
return
}

agtCli, err := stgglb.AgentMQPool.Acquire(getStg.NodeID)
agtCli, err := stgglb.AgentMQPool.Acquire(getStg.MasterHub)
if err != nil {
log.WithField("NodeID", getStg.NodeID).Warnf("create agent client failed, err: %s", err.Error())
log.WithField("MasterHub", getStg.MasterHub).Warnf("create agent client failed, err: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)


+ 1
- 1
scanner/internal/event/check_package.go View File

@@ -32,7 +32,7 @@ func (t *CheckPackage) Execute(execCtx ExecuteContext) {
defer log.Debugf("end")

for _, objID := range t.PackageIDs {
err := execCtx.Args.DB.Package().DeleteUnused(execCtx.Args.DB.SQLCtx(), objID)
err := execCtx.Args.DB.Package().DeleteUnused(execCtx.Args.DB.DefCtx(), objID)
if err != nil {
log.WithField("PackageID", objID).Warnf("delete unused package failed, err: %s", err.Error())
}


+ 317
- 241
scanner/internal/event/check_package_redundancy.go
File diff suppressed because it is too large
View File


+ 158
- 182
scanner/internal/event/clean_pinned.go View File

@@ -22,7 +22,7 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch2/parser"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitchlrc/ops2"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)
@@ -68,40 +68,39 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
return
}

stats, err := execCtx.Args.DB.PackageAccessStat().GetByPackageID(execCtx.Args.DB.SQLCtx(), t.PackageID)
stats, err := execCtx.Args.DB.PackageAccessStat().GetByPackageID(execCtx.Args.DB.DefCtx(), t.PackageID)
if err != nil {
log.Warnf("getting package access stat: %s", err.Error())
return
}
var readerNodeIDs []cdssdk.NodeID
var readerStgIDs []cdssdk.StorageID
for _, item := range stats {
// TODO 可以考虑做成配置
if item.Amount >= float64(len(getObjs.Objects)/2) {
readerNodeIDs = append(readerNodeIDs, item.NodeID)
readerStgIDs = append(readerStgIDs, item.StorageID)
}
}

// 注意!需要保证allNodeID包含所有之后可能用到的节点ID
// 注意!需要保证allStgID包含所有之后可能用到的节点ID
// TOOD 可以考虑设计Cache机制
var allNodeID []cdssdk.NodeID
var allStgID []cdssdk.StorageID
for _, obj := range getObjs.Objects {
for _, block := range obj.Blocks {
allNodeID = append(allNodeID, block.NodeID)
allStgID = append(allStgID, block.StorageID)
}
allNodeID = append(allNodeID, obj.PinnedAt...)
allStgID = append(allStgID, obj.PinnedAt...)
}
allNodeID = append(allNodeID, readerNodeIDs...)
allStgID = append(allStgID, readerStgIDs...)

getNodeResp, err := coorCli.GetNodes(coormq.NewGetNodes(lo.Union(allNodeID)))
getStgResp, err := coorCli.GetStorageDetails(coormq.ReqGetStorageDetails(lo.Union(allStgID)))
if err != nil {
log.Warnf("getting nodes: %s", err.Error())
return
}

allNodeInfos := make(map[cdssdk.NodeID]*cdssdk.Node)
for _, node := range getNodeResp.Nodes {
n := node
allNodeInfos[node.NodeID] = &n
allStgInfos := make(map[cdssdk.StorageID]*stgmod.StorageDetail)
for _, stg := range getStgResp.Storages {
allStgInfos[stg.Storage.StorageID] = stg
}

// 只对ec和rep对象进行处理
@@ -116,36 +115,35 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
}

planBld := exec.NewPlanBuilder()
pinPlans := make(map[cdssdk.NodeID]*[]string)
plnningNodeIDs := make(map[cdssdk.NodeID]bool)
planningStgIDs := make(map[cdssdk.StorageID]bool)

// 对于rep对象,统计出所有对象块分布最多的两个节点,用这两个节点代表所有rep对象块的分布,去进行退火算法
var repObjectsUpdating []coormq.UpdatingObjectRedundancy
repMostNodeIDs := t.summaryRepObjectBlockNodes(repObjects)
solu := t.startAnnealing(allNodeInfos, readerNodeIDs, annealingObject{
solu := t.startAnnealing(allStgInfos, readerStgIDs, annealingObject{
totalBlockCount: 1,
minBlockCnt: 1,
pinnedAt: repMostNodeIDs,
blocks: nil,
})
for _, obj := range repObjects {
repObjectsUpdating = append(repObjectsUpdating, t.makePlansForRepObject(solu, obj, pinPlans))
repObjectsUpdating = append(repObjectsUpdating, t.makePlansForRepObject(allStgInfos, solu, obj, planBld, planningStgIDs))
}

// 对于ec对象,则每个对象单独进行退火算法
var ecObjectsUpdating []coormq.UpdatingObjectRedundancy
for _, obj := range ecObjects {
ecRed := obj.Object.Redundancy.(*cdssdk.ECRedundancy)
solu := t.startAnnealing(allNodeInfos, readerNodeIDs, annealingObject{
solu := t.startAnnealing(allStgInfos, readerStgIDs, annealingObject{
totalBlockCount: ecRed.N,
minBlockCnt: ecRed.K,
pinnedAt: obj.PinnedAt,
blocks: obj.Blocks,
})
ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allNodeInfos, solu, obj, planBld, plnningNodeIDs))
ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allStgInfos, solu, obj, planBld, planningStgIDs))
}

ioSwRets, err := t.executePlans(execCtx, pinPlans, planBld, plnningNodeIDs)
ioSwRets, err := t.executePlans(execCtx, planBld, planningStgIDs)
if err != nil {
log.Warn(err.Error())
return
@@ -166,65 +164,65 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
}
}

func (t *CleanPinned) summaryRepObjectBlockNodes(objs []stgmod.ObjectDetail) []cdssdk.NodeID {
type nodeBlocks struct {
NodeID cdssdk.NodeID
Count int
func (t *CleanPinned) summaryRepObjectBlockNodes(objs []stgmod.ObjectDetail) []cdssdk.StorageID {
type stgBlocks struct {
StorageID cdssdk.StorageID
Count int
}

nodeBlocksMap := make(map[cdssdk.NodeID]*nodeBlocks)
stgBlocksMap := make(map[cdssdk.StorageID]*stgBlocks)
for _, obj := range objs {
cacheBlockNodes := make(map[cdssdk.NodeID]bool)
cacheBlockStgs := make(map[cdssdk.StorageID]bool)
for _, block := range obj.Blocks {
if _, ok := nodeBlocksMap[block.NodeID]; !ok {
nodeBlocksMap[block.NodeID] = &nodeBlocks{
NodeID: block.NodeID,
Count: 0,
if _, ok := stgBlocksMap[block.StorageID]; !ok {
stgBlocksMap[block.StorageID] = &stgBlocks{
StorageID: block.StorageID,
Count: 0,
}
}
nodeBlocksMap[block.NodeID].Count++
cacheBlockNodes[block.NodeID] = true
stgBlocksMap[block.StorageID].Count++
cacheBlockStgs[block.StorageID] = true
}

for _, nodeID := range obj.PinnedAt {
if cacheBlockNodes[nodeID] {
if cacheBlockStgs[nodeID] {
continue
}

if _, ok := nodeBlocksMap[nodeID]; !ok {
nodeBlocksMap[nodeID] = &nodeBlocks{
NodeID: nodeID,
Count: 0,
if _, ok := stgBlocksMap[nodeID]; !ok {
stgBlocksMap[nodeID] = &stgBlocks{
StorageID: nodeID,
Count: 0,
}
}
nodeBlocksMap[nodeID].Count++
stgBlocksMap[nodeID].Count++
}
}

nodes := lo.Values(nodeBlocksMap)
sort2.Sort(nodes, func(left *nodeBlocks, right *nodeBlocks) int {
stgs := lo.Values(stgBlocksMap)
sort2.Sort(stgs, func(left *stgBlocks, right *stgBlocks) int {
return right.Count - left.Count
})

// 只选出块数超过一半的节点,但要保证至少有两个节点
for i := 2; i < len(nodes); i++ {
if nodes[i].Count < len(objs)/2 {
nodes = nodes[:i]
for i := 2; i < len(stgs); i++ {
if stgs[i].Count < len(objs)/2 {
stgs = stgs[:i]
break
}
}

return lo.Map(nodes, func(item *nodeBlocks, idx int) cdssdk.NodeID { return item.NodeID })
return lo.Map(stgs, func(item *stgBlocks, idx int) cdssdk.StorageID { return item.StorageID })
}

type annealingState struct {
allNodeInfos map[cdssdk.NodeID]*cdssdk.Node // 所有节点的信息
readerNodeIDs []cdssdk.NodeID // 近期可能访问此对象的节点
nodesSortedByReader map[cdssdk.NodeID][]nodeDist // 拥有数据的节点到每个可能访问对象的节点按距离排序
object annealingObject // 进行退火的对象
blockList []objectBlock // 排序后的块分布情况
nodeBlockBitmaps map[cdssdk.NodeID]*bitmap.Bitmap64 // 用位图的形式表示每一个节点上有哪些块
nodeCombTree combinatorialTree // 节点组合树,用于加速计算容灾度
allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail // 所有节点的信息
readerStgIDs []cdssdk.StorageID // 近期可能访问此对象的节点
stgsSortedByReader map[cdssdk.StorageID][]stgDist // 拥有数据的节点到每个可能访问对象的节点按距离排序
object annealingObject // 进行退火的对象
blockList []objectBlock // 排序后的块分布情况
stgBlockBitmaps map[cdssdk.StorageID]*bitmap.Bitmap64 // 用位图的形式表示每一个节点上有哪些块
stgCombTree combinatorialTree // 节点组合树,用于加速计算容灾度

maxScore float64 // 搜索过程中得到过的最大分数
maxScoreRmBlocks []bool // 最大分数对应的删除方案
@@ -236,28 +234,28 @@ type annealingState struct {

type objectBlock struct {
Index int
NodeID cdssdk.NodeID
HasEntity bool // 节点拥有实际的文件数据块
HasShadow bool // 如果节点拥有完整文件数据,那么认为这个节点拥有所有块,这些块被称为影子块
FileHash string // 只有在拥有实际文件数据块时,这个字段才有值
StorageID cdssdk.StorageID
HasEntity bool // 节点拥有实际的文件数据块
HasShadow bool // 如果节点拥有完整文件数据,那么认为这个节点拥有所有块,这些块被称为影子块
FileHash cdssdk.FileHash // 只有在拥有实际文件数据块时,这个字段才有值
}

type nodeDist struct {
NodeID cdssdk.NodeID
Distance float64
type stgDist struct {
StorageID cdssdk.StorageID
Distance float64
}

type combinatorialTree struct {
nodes []combinatorialTreeNode
blocksMaps map[int]bitmap.Bitmap64
nodeIDToLocalNodeID map[cdssdk.NodeID]int
localNodeIDToNodeID []cdssdk.NodeID
nodes []combinatorialTreeNode
blocksMaps map[int]bitmap.Bitmap64
stgIDToLocalStgID map[cdssdk.StorageID]int
localStgIDToStgID []cdssdk.StorageID
}

type annealingObject struct {
totalBlockCount int
minBlockCnt int
pinnedAt []cdssdk.NodeID
pinnedAt []cdssdk.StorageID
blocks []stgmod.ObjectBlock
}

@@ -267,17 +265,17 @@ const (
iterActionBreak = 2
)

func newCombinatorialTree(nodeBlocksMaps map[cdssdk.NodeID]*bitmap.Bitmap64) combinatorialTree {
func newCombinatorialTree(stgBlocksMaps map[cdssdk.StorageID]*bitmap.Bitmap64) combinatorialTree {
tree := combinatorialTree{
blocksMaps: make(map[int]bitmap.Bitmap64),
nodeIDToLocalNodeID: make(map[cdssdk.NodeID]int),
blocksMaps: make(map[int]bitmap.Bitmap64),
stgIDToLocalStgID: make(map[cdssdk.StorageID]int),
}

tree.nodes = make([]combinatorialTreeNode, (1 << len(nodeBlocksMaps)))
for id, mp := range nodeBlocksMaps {
tree.nodeIDToLocalNodeID[id] = len(tree.localNodeIDToNodeID)
tree.blocksMaps[len(tree.localNodeIDToNodeID)] = *mp
tree.localNodeIDToNodeID = append(tree.localNodeIDToNodeID, id)
tree.nodes = make([]combinatorialTreeNode, (1 << len(stgBlocksMaps)))
for id, mp := range stgBlocksMaps {
tree.stgIDToLocalStgID[id] = len(tree.localStgIDToStgID)
tree.blocksMaps[len(tree.localStgIDToStgID)] = *mp
tree.localStgIDToStgID = append(tree.localStgIDToStgID, id)
}

tree.nodes[0].localNodeID = -1
@@ -288,7 +286,7 @@ func newCombinatorialTree(nodeBlocksMaps map[cdssdk.NodeID]*bitmap.Bitmap64) com
}

func (t *combinatorialTree) initNode(minAvaiLocalNodeID int, parent *combinatorialTreeNode, index *int) {
for i := minAvaiLocalNodeID; i < len(t.nodeIDToLocalNodeID); i++ {
for i := minAvaiLocalNodeID; i < len(t.stgIDToLocalStgID); i++ {
curIndex := *index
*index++
bitMp := t.blocksMaps[i]
@@ -308,7 +306,7 @@ func (t *combinatorialTree) GetDepth(index int) int {
depth := 0

// 反复判断节点在哪个子树。从左到右,子树节点的数量呈现8 4 2的变化,由此可以得到每个子树的索引值的范围
subTreeCount := 1 << len(t.nodeIDToLocalNodeID)
subTreeCount := 1 << len(t.stgIDToLocalStgID)
for index > 0 {
if index < subTreeCount {
// 定位到一个子树后,深度+1,然后进入这个子树,使用同样的方法再进行定位。
@@ -328,16 +326,16 @@ func (t *combinatorialTree) GetDepth(index int) int {

// 更新某一个算力中心节点的块分布位图,同时更新它对应组合树节点的所有子节点。
// 如果更新到某个节点时,已有K个块,那么就不会再更新它的子节点
func (t *combinatorialTree) UpdateBitmap(nodeID cdssdk.NodeID, mp bitmap.Bitmap64, k int) {
t.blocksMaps[t.nodeIDToLocalNodeID[nodeID]] = mp
func (t *combinatorialTree) UpdateBitmap(stgID cdssdk.StorageID, mp bitmap.Bitmap64, k int) {
t.blocksMaps[t.stgIDToLocalStgID[stgID]] = mp
// 首先定义两种遍历树节点时的移动方式:
// 1. 竖直移动(深度增加):从一个节点移动到它最左边的子节点。每移动一步,index+1
// 2. 水平移动:从一个节点移动到它右边的兄弟节点。每移动一步,根据它所在的深度,index+8,+4,+2
// LocalNodeID从0开始,将其+1后得到移动步数steps。
// LocalID从0开始,将其+1后得到移动步数steps。
// 将移动步数拆成多部分,分配到上述的两种移动方式上,并进行任意组合,且保证第一次为至少进行一次的竖直移动,移动之后的节点都会是同一个计算中心节点。
steps := t.nodeIDToLocalNodeID[nodeID] + 1
steps := t.stgIDToLocalStgID[stgID] + 1
for d := 1; d <= steps; d++ {
t.iterCombBits(len(t.nodeIDToLocalNodeID)-1, steps-d, 0, func(i int) {
t.iterCombBits(len(t.stgIDToLocalStgID)-1, steps-d, 0, func(i int) {
index := d + i
node := &t.nodes[index]

@@ -379,7 +377,7 @@ func (t *combinatorialTree) FindKBlocksMaxDepth(k int) int {
// 由于遍历时采用的是深度优先的算法,因此遍历到这个叶子节点时,叶子节点再加一个节点的组合已经在前面搜索过,
// 所以用当前叶子节点深度+1来作为当前分支的结果就可以,即使当前情况下增加任意一个节点依然不够K块,
// 可以使用同样的思路去递推到当前叶子节点增加两个块的情况。
if t.nodes[index].localNodeID == len(t.nodeIDToLocalNodeID)-1 {
if t.nodes[index].localNodeID == len(t.stgIDToLocalStgID)-1 {
if maxDepth < depth+1 {
maxDepth = depth + 1
}
@@ -388,8 +386,8 @@ func (t *combinatorialTree) FindKBlocksMaxDepth(k int) int {
return iterActionNone
})

if maxDepth == -1 || maxDepth > len(t.nodeIDToLocalNodeID) {
return len(t.nodeIDToLocalNodeID)
if maxDepth == -1 || maxDepth > len(t.stgIDToLocalStgID) {
return len(t.stgIDToLocalStgID)
}

return maxDepth
@@ -411,7 +409,7 @@ func (t *combinatorialTree) iterChildren(index int, do func(index int, parentInd
childIndex := index + 1
curDepth := t.GetDepth(index)

childCounts := len(t.nodeIDToLocalNodeID) - 1 - curNode.localNodeID
childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localNodeID
if childCounts == 0 {
return
}
@@ -440,7 +438,7 @@ func (t *combinatorialTree) itering(index int, parentIndex int, depth int, do fu
curNode := &t.nodes[index]
childIndex := index + 1

childCounts := len(t.nodeIDToLocalNodeID) - 1 - curNode.localNodeID
childCounts := len(t.stgIDToLocalStgID) - 1 - curNode.localNodeID
if childCounts == 0 {
return iterActionNone
}
@@ -470,13 +468,13 @@ type annealingSolution struct {
rmBlocks []bool // 要删除哪些块
}

func (t *CleanPinned) startAnnealing(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node, readerNodeIDs []cdssdk.NodeID, object annealingObject) annealingSolution {
func (t *CleanPinned) startAnnealing(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, readerStgIDs []cdssdk.StorageID, object annealingObject) annealingSolution {
state := &annealingState{
allNodeInfos: allNodeInfos,
readerNodeIDs: readerNodeIDs,
nodesSortedByReader: make(map[cdssdk.NodeID][]nodeDist),
object: object,
nodeBlockBitmaps: make(map[cdssdk.NodeID]*bitmap.Bitmap64),
allStgInfos: allStgInfos,
readerStgIDs: readerStgIDs,
stgsSortedByReader: make(map[cdssdk.StorageID][]stgDist),
object: object,
stgBlockBitmaps: make(map[cdssdk.StorageID]*bitmap.Bitmap64),
}

t.initBlockList(state)
@@ -490,7 +488,7 @@ func (t *CleanPinned) startAnnealing(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node

state.rmBlocks = make([]bool, len(state.blockList))
state.inversedIndex = -1
state.nodeCombTree = newCombinatorialTree(state.nodeBlockBitmaps)
state.stgCombTree = newCombinatorialTree(state.stgBlockBitmaps)

state.lastScore = t.calcScore(state)
state.maxScore = state.lastScore
@@ -507,8 +505,8 @@ func (t *CleanPinned) startAnnealing(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node
state.inversedIndex = rand.Intn(len(state.rmBlocks))
block := state.blockList[state.inversedIndex]
state.rmBlocks[state.inversedIndex] = !state.rmBlocks[state.inversedIndex]
state.nodeBlockBitmaps[block.NodeID].Set(block.Index, !state.rmBlocks[state.inversedIndex])
state.nodeCombTree.UpdateBitmap(block.NodeID, *state.nodeBlockBitmaps[block.NodeID], state.object.minBlockCnt)
state.stgBlockBitmaps[block.StorageID].Set(block.Index, !state.rmBlocks[state.inversedIndex])
state.stgCombTree.UpdateBitmap(block.StorageID, *state.stgBlockBitmaps[block.StorageID], state.object.minBlockCnt)

curScore := t.calcScore(state)

@@ -516,8 +514,8 @@ func (t *CleanPinned) startAnnealing(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node
// 如果新方案比旧方案得分低,且没有要求强制接受新方案,那么就将变化改回去
if curScore == 0 || (dScore < 0 && !t.alwaysAccept(curTemp, dScore, coolingRate)) {
state.rmBlocks[state.inversedIndex] = !state.rmBlocks[state.inversedIndex]
state.nodeBlockBitmaps[block.NodeID].Set(block.Index, !state.rmBlocks[state.inversedIndex])
state.nodeCombTree.UpdateBitmap(block.NodeID, *state.nodeBlockBitmaps[block.NodeID], state.object.minBlockCnt)
state.stgBlockBitmaps[block.StorageID].Set(block.Index, !state.rmBlocks[state.inversedIndex])
state.stgCombTree.UpdateBitmap(block.StorageID, *state.stgBlockBitmaps[block.StorageID], state.object.minBlockCnt)
// fmt.Printf("\n")
} else {
// fmt.Printf(" accept!\n")
@@ -537,7 +535,7 @@ func (t *CleanPinned) startAnnealing(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node
}

func (t *CleanPinned) initBlockList(ctx *annealingState) {
blocksMap := make(map[cdssdk.NodeID][]objectBlock)
blocksMap := make(map[cdssdk.StorageID][]objectBlock)

// 先生成所有的影子块
for _, pinned := range ctx.object.pinnedAt {
@@ -545,7 +543,7 @@ func (t *CleanPinned) initBlockList(ctx *annealingState) {
for i := 0; i < ctx.object.totalBlockCount; i++ {
blocks = append(blocks, objectBlock{
Index: i,
NodeID: pinned,
StorageID: pinned,
HasShadow: true,
})
}
@@ -554,7 +552,7 @@ func (t *CleanPinned) initBlockList(ctx *annealingState) {

// 再填充实际块
for _, b := range ctx.object.blocks {
blocks := blocksMap[b.NodeID]
blocks := blocksMap[b.StorageID]

has := false
for i := range blocks {
@@ -572,11 +570,11 @@ func (t *CleanPinned) initBlockList(ctx *annealingState) {

blocks = append(blocks, objectBlock{
Index: b.Index,
NodeID: b.NodeID,
StorageID: b.StorageID,
HasEntity: true,
FileHash: b.FileHash,
})
blocksMap[b.NodeID] = blocks
blocksMap[b.StorageID] = blocks
}

var sortedBlocks []objectBlock
@@ -584,7 +582,7 @@ func (t *CleanPinned) initBlockList(ctx *annealingState) {
sortedBlocks = append(sortedBlocks, bs...)
}
sortedBlocks = sort2.Sort(sortedBlocks, func(left objectBlock, right objectBlock) int {
d := left.NodeID - right.NodeID
d := left.StorageID - right.StorageID
if d != 0 {
return int(d)
}
@@ -597,43 +595,43 @@ func (t *CleanPinned) initBlockList(ctx *annealingState) {

func (t *CleanPinned) initNodeBlockBitmap(state *annealingState) {
for _, b := range state.blockList {
mp, ok := state.nodeBlockBitmaps[b.NodeID]
mp, ok := state.stgBlockBitmaps[b.StorageID]
if !ok {
nb := bitmap.Bitmap64(0)
mp = &nb
state.nodeBlockBitmaps[b.NodeID] = mp
state.stgBlockBitmaps[b.StorageID] = mp
}
mp.Set(b.Index, true)
}
}

func (t *CleanPinned) sortNodeByReaderDistance(state *annealingState) {
for _, r := range state.readerNodeIDs {
var nodeDists []nodeDist
for _, r := range state.readerStgIDs {
var nodeDists []stgDist

for n := range state.nodeBlockBitmaps {
for n := range state.stgBlockBitmaps {
if r == n {
// 同节点时距离视为0.1
nodeDists = append(nodeDists, nodeDist{
NodeID: n,
Distance: consts.NodeDistanceSameNode,
nodeDists = append(nodeDists, stgDist{
StorageID: n,
Distance: consts.NodeDistanceSameNode,
})
} else if state.allNodeInfos[r].LocationID == state.allNodeInfos[n].LocationID {
} else if state.allStgInfos[r].MasterHub.LocationID == state.allStgInfos[n].MasterHub.LocationID {
// 同地区时距离视为1
nodeDists = append(nodeDists, nodeDist{
NodeID: n,
Distance: consts.NodeDistanceSameLocation,
nodeDists = append(nodeDists, stgDist{
StorageID: n,
Distance: consts.NodeDistanceSameLocation,
})
} else {
// 不同地区时距离视为5
nodeDists = append(nodeDists, nodeDist{
NodeID: n,
Distance: consts.NodeDistanceOther,
nodeDists = append(nodeDists, stgDist{
StorageID: n,
Distance: consts.NodeDistanceOther,
})
}
}

state.nodesSortedByReader[r] = sort2.Sort(nodeDists, func(left, right nodeDist) int { return sort2.Cmp(left.Distance, right.Distance) })
state.stgsSortedByReader[r] = sort2.Sort(nodeDists, func(left, right stgDist) int { return sort2.Cmp(left.Distance, right.Distance) })
}
}

@@ -664,21 +662,21 @@ func (t *CleanPinned) calcScore(state *annealingState) float64 {
func (t *CleanPinned) calcDisasterTolerance(state *annealingState) float64 {
if state.inversedIndex != -1 {
node := state.blockList[state.inversedIndex]
state.nodeCombTree.UpdateBitmap(node.NodeID, *state.nodeBlockBitmaps[node.NodeID], state.object.minBlockCnt)
state.stgCombTree.UpdateBitmap(node.StorageID, *state.stgBlockBitmaps[node.StorageID], state.object.minBlockCnt)
}
return float64(len(state.nodeBlockBitmaps) - state.nodeCombTree.FindKBlocksMaxDepth(state.object.minBlockCnt))
return float64(len(state.stgBlockBitmaps) - state.stgCombTree.FindKBlocksMaxDepth(state.object.minBlockCnt))
}

// 计算最小访问数据的代价
func (t *CleanPinned) calcMinAccessCost(state *annealingState) float64 {
cost := math.MaxFloat64
for _, reader := range state.readerNodeIDs {
tarNodes := state.nodesSortedByReader[reader]
for _, reader := range state.readerStgIDs {
tarNodes := state.stgsSortedByReader[reader]
gotBlocks := bitmap.Bitmap64(0)
thisCost := 0.0

for _, tar := range tarNodes {
tarNodeMp := state.nodeBlockBitmaps[tar.NodeID]
tarNodeMp := state.stgBlockBitmaps[tar.StorageID]

// 只需要从目的节点上获得缺少的块
curWeigth := gotBlocks.Weight()
@@ -726,32 +724,40 @@ func (t *CleanPinned) alwaysAccept(curTemp float64, dScore float64, coolingRate
return v > rand.Float64()
}

func (t *CleanPinned) makePlansForRepObject(solu annealingSolution, obj stgmod.ObjectDetail, pinPlans map[cdssdk.NodeID]*[]string) coormq.UpdatingObjectRedundancy {
func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningNodeIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy {
entry := coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: obj.Object.Redundancy,
}

for i, f := range solu.rmBlocks {
hasCache := lo.ContainsBy(obj.Blocks, func(b stgmod.ObjectBlock) bool { return b.NodeID == solu.blockList[i].NodeID }) ||
lo.ContainsBy(obj.PinnedAt, func(n cdssdk.NodeID) bool { return n == solu.blockList[i].NodeID })
hasCache := lo.ContainsBy(obj.Blocks, func(b stgmod.ObjectBlock) bool { return b.StorageID == solu.blockList[i].StorageID }) ||
lo.ContainsBy(obj.PinnedAt, func(n cdssdk.StorageID) bool { return n == solu.blockList[i].StorageID })
willRm := f

if !willRm {
// 如果对象在退火后要保留副本的节点没有副本,则需要在这个节点创建副本
if !hasCache {
pinPlan, ok := pinPlans[solu.blockList[i].NodeID]
if !ok {
pinPlan = &[]string{}
pinPlans[solu.blockList[i].NodeID] = pinPlan
ft := ioswitch2.NewFromTo()

fromStg := allStgInfos[obj.Blocks[0].StorageID]
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg.MasterHub, fromStg.Storage, -1))
toStg := allStgInfos[solu.blockList[i].StorageID]
ft.AddTo(ioswitch2.NewToShardStore(*toStg.MasterHub, toStg.Storage, -1, fmt.Sprintf("%d.0", obj.Object.ObjectID)))

parser := parser.NewParser(cdssdk.DefaultECRedundancy)
err := parser.Parse(ft, planBld)
if err != nil {
// TODO 错误处理
continue
}
*pinPlan = append(*pinPlan, obj.Object.FileHash)
planningNodeIDs[solu.blockList[i].StorageID] = true
}
entry.Blocks = append(entry.Blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
Index: solu.blockList[i].Index,
NodeID: solu.blockList[i].NodeID,
FileHash: obj.Object.FileHash,
ObjectID: obj.Object.ObjectID,
Index: solu.blockList[i].Index,
StorageID: solu.blockList[i].StorageID,
FileHash: obj.Object.FileHash,
})
}
}
@@ -759,29 +765,29 @@ func (t *CleanPinned) makePlansForRepObject(solu annealingSolution, obj stgmod.O
return entry
}

func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningNodeIDs map[cdssdk.NodeID]bool) coormq.UpdatingObjectRedundancy {
func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningNodeIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy {
entry := coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: obj.Object.Redundancy,
}

reconstrct := make(map[cdssdk.NodeID]*[]int)
reconstrct := make(map[cdssdk.StorageID]*[]int)
for i, f := range solu.rmBlocks {
block := solu.blockList[i]
if !f {
entry.Blocks = append(entry.Blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
Index: block.Index,
NodeID: block.NodeID,
FileHash: block.FileHash,
ObjectID: obj.Object.ObjectID,
Index: block.Index,
StorageID: block.StorageID,
FileHash: block.FileHash,
})

// 如果这个块是影子块,那么就要从完整对象里重建这个块
if !block.HasEntity {
re, ok := reconstrct[block.NodeID]
re, ok := reconstrct[block.StorageID]
if !ok {
re = &[]int{}
reconstrct[block.NodeID] = re
reconstrct[block.StorageID] = re
}

*re = append(*re, block.Index)
@@ -794,10 +800,10 @@ func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssd

for id, idxs := range reconstrct {
ft := ioswitch2.NewFromTo()
ft.AddFrom(ioswitch2.NewFromNode(obj.Object.FileHash, allNodeInfos[id], -1))
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *allStgInfos[id].MasterHub, allStgInfos[id].Storage, -1))

for _, i := range *idxs {
ft.AddTo(ioswitch2.NewToNode(*allNodeInfos[id], i, fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
ft.AddTo(ioswitch2.NewToShardStore(*allStgInfos[id].MasterHub, allStgInfos[id].Storage, i, fmt.Sprintf("%d.%d", obj.Object.ObjectID, i)))
}

err := parser.Parse(ft, planBld)
@@ -811,16 +817,11 @@ func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssd
return entry
}

func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.NodeID]*[]string, planBld *exec.PlanBuilder, plnningNodeIDs map[cdssdk.NodeID]bool) (map[string]any, error) {
log := logger.WithType[CleanPinned]("Event")

func (t *CleanPinned) executePlans(execCtx ExecuteContext, planBld *exec.PlanBuilder, planningStgIDs map[cdssdk.StorageID]bool) (map[string]exec.VarValue, error) {
// 统一加锁,有重复也没关系
lockBld := reqbuilder.NewBuilder()
for nodeID := range pinPlans {
lockBld.IPFS().Buzy(nodeID)
}
for id := range plnningNodeIDs {
lockBld.IPFS().Buzy(id)
for id := range planningStgIDs {
lockBld.Shard().Buzy(id)
}
lock, err := lockBld.MutexLock(execCtx.Args.DistLock)
if err != nil {
@@ -830,36 +831,15 @@ func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.N

wg := sync.WaitGroup{}

// 执行pin操作
var anyPinErr error
for nodeID, pin := range pinPlans {
wg.Add(1)
go func(nodeID cdssdk.NodeID, pin *[]string) {
defer wg.Done()

agtCli, err := stgglb.AgentMQPool.Acquire(nodeID)
if err != nil {
log.Warnf("new agent client: %s", err.Error())
return
}
defer stgglb.AgentMQPool.Release(agtCli)

_, err = agtCli.PinObject(agtmq.ReqPinObject(*pin, false))
if err != nil {
log.Warnf("pinning object: %s", err.Error())
anyPinErr = err
}
}(nodeID, pin)
}

// 执行IO计划
var ioSwRets map[string]any
var ioSwRets map[string]exec.VarValue
var ioSwErr error
wg.Add(1)
go func() {
defer wg.Done()

ret, err := planBld.Execute().Wait(context.TODO())
// TODO 添加依赖
ret, err := planBld.Execute(exec.NewExecContext()).Wait(context.TODO())
if err != nil {
ioSwErr = fmt.Errorf("executing io switch plan: %w", err)
return
@@ -869,10 +849,6 @@ func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.N

wg.Wait()

if anyPinErr != nil {
return nil, anyPinErr
}

if ioSwErr != nil {
return nil, ioSwErr
}
@@ -880,7 +856,7 @@ func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.N
return ioSwRets, nil
}

func (t *CleanPinned) populateECObjectEntry(entry *coormq.UpdatingObjectRedundancy, obj stgmod.ObjectDetail, ioRets map[string]any) {
func (t *CleanPinned) populateECObjectEntry(entry *coormq.UpdatingObjectRedundancy, obj stgmod.ObjectDetail, ioRets map[string]exec.VarValue) {
for i := range entry.Blocks {
if entry.Blocks[i].FileHash != "" {
continue
@@ -888,7 +864,7 @@ func (t *CleanPinned) populateECObjectEntry(entry *coormq.UpdatingObjectRedundan

key := fmt.Sprintf("%d.%d", obj.Object.ObjectID, entry.Blocks[i].Index)
// 不应该出现key不存在的情况
entry.Blocks[i].FileHash = ioRets[key].(string)
entry.Blocks[i].FileHash = ioRets[key].(*ops2.FileHashValue).Hash
}
}



+ 13
- 13
scanner/internal/event/clean_pinned_test.go View File

@@ -10,15 +10,15 @@ import (

func newTreeTest(nodeBlocksMap []bitmap.Bitmap64) combinatorialTree {
tree := combinatorialTree{
blocksMaps: make(map[int]bitmap.Bitmap64),
nodeIDToLocalNodeID: make(map[cdssdk.NodeID]int),
blocksMaps: make(map[int]bitmap.Bitmap64),
stgIDToLocalStgID: make(map[cdssdk.StorageID]int),
}

tree.nodes = make([]combinatorialTreeNode, (1 << len(nodeBlocksMap)))
for id, mp := range nodeBlocksMap {
tree.nodeIDToLocalNodeID[cdssdk.NodeID(id)] = len(tree.localNodeIDToNodeID)
tree.blocksMaps[len(tree.localNodeIDToNodeID)] = mp
tree.localNodeIDToNodeID = append(tree.localNodeIDToNodeID, cdssdk.NodeID(id))
tree.stgIDToLocalStgID[cdssdk.StorageID(id)] = len(tree.localStgIDToStgID)
tree.blocksMaps[len(tree.localStgIDToStgID)] = mp
tree.localStgIDToStgID = append(tree.localStgIDToStgID, cdssdk.StorageID(id))
}

tree.nodes[0].localNodeID = -1
@@ -125,7 +125,7 @@ func Test_UpdateBitmap(t *testing.T) {
testcases := []struct {
title string
nodeBlocks []bitmap.Bitmap64
updatedNodeID cdssdk.NodeID
updatedNodeID cdssdk.StorageID
updatedBitmap bitmap.Bitmap64
k int
expectedTreeNodeBitmaps []int
@@ -134,7 +134,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,更新但值不变",
nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8},
updatedNodeID: cdssdk.NodeID(0),
updatedNodeID: cdssdk.StorageID(0),
updatedBitmap: bitmap.Bitmap64(1),
k: 4,
expectedTreeNodeBitmaps: []int{0, 1, 3, 7, 15, 11, 5, 13, 9, 2, 6, 14, 10, 4, 12, 8},
@@ -143,7 +143,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,更新0",
nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8},
updatedNodeID: cdssdk.NodeID(0),
updatedNodeID: cdssdk.StorageID(0),
updatedBitmap: bitmap.Bitmap64(2),
k: 4,
expectedTreeNodeBitmaps: []int{0, 2, 2, 6, 14, 10, 6, 14, 10, 2, 6, 14, 10, 4, 12, 8},
@@ -152,7 +152,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,更新1",
nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8},
updatedNodeID: cdssdk.NodeID(1),
updatedNodeID: cdssdk.StorageID(1),
updatedBitmap: bitmap.Bitmap64(1),
k: 4,
expectedTreeNodeBitmaps: []int{0, 1, 1, 5, 13, 9, 5, 13, 9, 1, 5, 13, 9, 4, 12, 8},
@@ -161,7 +161,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,更新2",
nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8},
updatedNodeID: cdssdk.NodeID(2),
updatedNodeID: cdssdk.StorageID(2),
updatedBitmap: bitmap.Bitmap64(1),
k: 4,
expectedTreeNodeBitmaps: []int{0, 1, 3, 3, 11, 11, 1, 9, 9, 2, 3, 11, 10, 1, 9, 8},
@@ -170,7 +170,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,更新3",
nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8},
updatedNodeID: cdssdk.NodeID(3),
updatedNodeID: cdssdk.StorageID(3),
updatedBitmap: bitmap.Bitmap64(1),
k: 4,
expectedTreeNodeBitmaps: []int{0, 1, 3, 7, 7, 3, 5, 5, 1, 2, 6, 7, 3, 4, 5, 1},
@@ -179,7 +179,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,k<4,更新0,0之前没有k个块,现在拥有",
nodeBlocks: []bitmap.Bitmap64{1, 2, 4, 8},
updatedNodeID: cdssdk.NodeID(0),
updatedNodeID: cdssdk.StorageID(0),
updatedBitmap: bitmap.Bitmap64(3),
k: 2,
expectedTreeNodeBitmaps: []int{0, 3, 3, 7, 15, 11, 5, 13, 9, 2, 6, 14, 10, 4, 12, 8},
@@ -187,7 +187,7 @@ func Test_UpdateBitmap(t *testing.T) {
{
title: "4个节点,k<4,更新0,0之前有k个块,现在没有",
nodeBlocks: []bitmap.Bitmap64{3, 4, 0, 0},
updatedNodeID: cdssdk.NodeID(0),
updatedNodeID: cdssdk.StorageID(0),
updatedBitmap: bitmap.Bitmap64(0),
k: 2,
expectedTreeNodeBitmaps: []int{0, 0, 4, 4, 4, 4, 0, 0, 0, 4, 4, 4, 4, 0, 0, 0},


+ 3
- 3
scanner/internal/event/event.go View File

@@ -7,12 +7,12 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/distlock"
event "gitlink.org.cn/cloudream/common/pkgs/event"
"gitlink.org.cn/cloudream/common/pkgs/typedispatcher"
mydb "gitlink.org.cn/cloudream/storage/common/pkgs/db"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
)

type ExecuteArgs struct {
DB *mydb.DB
DB *db2.DB
DistLock *distlock.Service
}

@@ -24,7 +24,7 @@ type Event = event.Event[ExecuteArgs]

type ExecuteOption = event.ExecuteOption

func NewExecutor(db *mydb.DB, distLock *distlock.Service) Executor {
func NewExecutor(db *db2.DB, distLock *distlock.Service) Executor {
return event.NewExecutor(ExecuteArgs{
DB: db,
DistLock: distLock,


+ 19
- 17
scanner/internal/event/event_test.go View File

@@ -1,5 +1,6 @@
package event

/*
import (
"testing"

@@ -11,51 +12,51 @@ import (
func Test_chooseSoManyNodes(t *testing.T) {
testcases := []struct {
title string
allNodes []*NodeLoadInfo
allNodes []*StorageLoadInfo
count int
expectedNodeIDs []cdssdk.NodeID
}{
{
title: "节点数量充足",
allNodes: []*NodeLoadInfo{
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(2)}},
allNodes: []*StorageLoadInfo{
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2)}},
},
count: 2,
expectedNodeIDs: []cdssdk.NodeID{1, 2},
},
{
title: "节点数量超过",
allNodes: []*NodeLoadInfo{
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(2)}},
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(3)}},
allNodes: []*StorageLoadInfo{
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2)}},
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(3)}},
},
count: 2,
expectedNodeIDs: []cdssdk.NodeID{1, 2},
},
{
title: "只有一个节点,节点数量不够",
allNodes: []*NodeLoadInfo{
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
allNodes: []*StorageLoadInfo{
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
},
count: 3,
expectedNodeIDs: []cdssdk.NodeID{1, 1, 1},
},
{
title: "多个同地区节点,节点数量不够",
allNodes: []*NodeLoadInfo{
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(2)}},
allNodes: []*StorageLoadInfo{
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1)}},
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2)}},
},
count: 5,
expectedNodeIDs: []cdssdk.NodeID{1, 1, 1, 2, 2},
},
{
title: "节点数量不够,且在不同地区",
allNodes: []*NodeLoadInfo{
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(1), LocationID: cdssdk.LocationID(1)}},
{Node: cdssdk.Node{NodeID: cdssdk.NodeID(2), LocationID: cdssdk.LocationID(2)}},
allNodes: []*StorageLoadInfo{
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(1), LocationID: cdssdk.LocationID(1)}},
{Storage: cdssdk.Node{NodeID: cdssdk.NodeID(2), LocationID: cdssdk.LocationID(2)}},
},
count: 5,
expectedNodeIDs: []cdssdk.NodeID{1, 2, 1, 2, 1},
@@ -67,9 +68,10 @@ func Test_chooseSoManyNodes(t *testing.T) {
var t CheckPackageRedundancy
chosenNodes := t.chooseSoManyNodes(test.count, test.allNodes)

chosenNodeIDs := lo.Map(chosenNodes, func(item *NodeLoadInfo, idx int) cdssdk.NodeID { return item.Node.NodeID })
chosenNodeIDs := lo.Map(chosenNodes, func(item *StorageLoadInfo, idx int) cdssdk.NodeID { return item.Storage.NodeID })

So(chosenNodeIDs, ShouldResemble, test.expectedNodeIDs)
})
}
}
*/

+ 4
- 4
scanner/internal/event/update_package_access_stat_amount.go View File

@@ -48,26 +48,26 @@ func (t *UpdatePackageAccessStatAmount) Execute(execCtx ExecuteContext) {
}()

if t.PackageIDs == nil {
err := execCtx.Args.DB.PackageAccessStat().UpdateAllAmount(execCtx.Args.DB.SQLCtx(), config.Cfg().AccessStatHistoryAmount)
err := execCtx.Args.DB.PackageAccessStat().UpdateAllAmount(execCtx.Args.DB.DefCtx(), config.Cfg().AccessStatHistoryAmount)
if err != nil {
log.Warnf("update all package access stat amount: %v", err)
return
}

err = execCtx.Args.DB.ObjectAccessStat().UpdateAllAmount(execCtx.Args.DB.SQLCtx(), config.Cfg().AccessStatHistoryAmount)
err = execCtx.Args.DB.ObjectAccessStat().UpdateAllAmount(execCtx.Args.DB.DefCtx(), config.Cfg().AccessStatHistoryAmount)
if err != nil {
log.Warnf("update all object access stat amount: %v", err)
return
}

} else {
err := execCtx.Args.DB.PackageAccessStat().BatchUpdateAmount(execCtx.Args.DB.SQLCtx(), t.PackageIDs, config.Cfg().AccessStatHistoryAmount)
err := execCtx.Args.DB.PackageAccessStat().BatchUpdateAmount(execCtx.Args.DB.DefCtx(), t.PackageIDs, config.Cfg().AccessStatHistoryAmount)
if err != nil {
log.Warnf("batch update package access stat amount: %v", err)
return
}

err = execCtx.Args.DB.ObjectAccessStat().BatchUpdateAmountInPackage(execCtx.Args.DB.SQLCtx(), t.PackageIDs, config.Cfg().AccessStatHistoryAmount)
err = execCtx.Args.DB.ObjectAccessStat().BatchUpdateAmountInPackage(execCtx.Args.DB.DefCtx(), t.PackageIDs, config.Cfg().AccessStatHistoryAmount)
if err != nil {
log.Warnf("batch update object access stat amount in package: %v", err)
return


+ 7
- 9
scanner/internal/tickevent/batch_all_agent_check_cache.go View File

@@ -1,7 +1,6 @@
package tickevent

import (
"github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
@@ -11,7 +10,7 @@ import (
const AGENT_CHECK_CACHE_BATCH_SIZE = 2

type BatchAllAgentCheckCache struct {
nodeIDs []cdssdk.NodeID
stgIDs []cdssdk.StorageID
}

func NewBatchAllAgentCheckCache() *BatchAllAgentCheckCache {
@@ -23,22 +22,21 @@ func (e *BatchAllAgentCheckCache) Execute(ctx ExecuteContext) {
log.Debugf("begin")
defer log.Debugf("end")

if e.nodeIDs == nil || len(e.nodeIDs) == 0 {
nodes, err := ctx.Args.DB.Node().GetAllNodes(ctx.Args.DB.SQLCtx())
if e.stgIDs == nil || len(e.stgIDs) == 0 {
ids, err := ctx.Args.DB.Storage().GetAllIDs(ctx.Args.DB.DefCtx())
if err != nil {
log.Warnf("get all nodes failed, err: %s", err.Error())
return
}

e.nodeIDs = lo.Map(nodes, func(node cdssdk.Node, index int) cdssdk.NodeID { return node.NodeID })

log.Debugf("new check start, get all nodes")
e.stgIDs = ids
}

checkedCnt := 0
for ; checkedCnt < len(e.nodeIDs) && checkedCnt < AGENT_CHECK_CACHE_BATCH_SIZE; checkedCnt++ {
for ; checkedCnt < len(e.stgIDs) && checkedCnt < AGENT_CHECK_CACHE_BATCH_SIZE; checkedCnt++ {
// nil代表进行全量检查
ctx.Args.EventExecutor.Post(event.NewAgentCheckCache(scevt.NewAgentCheckCache(e.nodeIDs[checkedCnt])))
ctx.Args.EventExecutor.Post(event.NewAgentCheckCache(scevt.NewAgentCheckCache(e.stgIDs[checkedCnt])))
}
e.nodeIDs = e.nodeIDs[checkedCnt:]
e.stgIDs = e.stgIDs[checkedCnt:]
}

+ 1
- 1
scanner/internal/tickevent/batch_check_all_package.go View File

@@ -19,7 +19,7 @@ func (e *BatchCheckAllPackage) Execute(ctx ExecuteContext) {
log.Debugf("begin")
defer log.Debugf("end")

packageIDs, err := ctx.Args.DB.Package().BatchGetAllPackageIDs(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CheckPackageBatchSize)
packageIDs, err := ctx.Args.DB.Package().BatchGetAllPackageIDs(ctx.Args.DB.DefCtx(), e.lastCheckStart, CheckPackageBatchSize)
if err != nil {
log.Warnf("batch get package ids failed, err: %s", err.Error())
return


+ 1
- 1
scanner/internal/tickevent/batch_check_all_storage.go View File

@@ -21,7 +21,7 @@ func (e *BatchCheckAllStorage) Execute(ctx ExecuteContext) {
log.Debugf("begin")
defer log.Debugf("end")

storageIDs, err := ctx.Args.DB.Storage().BatchGetAllStorageIDs(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CHECK_STORAGE_BATCH_SIZE)
storageIDs, err := ctx.Args.DB.Storage().BatchGetAllStorageIDs(ctx.Args.DB.DefCtx(), e.lastCheckStart, CHECK_STORAGE_BATCH_SIZE)
if err != nil {
log.Warnf("batch get storage ids failed, err: %s", err.Error())
return


+ 1
- 1
scanner/internal/tickevent/batch_check_package_redudancy.go View File

@@ -31,7 +31,7 @@ func (e *BatchCheckPackageRedundancy) Execute(ctx ExecuteContext) {
return
}

packageIDs, err := ctx.Args.DB.Package().BatchGetAllPackageIDs(ctx.Args.DB.SQLCtx(), e.lastCheckStart, CheckPackageBatchSize)
packageIDs, err := ctx.Args.DB.Package().BatchGetAllPackageIDs(ctx.Args.DB.DefCtx(), e.lastCheckStart, CheckPackageBatchSize)
if err != nil {
log.Warnf("batch get package ids failed, err: %s", err.Error())
return


Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save