Browse Source

Merge branch 'feature_sysevent'

gitlink
Sydonian 9 months ago
parent
commit
88ec7c7293
58 changed files with 3594 additions and 231 deletions
  1. +86
    -0
      agent/internal/cmd/serve.go
  2. +61
    -0
      agent/internal/tickevent/report_hub_stats.go
  3. +23
    -0
      agent/internal/tickevent/report_storage_stats.go
  4. +60
    -0
      client/internal/cmdline/sysevent.go
  5. +0
    -1
      client/internal/services/hub.go
  6. +2
    -0
      client/internal/services/object.go
  7. +24
    -0
      common/assets/confs/datamap.config.json
  8. +1
    -0
      common/globals/pools.go
  9. +5
    -0
      common/globals/stats.go
  10. +517
    -0
      common/models/datamap.go
  11. +2
    -80
      common/pkgs/db2/object.go
  12. +19
    -4
      common/pkgs/ioswitch2/agent_worker.go
  13. +20
    -4
      common/pkgs/ioswitch2/http_hub_worker.go
  14. +1
    -0
      common/pkgs/mq/consts.go
  15. +5
    -2
      common/pkgs/mq/coordinator/object.go
  16. +3
    -5
      common/pkgs/mq/coordinator/package.go
  17. +103
    -0
      common/pkgs/servicestats/hub_strorage_transfer.go
  18. +103
    -0
      common/pkgs/servicestats/hub_transfter.go
  19. +37
    -0
      common/pkgs/servicestats/service_stats.go
  20. +12
    -0
      common/pkgs/storage/agtpool/pool.go
  21. +14
    -3
      common/pkgs/storage/local/shard_store.go
  22. +12
    -1
      common/pkgs/storage/s3/shard_store.go
  23. +1
    -1
      common/pkgs/storage/types/shard_store.go
  24. +11
    -0
      common/pkgs/sysevent/config.go
  25. +9
    -4
      common/pkgs/sysevent/publisher.go
  26. +6
    -2
      common/pkgs/sysevent/sysevent.go
  27. +14
    -0
      common/pkgs/sysevent/watcher.go
  28. +1
    -1
      common/pkgs/uploader/create_load.go
  29. +1
    -1
      common/pkgs/uploader/update.go
  30. +47
    -1
      coordinator/internal/cmd/serve.go
  31. +9
    -0
      coordinator/internal/mq/bucket.go
  32. +142
    -4
      coordinator/internal/mq/object.go
  33. +37
    -14
      coordinator/internal/mq/package.go
  34. +6
    -3
      coordinator/internal/mq/service.go
  35. +23
    -0
      coordinator/internal/mq/utils.go
  36. +15
    -0
      datamap/.env
  37. +54
    -0
      datamap/internal/config/config.go
  38. +32
    -0
      datamap/internal/db/db.go
  39. +174
    -0
      datamap/internal/handlers/handlers.go
  40. +187
    -0
      datamap/internal/models/blockdistribution.go
  41. +234
    -0
      datamap/internal/models/blocktransfer.go
  42. +52
    -0
      datamap/internal/models/hub.go
  43. +85
    -0
      datamap/internal/models/hubinfo.go
  44. +157
    -0
      datamap/internal/models/hubrequest.go
  45. +124
    -0
      datamap/internal/models/models.go
  46. +86
    -0
      datamap/internal/models/object.go
  47. +43
    -0
      datamap/internal/models/repository.go
  48. +51
    -0
      datamap/internal/models/storageStats.go
  49. +110
    -0
      datamap/internal/models/storageinfo.go
  50. +117
    -0
      datamap/internal/mq/mq.go
  51. +23
    -0
      datamap/internal/server/server.go
  52. +32
    -0
      datamap/main.go
  53. +32
    -19
      go.mod
  54. +78
    -33
      go.sum
  55. +264
    -30
      scanner/internal/event/check_package_redundancy.go
  56. +176
    -16
      scanner/internal/event/clean_pinned.go
  57. +4
    -1
      scanner/internal/event/event.go
  58. +47
    -1
      scanner/main.go

+ 86
- 0
agent/internal/cmd/serve.go View File

@@ -1,12 +1,15 @@
package cmd

import (
"context"
"fmt"
"net"
"os"
"time"

"github.com/go-co-op/gocron/v2"
"gitlink.org.cn/cloudream/storage/agent/internal/http"
"gitlink.org.cn/cloudream/storage/agent/internal/tickevent"

"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/logger"
@@ -15,6 +18,7 @@ import (
"gitlink.org.cn/cloudream/storage/agent/internal/config"
"gitlink.org.cn/cloudream/storage/agent/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/accessstat"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
@@ -23,6 +27,7 @@ import (
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/metacache"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gitlink.org.cn/cloudream/storage/common/pkgs/uploader"

"google.golang.org/grpc"
@@ -50,6 +55,8 @@ func serve(configPath string) {
stgglb.InitLocal(&config.Cfg().Local)
stgglb.InitMQPool(config.Cfg().RabbitMQ)
stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{})
stgglb.Stats.SetupHubStorageTransfer(*config.Cfg().Local.HubID)
stgglb.Stats.SetupHubTransfer(*config.Cfg().Local.HubID)

// 获取Hub配置
hubCfg := downloadHubConfig()
@@ -141,6 +148,22 @@ func serve(configPath string) {
// 初始化任务管理器
taskMgr := task.NewManager(distlock, &conCol, &dlder, acStat, stgAgts, uploader)

// 初始化系统事件发布器
evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &stgmod.SourceHub{
HubID: hubCfg.Hub.HubID,
HubName: hubCfg.Hub.Name,
})
if err != nil {
logger.Errorf("new sysevent publisher: %v", err)
os.Exit(1)
}
go servePublisher(evtPub)

// 初始化定时任务执行器
sch := setupTickTask(stgAgts, evtPub)
sch.Start()
defer sch.Shutdown()

// 启动命令服务器
// TODO 需要设计AgentID持久化机制
agtSvr, err := agtmq.NewServer(cmdsvc.NewService(&taskMgr, stgAgts), config.Cfg().ID, config.Cfg().RabbitMQ)
@@ -185,6 +208,69 @@ func downloadHubConfig() coormq.GetHubConfigResp {
return *cfgResp
}

// servePublisher drains the channel returned by Publisher.Start and logs
// everything the publisher reports. It blocks until the publisher exits
// (or the channel fails), then terminates the whole process.
func servePublisher(evtPub *sysevent.Publisher) {
	logger.Info("start serving sysevent publisher")

	ch := evtPub.Start()

	running := true
	for running {
		val, err := ch.Receive().Wait(context.Background())
		if err != nil {
			logger.Errorf("sysevent publisher stopped with error: %s", err.Error())
			running = false
			continue
		}

		switch val := val.(type) {
		case sysevent.PublishError:
			logger.Errorf("publishing event: %v", val)

		case sysevent.PublisherExited:
			if val.Err != nil {
				logger.Errorf("publisher exited with error: %v", val.Err)
			} else {
				logger.Info("publisher exited")
			}
			running = false

		case sysevent.OtherError:
			logger.Errorf("sysevent: %v", val)
		}
	}
	logger.Info("sysevent publisher stopped")

	// TODO currently just ends the whole process
	os.Exit(1)
}

// setupTickTask builds the scheduler that periodically publishes statistics
// events: storage stats at 00:00:00, hub transfer stats at 00:00:01 and
// hub-storage transfer stats at 00:00:02, daily. The one-second offsets keep
// the three reports from firing at the same instant.
// The caller is responsible for calling Start and Shutdown on the result.
func setupTickTask(agtPool *agtpool.AgentPool, evtPub *sysevent.Publisher) gocron.Scheduler {
	sch, err := gocron.NewScheduler()
	if err != nil {
		logger.Errorf("new cron scheduler: %s", err.Error())
		os.Exit(1)
	}

	// Fix: errors returned by sch.NewJob were silently discarded; a
	// misconfigured job would never run and never be reported.
	newDailyJob := func(second uint, task gocron.Task) {
		if _, err := sch.NewJob(gocron.DailyJob(1, gocron.NewAtTimes(
			gocron.NewAtTime(0, 0, second),
		)), task); err != nil {
			logger.Errorf("new cron job: %s", err.Error())
			os.Exit(1)
		}
	}

	newDailyJob(0, gocron.NewTask(tickevent.ReportStorageStats, agtPool, evtPub))
	newDailyJob(1, gocron.NewTask(tickevent.ReportHubTransferStats, evtPub))
	newDailyJob(2, gocron.NewTask(tickevent.ReportHubStorageTransferStats, evtPub))

	// For local debugging, gocron.DurationJob(time.Minute) can replace the
	// daily schedules above.

	return sch
}

func serveAgentServer(server *agtmq.Server) {
logger.Info("start serving command server")



+ 61
- 0
agent/internal/tickevent/report_hub_stats.go View File

@@ -0,0 +1,61 @@
package tickevent

import (
"gitlink.org.cn/cloudream/common/utils/math2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
)

// ReportHubTransferStats publishes one HubTransferStats event per target hub,
// built from the globally collected hub-to-hub transfer counters, then resets
// the counters so the next report covers a fresh interval.
func ReportHubTransferStats(evtPub *sysevent.Publisher) {
	// Counters exist only after SetupHubTransfer was called at startup.
	if stgglb.Stats.HubTransfer == nil {
		return
	}

	data := stgglb.Stats.HubTransfer.DumpData()
	endTime := stgglb.Stats.HubTransfer.Reset()

	for hubID, entry := range data.Entries {
		evtPub.Publish(&stgmod.BodyHubTransferStats{
			SourceHubID: *stgglb.Local.HubID,
			TargetHubID: hubID,
			Send: stgmod.DataTrans{
				TotalTransfer: entry.OutputBytes,
				RequestCount:  entry.TotalOutput,
				// NOTE(review): input counters feed a Send failure count while
				// every other field uses output counters — confirm whether
				// TotalOutput-SuccessOutput was intended.
				FailedRequestCount: entry.TotalInput - entry.SuccessInput,
				AvgTransfer:        math2.DivOrDefault(entry.OutputBytes, entry.TotalOutput, 0),
				MinTransfer:        entry.MinOutputBytes,
				MaxTransfer:        entry.MaxOutputBytes,
			},
			StartTimestamp: data.StartTime,
			EndTimestamp:   endTime,
		})
	}
}

// ReportHubStorageTransferStats publishes one HubStorageTransferStats event
// per storage from the global hub<->storage transfer counters, then resets
// them so the next report covers a fresh interval.
// NOTE(review): stgAgts is currently unused, and the Receive side declared on
// the event body is left zero — confirm both are intentional.
func ReportHubStorageTransferStats(stgAgts *agtpool.AgentPool, evtPub *sysevent.Publisher) {
	// Counters exist only after SetupHubStorageTransfer was called at startup.
	if stgglb.Stats.HubStorageTransfer == nil {
		return
	}

	data := stgglb.Stats.HubStorageTransfer.DumpData()
	endTime := stgglb.Stats.HubStorageTransfer.Reset()

	for storageID, stg := range data.Entries {
		evtPub.Publish(&stgmod.BodyHubStorageTransferStats{
			HubID:     *stgglb.Local.HubID,
			StorageID: storageID,
			Send: stgmod.DataTrans{
				TotalTransfer: stg.OutputBytes,
				RequestCount:  stg.TotalOutput,
				// NOTE(review): input counters feed a Send failure count —
				// confirm TotalOutput-SuccessOutput wasn't intended.
				FailedRequestCount: stg.TotalInput - stg.SuccessInput,
				AvgTransfer:        math2.DivOrDefault(stg.OutputBytes, stg.TotalOutput, 0),
				MinTransfer:        stg.MinOutputBytes,
				MaxTransfer:        stg.MaxOutputBytes,
			},
			StartTimestamp: data.StartTime,
			EndTimestamp:   endTime,
		})
	}
}

+ 23
- 0
agent/internal/tickevent/report_storage_stats.go View File

@@ -0,0 +1,23 @@
package tickevent

import (
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
)

// ReportStorageStats publishes a StorageStats event for every storage agent
// in the pool whose shard store is available, reporting its file count.
func ReportStorageStats(agtPool *agtpool.AgentPool, evtPub *sysevent.Publisher) {
	for _, agt := range agtPool.GetAllAgents() {
		store, err := agt.GetShardStore()
		if err != nil {
			// Agents without a usable shard store are simply skipped.
			continue
		}

		st := store.Stats()
		evtPub.Publish(&stgmod.BodyStorageStats{
			StorageID: agt.Info().Storage.StorageID,
			DataCount: st.FileCount,
		})
	}
}

+ 60
- 0
client/internal/cmdline/sysevent.go View File

@@ -0,0 +1,60 @@
package cmdline

import (
"context"
"fmt"

"github.com/spf13/cobra"
"gitlink.org.cn/cloudream/storage/client/internal/config"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
)

func init() {
cmd := &cobra.Command{
Use: "sysevent",
}

rootCmd.AddCommand(cmd)

cmd.AddCommand(&cobra.Command{
Use: "watch",
Short: "Watch system events",
Run: func(cmd *cobra.Command, args []string) {
watchSysEvent(GetCmdCtx(cmd))
},
})
}

// watchSysEvent subscribes to the system event stream over MQ and prints every
// received event to stdout until the watcher host stops.
func watchSysEvent(cmdCtx *CommandContext) {
	host, err := sysevent.NewWatcherHost(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ))
	if err != nil {
		fmt.Println(err)
		return
	}

	ch := host.Start()
	host.AddWatcherFn(func(event sysevent.SysEvent) {
		fmt.Println(event.String())
	})

	for {
		msg, err := ch.Receive().Wait(context.Background())
		if err != nil {
			fmt.Println(err)
			return
		}

		switch msg := msg.(type) {
		case sysevent.PublishError:
			fmt.Printf("Publish error: %v\n", msg.Err)

		case sysevent.PublisherExited:
			if msg.Err != nil {
				fmt.Printf("Publisher exited with error: %v\n", msg.Err)
			}
			return

		case sysevent.OtherError:
			fmt.Printf("Other error: %v\n", msg.Err)
		}
	}
}

+ 0
- 1
client/internal/services/hub.go View File

@@ -2,7 +2,6 @@ package services

import (
"fmt"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"


+ 2
- 0
client/internal/services/object.go View File

@@ -64,6 +64,8 @@ func (svc *ObjectService) UpdateInfo(userID cdssdk.UserID, updatings []cdsapi.Up
return nil, fmt.Errorf("requsting to coodinator: %w", err)
}

// TODO 考虑产生Update事件

return resp.Successes, nil
}



+ 24
- 0
common/assets/confs/datamap.config.json View File

@@ -0,0 +1,24 @@
{
"logger": {
"output": "file",
"outputFileName": "datamap",
"outputDirectory": "log",
"level": "debug"
},
"db": {
"address": "106.75.6.194:3306",
"account": "root",
"password": "cloudream123456",
"databaseName": "cloudream"
},
"rabbitMQ": {
"address": "106.75.6.194:5672",
"account": "cloudream",
"password": "123456",
"vhost": "/",
"param": {
"retryNum": 5,
"retryInterval": 5000
}
}
}

+ 1
- 0
common/globals/pools.go View File

@@ -24,6 +24,7 @@ func InitMQPool(cfg mq.Config) {
CoordinatorMQPool = coormq.NewPool(cfg)

ScannerMQPool = scmq.NewPool(cfg)

}

var AgentRPCPool *agtrpc.Pool


+ 5
- 0
common/globals/stats.go View File

@@ -0,0 +1,5 @@
package stgglb

import "gitlink.org.cn/cloudream/storage/common/pkgs/servicestats"

// Stats is the process-wide service-statistics host. Its collectors (e.g.
// HubTransfer, HubStorageTransfer) are nil until the corresponding Setup*
// method is called during service startup, so users must nil-check them.
var Stats servicestats.StatsHost

+ 517
- 0
common/models/datamap.go View File

@@ -0,0 +1,517 @@
package stgmod

import (
"fmt"
"time"

"gitlink.org.cn/cloudream/common/pkgs/types"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/serder"
)

// SysEvent is a system event: something that happened in the cluster, stamped
// with when it happened, which component reported it (Source), and its
// payload (Body).
type SysEvent struct {
	Timestamp time.Time      `json:"timestamp"`
	Source    SysEventSource `json:"source"`
	Body      SysEventBody   `json:"body"`
}

// String renders the event as "<time> [<source>] <body>" for logging.
func (e *SysEvent) String() string {
	return fmt.Sprintf("%v [%v] %+v", e.Timestamp.Format("2006-01-02 15:04:05"), e.Source, e.Body)
}

// SysEventSource identifies which component reported an event.
type SysEventSource interface {
	GetSourceType() string
}

// Register the concrete source types as an internally-tagged union: the JSON
// "type" field selects which struct to decode into.
var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[SysEventSource](
	(*SourceCoordinator)(nil),
	(*SourceScanner)(nil),
	(*SourceHub)(nil),
)), "type")

// SourceCoordinator marks events reported by the coordinator service.
type SourceCoordinator struct {
	serder.Metadata `union:"Coordinator"`
	Type            string `json:"type"`
}

func (s *SourceCoordinator) GetSourceType() string {
	return "Coordinator"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (s *SourceCoordinator) OnUnionSerializing() {
	s.Type = s.GetSourceType()
}

func (s *SourceCoordinator) String() string {
	return "Coordinator"
}

// SourceScanner marks events reported by the scanner service.
type SourceScanner struct {
	serder.Metadata `union:"Scanner"`
	Type            string `json:"type"`
}

func (s *SourceScanner) GetSourceType() string {
	return "Scanner"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (s *SourceScanner) OnUnionSerializing() {
	s.Type = s.GetSourceType()
}

func (s *SourceScanner) String() string {
	return "Scanner"
}

// SourceHub marks events reported by a specific hub (agent), carrying its
// identity for display and filtering.
type SourceHub struct {
	serder.Metadata `union:"Hub"`
	Type            string       `json:"type"`
	HubID           cdssdk.HubID `json:"hubID"`
	HubName         string       `json:"hubName"`
}

func (s *SourceHub) GetSourceType() string {
	return "Hub"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (s *SourceHub) OnUnionSerializing() {
	s.Type = s.GetSourceType()
}

func (s *SourceHub) String() string {
	return fmt.Sprintf("Hub(%d, %s)", s.HubID, s.HubName)
}

// SysEventBody is the payload of a system event.
type SysEventBody interface {
	GetBodyType() string
}

// Register every event body type as an internally-tagged union keyed on the
// JSON "type" field. New body types must be added here to be serializable.
var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[SysEventBody](
	(*BodyNewHub)(nil),
	(*BodyHubUpdated)(nil),
	(*BodyHubDeleted)(nil),

	(*BodyNewStorage)(nil),
	(*BodyStorageUpdated)(nil),
	(*BodyStorageDeleted)(nil),

	(*BodyStorageStats)(nil),
	(*BodyHubTransferStats)(nil),
	(*BodyHubStorageTransferStats)(nil),
	(*BodyBlockTransfer)(nil),
	(*BodyBlockDistribution)(nil),

	(*BodyNewOrUpdateObject)(nil),
	(*BodyObjectInfoUpdated)(nil),
	(*BodyObjectDeleted)(nil),

	(*BodyNewPackage)(nil),
	(*BodyPackageCloned)(nil),
	(*BodyPackageDeleted)(nil),

	(*BodyNewBucket)(nil),
	(*BodyBucketDeleted)(nil),
)), "type")

// BodyNewHub is published when a new hub is registered.
type BodyNewHub struct {
	serder.Metadata `union:"NewHub"`
	Type            string     `json:"type"`
	Info            cdssdk.Hub `json:"info"`
}

func (b *BodyNewHub) GetBodyType() string {
	return "NewHub"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyNewHub) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyHubUpdated is published when a hub's information changes.
type BodyHubUpdated struct {
	serder.Metadata `union:"HubUpdated"`
	Type            string     `json:"type"`
	Info            cdssdk.Hub `json:"info"`
}

func (b *BodyHubUpdated) GetBodyType() string {
	return "HubUpdated"
}

func (b *BodyHubUpdated) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyHubDeleted is published when a hub is removed.
type BodyHubDeleted struct {
	serder.Metadata `union:"HubDeleted"`
	Type            string       `json:"type"`
	HubID           cdssdk.HubID `json:"hubID"`
}

func (b *BodyHubDeleted) GetBodyType() string {
	return "HubDeleted"
}

func (b *BodyHubDeleted) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyNewStorage is published when a new storage is registered.
// NOTE(review): field order (Info before Type) differs from every other body
// struct, which lists Type first; this changes JSON key order but is
// otherwise harmless.
type BodyNewStorage struct {
	serder.Metadata `union:"NewStorage"`
	Info            cdssdk.Storage `json:"info"`
	Type            string         `json:"type"`
}

func (b *BodyNewStorage) GetBodyType() string {
	return "NewStorage"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyNewStorage) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyStorageUpdated is published when a storage's information changes.
type BodyStorageUpdated struct {
	serder.Metadata `union:"StorageUpdated"`
	Type            string         `json:"type"`
	Info            cdssdk.Storage `json:"info"`
}

func (b *BodyStorageUpdated) GetBodyType() string {
	return "StorageUpdated"
}

func (b *BodyStorageUpdated) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyStorageDeleted is published when a storage is removed.
type BodyStorageDeleted struct {
	serder.Metadata `union:"StorageDeleted"`
	Type            string           `json:"type"`
	StorageID       cdssdk.StorageID `json:"storageID"`
}

func (b *BodyStorageDeleted) GetBodyType() string {
	return "StorageDeleted"
}

func (b *BodyStorageDeleted) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyStorageStats is a periodic snapshot of a storage's contents.
type BodyStorageStats struct {
	serder.Metadata `union:"StorageStats"`
	Type            string           `json:"type"`
	StorageID       cdssdk.StorageID `json:"storageID"`
	DataCount       int64            `json:"dataCount"`
}

func (b *BodyStorageStats) GetBodyType() string {
	return "StorageStats"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyStorageStats) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyHubTransferStats reports hub-to-hub transfer statistics collected over
// the interval [StartTimestamp, EndTimestamp].
type BodyHubTransferStats struct {
	serder.Metadata `union:"HubTransferStats"`
	Type            string       `json:"type"`
	SourceHubID     cdssdk.HubID `json:"sourceHubID"`
	TargetHubID     cdssdk.HubID `json:"targetHubID"`
	Send            DataTrans    `json:"send"`
	StartTimestamp  time.Time    `json:"startTimestamp"`
	EndTimestamp    time.Time    `json:"endTimestamp"`
}

func (b *BodyHubTransferStats) GetBodyType() string {
	return "HubTransferStats"
}

func (b *BodyHubTransferStats) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// DataTrans aggregates transfer counters over one reporting interval.
type DataTrans struct {
	TotalTransfer      int64 `json:"totalTransfer"`      // total bytes transferred
	RequestCount       int64 `json:"requestCount"`       // number of requests
	FailedRequestCount int64 `json:"failedRequestCount"` // number of failed requests
	AvgTransfer        int64 `json:"avgTransfer"`        // average bytes per request
	MaxTransfer        int64 `json:"maxTransfer"`        // largest single transfer
	MinTransfer        int64 `json:"minTransfer"`        // smallest single transfer
}

// BodyHubStorageTransferStats reports transfer statistics between a hub and
// one of its storages over the interval [StartTimestamp, EndTimestamp].
type BodyHubStorageTransferStats struct {
	serder.Metadata `union:"HubStorageTransferStats"`
	Type            string           `json:"type"`
	HubID           cdssdk.HubID     `json:"hubID"`
	StorageID       cdssdk.StorageID `json:"storageID"`
	Send            DataTrans        `json:"send"`
	Receive         DataTrans        `json:"receive"`
	StartTimestamp  time.Time        `json:"startTimestamp"`
	EndTimestamp    time.Time        `json:"endTimestamp"`
}

func (b *BodyHubStorageTransferStats) GetBodyType() string {
	return "HubStorageTransferStats"
}

func (b *BodyHubStorageTransferStats) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyBlockTransfer records changes to one object's block layout.
type BodyBlockTransfer struct {
	serder.Metadata `union:"BlockTransfer"`
	Type            string           `json:"type"`
	ObjectID        cdssdk.ObjectID  `json:"objectID"`
	PackageID       cdssdk.PackageID `json:"packageID"`
	BlockChanges    []BlockChange    `json:"blockChanges"`
}

func (b *BodyBlockTransfer) GetBodyType() string {
	return "BlockTransfer"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyBlockTransfer) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BlockChange describes one kind of change applied to a block.
type BlockChange interface {
	GetBlockChangeType() string
}

// Register the block-change variants as an internally-tagged union on "type".
var _ = serder.UseTypeUnionInternallyTagged(types.Ref(types.NewTypeUnion[BlockChange](
	(*BlockChangeClone)(nil),
	(*BlockChangeDeleted)(nil),
	(*BlockChangeEnDecode)(nil),
)), "type")

// Block type tags used in Block.BlockType and related fields.
const (
	BlockTypeRaw     = "Raw"
	BlockTypeEC      = "EC"
	BlockTypeSegment = "Segment"
)

// Block identifies one block instance: its kind, index and the storage
// holding it.
type Block struct {
	BlockType string           `json:"blockType"`
	Index     int              `json:"index"`
	StorageID cdssdk.StorageID `json:"storageID"`
}

// DataTransfer records bytes moved from one storage to another.
type DataTransfer struct {
	SourceStorageID cdssdk.StorageID `json:"sourceStorageID"`
	TargetStorageID cdssdk.StorageID `json:"targetStorageID"`
	TransferBytes   int64            `json:"transferBytes"`
}

// BlockChangeClone records a block being copied from one storage to another.
type BlockChangeClone struct {
	serder.Metadata `union:"Clone"`
	Type            string           `json:"type"`
	BlockType       string           `json:"blockType"`
	Index           int              `json:"index"`
	SourceStorageID cdssdk.StorageID `json:"sourceStorageID"`
	TargetStorageID cdssdk.StorageID `json:"targetStorageID"`
	TransferBytes   int64            `json:"transferBytes"`
}

func (b *BlockChangeClone) GetBlockChangeType() string {
	return "Clone"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BlockChangeClone) OnUnionSerializing() {
	b.Type = b.GetBlockChangeType()
}

// BlockChangeDeleted records a block being removed from a storage.
// NOTE(review): unlike Clone, it carries no BlockType — confirm a block is
// identifiable by Index+StorageID alone for consumers.
type BlockChangeDeleted struct {
	serder.Metadata `union:"Deleted"`
	Type            string           `json:"type"`
	Index           int              `json:"index"`
	StorageID       cdssdk.StorageID `json:"storageID"`
}

func (b *BlockChangeDeleted) GetBlockChangeType() string {
	return "Deleted"
}

func (b *BlockChangeDeleted) OnUnionSerializing() {
	b.Type = b.GetBlockChangeType()
}

// BlockChangeEnDecode records blocks being produced from other blocks
// (encode/decode), together with the data movement that required.
type BlockChangeEnDecode struct {
	serder.Metadata `union:"EnDecode"`
	Type            string         `json:"type"`
	SourceBlocks    []Block        `json:"sourceBlocks,omitempty"`
	TargetBlocks    []Block        `json:"targetBlocks,omitempty"`
	DataTransfers   []DataTransfer `json:"dataTransfers,omitempty"`
}

func (b *BlockChangeEnDecode) GetBlockChangeType() string {
	return "EnDecode"
}

func (b *BlockChangeEnDecode) OnUnionSerializing() {
	b.Type = b.GetBlockChangeType()
}

// BodyBlockDistribution is a snapshot of how one object's blocks are
// distributed across storages, with derived metrics (fault tolerance,
// redundancy, average access cost) and the transfers that produced it.
type BodyBlockDistribution struct {
	serder.Metadata   `union:"BlockDistribution"`
	Type              string                        `json:"type"`
	ObjectID          cdssdk.ObjectID               `json:"objectID"`
	PackageID         cdssdk.PackageID              `json:"packageID"`
	Path              string                        `json:"path"`
	Size              int64                         `json:"size"`
	FileHash          cdssdk.FileHash               `json:"fileHash"`
	FaultTolerance    float64                       `json:"faultTolerance"`
	Redundancy        float64                       `json:"redundancy"`
	AvgAccessCost     float64                       `json:"avgAccessCost"`
	BlockDistribution []BlockDistributionObjectInfo `json:"blockDistribution"`
	DataTransfers     []DataTransfer                `json:"dataTransfers"`
}

func (b *BodyBlockDistribution) GetBodyType() string {
	return "BlockDistribution"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyBlockDistribution) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BlockDistributionObjectInfo locates one block of an object.
// NOTE(review): the json tag here is "type" while the BlockChange structs use
// "blockType" for the same concept — confirm consumers expect this.
type BlockDistributionObjectInfo struct {
	BlockType string           `json:"type"`
	Index     int              `json:"index"`
	StorageID cdssdk.StorageID `json:"storageID"`
}

// BodyNewOrUpdateObject is published when an object is created or
// re-uploaded, including where its blocks landed.
type BodyNewOrUpdateObject struct {
	serder.Metadata   `union:"NewOrUpdateObject"`
	Type              string                        `json:"type"`
	Info              cdssdk.Object                 `json:"info"`
	BlockDistribution []BlockDistributionObjectInfo `json:"blockDistribution"`
}

func (b *BodyNewOrUpdateObject) GetBodyType() string {
	return "NewOrUpdateObject"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyNewOrUpdateObject) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyObjectInfoUpdated is published when an object's basic information
// (not its data) is updated.
type BodyObjectInfoUpdated struct {
	serder.Metadata `union:"ObjectInfoUpdated"`
	Type            string        `json:"type"`
	Object          cdssdk.Object `json:"object"`
}

func (b *BodyObjectInfoUpdated) GetBodyType() string {
	return "ObjectInfoUpdated"
}

func (b *BodyObjectInfoUpdated) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyObjectDeleted is published when an object is deleted.
type BodyObjectDeleted struct {
	serder.Metadata `union:"ObjectDeleted"`
	Type            string          `json:"type"`
	ObjectID        cdssdk.ObjectID `json:"objectID"`
}

func (b *BodyObjectDeleted) GetBodyType() string {
	return "ObjectDeleted"
}

func (b *BodyObjectDeleted) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyNewPackage is published when a new package is created.
type BodyNewPackage struct {
	serder.Metadata `union:"NewPackage"`
	Type            string         `json:"type"`
	Info            cdssdk.Package `json:"info"`
}

func (b *BodyNewPackage) GetBodyType() string {
	return "NewPackage"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyNewPackage) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyPackageCloned is published when a package is cloned, carrying both the
// source package ID and the resulting new package.
type BodyPackageCloned struct {
	serder.Metadata `union:"PackageCloned"`
	Type            string           `json:"type"`
	SourcePackageID cdssdk.PackageID `json:"sourcePackageID"`
	NewPackage      cdssdk.Package   `json:"newPackage"`
}

func (b *BodyPackageCloned) GetBodyType() string {
	return "PackageCloned"
}

func (b *BodyPackageCloned) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyPackageDeleted is published when a package is deleted.
type BodyPackageDeleted struct {
	serder.Metadata `union:"PackageDeleted"`
	Type            string           `json:"type"`
	PackageID       cdssdk.PackageID `json:"packageID"`
}

func (b *BodyPackageDeleted) GetBodyType() string {
	return "PackageDeleted"
}

func (b *BodyPackageDeleted) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyNewBucket is published when a new bucket is created.
type BodyNewBucket struct {
	serder.Metadata `union:"NewBucket"`
	Type            string        `json:"type"`
	Info            cdssdk.Bucket `json:"info"`
}

func (b *BodyNewBucket) GetBodyType() string {
	return "NewBucket"
}

// OnUnionSerializing stamps the union tag into Type just before serialization.
func (b *BodyNewBucket) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

// BodyBucketDeleted is published when a bucket is deleted.
type BodyBucketDeleted struct {
	serder.Metadata `union:"BucketDeleted"`
	Type            string          `json:"type"`
	BucketID        cdssdk.BucketID `json:"bucketID"`
}

func (b *BodyBucketDeleted) GetBodyType() string {
	return "BucketDeleted"
}

func (b *BodyBucketDeleted) OnUnionSerializing() {
	b.Type = b.GetBodyType()
}

+ 2
- 80
common/pkgs/db2/object.go View File

@@ -298,12 +298,12 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []

if len(affectedObjIDs) > 0 {
// 批量删除 ObjectBlock
if err := ctx.Table("ObjectBlock").Where("ObjectID IN ?", affectedObjIDs).Delete(&stgmod.ObjectBlock{}).Error; err != nil {
if err := db.ObjectBlock().BatchDeleteByObjectID(ctx, affectedObjIDs); err != nil {
return nil, fmt.Errorf("batch delete object blocks: %w", err)
}

// 批量删除 PinnedObject
if err := ctx.Table("PinnedObject").Where("ObjectID IN ?", affectedObjIDs).Delete(&cdssdk.PinnedObject{}).Error; err != nil {
if err := db.PinnedObject().BatchDeleteByObjectID(ctx, affectedObjIDs); err != nil {
return nil, fmt.Errorf("batch delete pinned objects: %w", err)
}
}
@@ -343,84 +343,6 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
return affectedObjs, nil
}

func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.UpdatingObjectRedundancy) error {
if len(objs) == 0 {
return nil
}

nowTime := time.Now()
objIDs := make([]cdssdk.ObjectID, 0, len(objs))
dummyObjs := make([]cdssdk.Object, 0, len(objs))
for _, obj := range objs {
objIDs = append(objIDs, obj.ObjectID)
dummyObjs = append(dummyObjs, cdssdk.Object{
ObjectID: obj.ObjectID,
Redundancy: obj.Redundancy,
CreateTime: nowTime, // 实际不会更新,只因为不能是0值
UpdateTime: nowTime,
})
}

err := db.Object().BatchUpdateColumns(ctx, dummyObjs, []string{"Redundancy", "UpdateTime"})
if err != nil {
return fmt.Errorf("batch update object redundancy: %w", err)
}

// 删除原本所有的编码块记录,重新添加
err = db.ObjectBlock().BatchDeleteByObjectID(ctx, objIDs)
if err != nil {
return fmt.Errorf("batch delete object blocks: %w", err)
}

// 删除原本Pin住的Object。暂不考虑FileHash没有变化的情况
err = db.PinnedObject().BatchDeleteByObjectID(ctx, objIDs)
if err != nil {
return fmt.Errorf("batch delete pinned object: %w", err)
}

blocks := make([]stgmod.ObjectBlock, 0, len(objs))
for _, obj := range objs {
blocks = append(blocks, obj.Blocks...)
}
err = db.ObjectBlock().BatchCreate(ctx, blocks)
if err != nil {
return fmt.Errorf("batch create object blocks: %w", err)
}

caches := make([]model.Cache, 0, len(objs))
for _, obj := range objs {
for _, blk := range obj.Blocks {
caches = append(caches, model.Cache{
FileHash: blk.FileHash,
StorageID: blk.StorageID,
CreateTime: nowTime,
Priority: 0,
})
}
}
err = db.Cache().BatchCreate(ctx, caches)
if err != nil {
return fmt.Errorf("batch create object caches: %w", err)
}

pinneds := make([]cdssdk.PinnedObject, 0, len(objs))
for _, obj := range objs {
for _, p := range obj.PinnedAt {
pinneds = append(pinneds, cdssdk.PinnedObject{
ObjectID: obj.ObjectID,
StorageID: p,
CreateTime: nowTime,
})
}
}
err = db.PinnedObject().BatchTryCreate(ctx, pinneds)
if err != nil {
return fmt.Errorf("batch create pinned objects: %w", err)
}

return nil
}

func (db *ObjectDB) BatchDelete(ctx SQLContext, ids []cdssdk.ObjectID) error {
if len(ids) == 0 {
return nil


+ 19
- 4
common/pkgs/ioswitch2/agent_worker.go View File

@@ -7,6 +7,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
"gitlink.org.cn/cloudream/common/pkgs/types"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/serder"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
@@ -28,7 +29,7 @@ func (w *AgentWorker) NewClient() (exec.WorkerClient, error) {
return nil, err
}

return &AgentWorkerClient{cli: cli}, nil
return &AgentWorkerClient{hubID: w.Hub.HubID, cli: cli}, nil
}

func (w *AgentWorker) String() string {
@@ -45,20 +46,34 @@ func (w *AgentWorker) Equals(worker exec.WorkerInfo) bool {
}

type AgentWorkerClient struct {
cli *agtrpc.PoolClient
hubID cdssdk.HubID
cli *agtrpc.PoolClient
}

func (c *AgentWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error {
return c.cli.ExecuteIOPlan(ctx, plan)
}
func (c *AgentWorkerClient) SendStream(ctx context.Context, planID exec.PlanID, id exec.VarID, stream io.ReadCloser) error {
return c.cli.SendStream(ctx, planID, id, stream)
return c.cli.SendStream(ctx, planID, id, io2.CounterCloser(stream, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordOutput(c.hubID, cnt, err == nil || err == io.EOF)
}
}))
}
func (c *AgentWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error {
return c.cli.SendVar(ctx, planID, id, value)
}
func (c *AgentWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) {
return c.cli.GetStream(ctx, planID, streamID, signalID, signal)
str, err := c.cli.GetStream(ctx, planID, streamID, signalID, signal)
if err != nil {
return nil, err
}

return io2.CounterCloser(str, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordInput(c.hubID, cnt, err == nil || err == io.EOF)
}
}), nil
}
func (c *AgentWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) {
return c.cli.GetVar(ctx, planID, varID, signalID, signal)


+ 20
- 4
common/pkgs/ioswitch2/http_hub_worker.go View File

@@ -8,6 +8,8 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/ioswitch/exec"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/sdks/storage/cdsapi"
"gitlink.org.cn/cloudream/common/utils/io2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
)

type HttpHubWorker struct {
@@ -27,7 +29,7 @@ func (w *HttpHubWorker) NewClient() (exec.WorkerClient, error) {
return nil, err
}

return &HttpHubWorkerClient{cli: cli}, nil
return &HttpHubWorkerClient{hubID: w.Hub.HubID, cli: cli}, nil
}

func (w *HttpHubWorker) String() string {
@@ -44,7 +46,8 @@ func (w *HttpHubWorker) Equals(worker exec.WorkerInfo) bool {
}

type HttpHubWorkerClient struct {
cli *cdsapi.Client
hubID cdssdk.HubID
cli *cdsapi.Client
}

func (c *HttpHubWorkerClient) ExecutePlan(ctx context.Context, plan exec.Plan) error {
@@ -58,7 +61,11 @@ func (c *HttpHubWorkerClient) SendStream(ctx context.Context, planID exec.PlanID
PlanID: planID,
VarID: id,
},
Stream: stream,
Stream: io2.CounterCloser(stream, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordOutput(c.hubID, cnt, err == nil || err == io.EOF)
}
}),
})
}
func (c *HttpHubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, id exec.VarID, value exec.VarValue) error {
@@ -69,12 +76,21 @@ func (c *HttpHubWorkerClient) SendVar(ctx context.Context, planID exec.PlanID, i
})
}
func (c *HttpHubWorkerClient) GetStream(ctx context.Context, planID exec.PlanID, streamID exec.VarID, signalID exec.VarID, signal exec.VarValue) (io.ReadCloser, error) {
return c.cli.GetStream(cdsapi.GetStreamReq{
str, err := c.cli.GetStream(cdsapi.GetStreamReq{
PlanID: planID,
VarID: streamID,
SignalID: signalID,
Signal: signal,
})
if err != nil {
return nil, err
}

return io2.CounterCloser(str, func(cnt int64, err error) {
if stgglb.Stats.HubTransfer != nil {
stgglb.Stats.HubTransfer.RecordInput(c.hubID, cnt, err == nil || err == io.EOF)
}
}), nil
}
func (c *HttpHubWorkerClient) GetVar(ctx context.Context, planID exec.PlanID, varID exec.VarID, signalID exec.VarID, signal exec.VarValue) (exec.VarValue, error) {
resp, err := c.cli.GetVar(cdsapi.GetVarReq{


+ 1
- 0
common/pkgs/mq/consts.go View File

@@ -5,6 +5,7 @@ import "fmt"
const (
COORDINATOR_QUEUE_NAME = "Coordinator"
SCANNER_QUEUE_NAME = "Scanner"
DATAMAP_QUEUE_NAME = "DataMap"
)

func MakeAgentQueueName(id int64) string {


+ 5
- 2
common/pkgs/mq/coordinator/object.go View File

@@ -272,6 +272,7 @@ type DeleteObjects struct {

type DeleteObjectsResp struct {
mq.MessageBodyBase
Successes []cdssdk.ObjectID `json:"successes"`
}

func ReqDeleteObjects(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) *DeleteObjects {
@@ -280,8 +281,10 @@ func ReqDeleteObjects(userID cdssdk.UserID, objectIDs []cdssdk.ObjectID) *Delete
ObjectIDs: objectIDs,
}
}
func RespDeleteObjects() *DeleteObjectsResp {
return &DeleteObjectsResp{}
func RespDeleteObjects(sucs []cdssdk.ObjectID) *DeleteObjectsResp {
return &DeleteObjectsResp{
Successes: sucs,
}
}
func (client *Client) DeleteObjects(msg *DeleteObjects) (*DeleteObjectsResp, error) {
return mq.Request(Service.DeleteObjects, client.rabbitCli, msg)


+ 3
- 5
common/pkgs/mq/coordinator/package.go View File

@@ -120,9 +120,8 @@ var _ = Register(Service.UpdatePackage)

type UpdatePackage struct {
mq.MessageBodyBase
PackageID cdssdk.PackageID `json:"packageID"`
Adds []AddObjectEntry `json:"adds"`
Deletes []cdssdk.ObjectID `json:"deletes"`
PackageID cdssdk.PackageID `json:"packageID"`
Adds []AddObjectEntry `json:"adds"`
}
type UpdatePackageResp struct {
mq.MessageBodyBase
@@ -136,11 +135,10 @@ type AddObjectEntry struct {
StorageIDs []cdssdk.StorageID `json:"storageIDs"`
}

func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectEntry, deletes []cdssdk.ObjectID) *UpdatePackage {
func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectEntry) *UpdatePackage {
return &UpdatePackage{
PackageID: packageID,
Adds: adds,
Deletes: deletes,
}
}
func NewUpdatePackageResp(added []cdssdk.Object) *UpdatePackageResp {


+ 103
- 0
common/pkgs/servicestats/hub_strorage_transfer.go View File

@@ -0,0 +1,103 @@
package servicestats

import (
"math"
"sync"
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
)

// HubStorageTransferStats collects per-storage transfer statistics for the
// hub this process runs on. Access is serialized through lock, so the
// Record*/Reset/DumpData methods may be called concurrently.
type HubStorageTransferStats struct {
	data      HubStorageTransferStatsData
	fromHubID cdssdk.HubID // hub performing the transfers being recorded
	lock      *sync.Mutex  // guards data
}

// HubStorageTransferStatsData is the collected statistics for one window.
type HubStorageTransferStatsData struct {
	Entries   map[cdssdk.StorageID]*HubStorageTransferStatsEntry
	StartTime time.Time // start of the current statistics window
}

// HubStorageTransferStatsEntry aggregates transfers between this hub and a
// single destination storage.
type HubStorageTransferStatsEntry struct {
	DestStorageID cdssdk.StorageID

	// Upload (hub -> storage) statistics.
	OutputBytes    int64 // total bytes uploaded
	MaxOutputBytes int64 // largest single upload
	MinOutputBytes int64 // smallest single upload; math.MaxInt64 until first record
	TotalOutput    int64 // number of uploads
	SuccessOutput  int64 // number of successful uploads

	// Download (storage -> hub) statistics.
	InputBytes    int64 // total bytes downloaded
	MaxInputBytes int64 // largest single download
	MinInputBytes int64 // smallest single download; math.MaxInt64 until first record
	TotalInput    int64 // number of downloads
	SuccessInput  int64 // number of successful downloads
}

// RecordUpload accumulates statistics for one upload (output) to the
// storage identified by dstStorageID. transferBytes is the byte count of
// this transfer; isSuccess marks whether it completed without error.
func (s *HubStorageTransferStats) RecordUpload(dstStorageID cdssdk.StorageID, transferBytes int64, isSuccess bool) {
	s.lock.Lock()
	defer s.lock.Unlock()

	ent, ok := s.data.Entries[dstStorageID]
	if !ok || ent == nil {
		// First transfer towards this storage: seed the minimums with
		// MaxInt64 so the first real value always replaces them.
		ent = &HubStorageTransferStatsEntry{
			DestStorageID:  dstStorageID,
			MinInputBytes:  math.MaxInt64,
			MinOutputBytes: math.MaxInt64,
		}
		s.data.Entries[dstStorageID] = ent
	}

	ent.OutputBytes += transferBytes
	ent.MaxOutputBytes = math2.Max(ent.MaxOutputBytes, transferBytes)
	ent.MinOutputBytes = math2.Min(ent.MinOutputBytes, transferBytes)
	ent.TotalOutput++
	if isSuccess {
		ent.SuccessOutput++
	}
}

// RecordDownload accumulates statistics for one download (input) from the
// storage identified by dstStorageID. transferBytes is the number of bytes
// received; isSuccess marks whether the transfer completed without error.
func (s *HubStorageTransferStats) RecordDownload(dstStorageID cdssdk.StorageID, transferBytes int64, isSuccess bool) {
	s.lock.Lock()
	defer s.lock.Unlock()

	e := s.data.Entries[dstStorageID]
	if e == nil {
		// First transfer for this storage: seed minimums with MaxInt64 so
		// the first real value always replaces them.
		e = &HubStorageTransferStatsEntry{
			DestStorageID:  dstStorageID,
			MinInputBytes:  math.MaxInt64,
			MinOutputBytes: math.MaxInt64,
		}
		s.data.Entries[dstStorageID] = e
	}
	e.InputBytes += transferBytes
	e.MaxInputBytes = math2.Max(e.MaxInputBytes, transferBytes)
	e.MinInputBytes = math2.Min(e.MinInputBytes, transferBytes)
	if isSuccess {
		e.SuccessInput++
	}
	// Fix: TotalInput was never incremented, leaving it stuck at zero and
	// making the SuccessInput/TotalInput ratio meaningless. This mirrors
	// RecordUpload's TotalOutput++ and HubTransferStats.RecordInput.
	e.TotalInput++
}

// Reset discards all collected entries and starts a new statistics window,
// returning the new window's start time.
func (s *HubStorageTransferStats) Reset() time.Time {
	s.lock.Lock()
	defer s.lock.Unlock()

	now := time.Now()
	s.data = HubStorageTransferStatsData{
		Entries:   make(map[cdssdk.StorageID]*HubStorageTransferStatsEntry),
		StartTime: now,
	}
	return now
}

// DumpData returns a deep copy of the current statistics, so the caller can
// inspect it without racing against concurrent Record* calls.
func (s *HubStorageTransferStats) DumpData() HubStorageTransferStatsData {
	s.lock.Lock()
	defer s.lock.Unlock()

	out := HubStorageTransferStatsData{
		StartTime: s.data.StartTime,
		Entries:   make(map[cdssdk.StorageID]*HubStorageTransferStatsEntry, len(s.data.Entries)),
	}
	for id, ent := range s.data.Entries {
		cp := *ent
		out.Entries[id] = &cp
	}
	return out
}

+ 103
- 0
common/pkgs/servicestats/hub_transfter.go View File

@@ -0,0 +1,103 @@
package servicestats

import (
"math"
"sync"
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/math2"
)

// HubTransferStats collects hub-to-hub transfer statistics for the hub this
// process runs on. Access is serialized through lock, so the
// Record*/Reset/DumpData methods may be called concurrently.
type HubTransferStats struct {
	data      HubTransferStatsData
	fromHubID cdssdk.HubID // hub performing the transfers being recorded
	lock      *sync.Mutex  // guards data
}

// HubTransferStatsData is the collected statistics for one window.
type HubTransferStatsData struct {
	Entries   map[cdssdk.HubID]*HubTransferStatsEntry
	StartTime time.Time // start of the current statistics window
}

// HubTransferStatsEntry aggregates transfers between this hub and a single
// destination hub.
type HubTransferStatsEntry struct {
	DestHubID cdssdk.HubID

	// Output (this hub -> DestHubID) statistics.
	OutputBytes    int64 // total bytes sent
	MaxOutputBytes int64 // largest single send
	MinOutputBytes int64 // smallest single send; math.MaxInt64 until first record
	TotalOutput    int64 // number of sends
	SuccessOutput  int64 // number of successful sends

	// Input (DestHubID -> this hub) statistics.
	InputBytes    int64 // total bytes received
	MaxInputBytes int64 // largest single receive
	MinInputBytes int64 // smallest single receive; math.MaxInt64 until first record
	TotalInput    int64 // number of receives
	SuccessInput  int64 // number of successful receives
}

// RecordOutput accumulates statistics for one outbound transfer to the hub
// identified by dstHubID. transferBytes is the byte count of this transfer;
// isSuccess marks whether it completed without error.
func (s *HubTransferStats) RecordOutput(dstHubID cdssdk.HubID, transferBytes int64, isSuccess bool) {
	s.lock.Lock()
	defer s.lock.Unlock()

	ent, ok := s.data.Entries[dstHubID]
	if !ok || ent == nil {
		// First transfer towards this hub: seed the minimums with MaxInt64
		// so the first real value always replaces them.
		ent = &HubTransferStatsEntry{
			DestHubID:      dstHubID,
			MinInputBytes:  math.MaxInt64,
			MinOutputBytes: math.MaxInt64,
		}
		s.data.Entries[dstHubID] = ent
	}

	ent.OutputBytes += transferBytes
	ent.MaxOutputBytes = math2.Max(ent.MaxOutputBytes, transferBytes)
	ent.MinOutputBytes = math2.Min(ent.MinOutputBytes, transferBytes)
	ent.TotalOutput++
	if isSuccess {
		ent.SuccessOutput++
	}
}

// RecordInput accumulates statistics for one inbound transfer from the hub
// identified by dstHubID. transferBytes is the byte count of this transfer;
// isSuccess marks whether it completed without error.
func (s *HubTransferStats) RecordInput(dstHubID cdssdk.HubID, transferBytes int64, isSuccess bool) {
	s.lock.Lock()
	defer s.lock.Unlock()

	ent, ok := s.data.Entries[dstHubID]
	if !ok || ent == nil {
		// First transfer for this hub: seed the minimums with MaxInt64 so
		// the first real value always replaces them.
		ent = &HubTransferStatsEntry{
			DestHubID:      dstHubID,
			MinInputBytes:  math.MaxInt64,
			MinOutputBytes: math.MaxInt64,
		}
		s.data.Entries[dstHubID] = ent
	}

	ent.InputBytes += transferBytes
	ent.MaxInputBytes = math2.Max(ent.MaxInputBytes, transferBytes)
	ent.MinInputBytes = math2.Min(ent.MinInputBytes, transferBytes)
	ent.TotalInput++
	if isSuccess {
		ent.SuccessInput++
	}
}

// Reset discards all collected entries and starts a new statistics window,
// returning the new window's start time.
func (s *HubTransferStats) Reset() time.Time {
	s.lock.Lock()
	defer s.lock.Unlock()

	now := time.Now()
	s.data = HubTransferStatsData{
		Entries:   make(map[cdssdk.HubID]*HubTransferStatsEntry),
		StartTime: now,
	}
	return now
}

// DumpData returns a deep copy of the current statistics, so the caller can
// inspect it without racing against concurrent Record* calls.
func (s *HubTransferStats) DumpData() HubTransferStatsData {
	s.lock.Lock()
	defer s.lock.Unlock()

	out := HubTransferStatsData{
		StartTime: s.data.StartTime,
		Entries:   make(map[cdssdk.HubID]*HubTransferStatsEntry, len(s.data.Entries)),
	}
	for id, ent := range s.data.Entries {
		cp := *ent
		out.Entries[id] = &cp
	}
	return out
}

+ 37
- 0
common/pkgs/servicestats/service_stats.go View File

@@ -0,0 +1,37 @@
package servicestats

import (
"sync"
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

// StatsHost aggregates the statistics collectors owned by this process.
// A collector is nil until the corresponding Setup* method is called.
type StatsHost struct {
	// Hub-to-hub transfer statistics; only counts transfers actively sent
	// or received by the current hub.
	HubTransfer *HubTransferStats
	// Hub-to-storage transfer statistics; only counts transfers actively
	// sent or received by the current hub.
	HubStorageTransfer *HubStorageTransferStats
}

// SetupHubTransfer initializes the hub-to-hub transfer collector.
// fromHubID identifies the hub this process runs on.
func (h *StatsHost) SetupHubTransfer(fromHubID cdssdk.HubID) {
	data := HubTransferStatsData{
		Entries:   make(map[cdssdk.HubID]*HubTransferStatsEntry),
		StartTime: time.Now(),
	}
	h.HubTransfer = &HubTransferStats{
		data:      data,
		fromHubID: fromHubID,
		lock:      &sync.Mutex{},
	}
}

// SetupHubStorageTransfer initializes the hub-to-storage transfer collector.
// fromHubID identifies the hub this process runs on.
func (h *StatsHost) SetupHubStorageTransfer(fromHubID cdssdk.HubID) {
	data := HubStorageTransferStatsData{
		Entries:   make(map[cdssdk.StorageID]*HubStorageTransferStatsEntry),
		StartTime: time.Now(),
	}
	h.HubStorageTransfer = &HubStorageTransferStats{
		data:      data,
		fromHubID: fromHubID,
		lock:      &sync.Mutex{},
	}
}

+ 12
- 0
common/pkgs/storage/agtpool/pool.go View File

@@ -74,6 +74,18 @@ func (m *AgentPool) GetAgent(stgID cdssdk.StorageID) (types.StorageAgent, error)
return stg.Agent, nil
}

// GetAllAgents returns a snapshot slice of every storage agent currently
// registered in the pool.
func (m *AgentPool) GetAllAgents() []types.StorageAgent {
	m.lock.Lock()
	defer m.lock.Unlock()

	out := make([]types.StorageAgent, 0, len(m.storages))
	for _, entry := range m.storages {
		out = append(out, entry.Agent)
	}
	return out
}

// 查找指定Storage的ShardStore组件
func (m *AgentPool) GetShardStore(stgID cdssdk.StorageID) (types.ShardStore, error) {
m.lock.Lock()


+ 14
- 3
common/pkgs/storage/local/shard_store.go View File

@@ -14,6 +14,7 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)

@@ -133,7 +134,11 @@ func (s *ShardStore) Create(stream io.Reader) (types.FileInfo, error) {
return types.FileInfo{}, err
}

size, hash, err := s.writeTempFile(file, stream)
counter := io2.Counter(stream)
size, hash, err := s.writeTempFile(file, counter)
if stgglb.Stats.HubStorageTransfer != nil {
stgglb.Stats.HubStorageTransfer.RecordUpload(s.agt.Detail.Storage.StorageID, counter.Count(), err == nil)
}
if err != nil {
// Name是文件完整路径
s.onCreateFailed(file.Name())
@@ -272,11 +277,17 @@ func (s *ShardStore) Open(opt types.OpenOption) (io.ReadCloser, error) {
}
}

var ret io.ReadCloser = file

if opt.Length >= 0 {
return io2.Length(file, opt.Length), nil
ret = io2.Length(ret, opt.Length)
}

return file, nil
return io2.CounterCloser(ret, func(cnt int64, err error) {
if stgglb.Stats.HubStorageTransfer != nil {
stgglb.Stats.HubStorageTransfer.RecordDownload(s.agt.Detail.Storage.StorageID, cnt, err == nil || err == io.EOF)
}
}), nil
}

func (s *ShardStore) Info(hash cdssdk.FileHash) (types.FileInfo, error) {


+ 12
- 1
common/pkgs/storage/s3/shard_store.go View File

@@ -17,6 +17,7 @@ import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/os2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/types"
)
@@ -186,6 +187,9 @@ func (s *ShardStore) createWithAwsSha256(stream io.Reader) (types.FileInfo, erro
Body: counter,
ChecksumAlgorithm: s3types.ChecksumAlgorithmSha256,
})
if stgglb.Stats.HubStorageTransfer != nil {
stgglb.Stats.HubStorageTransfer.RecordUpload(s.Detail.Storage.StorageID, counter.Count(), err == nil)
}
if err != nil {
log.Warnf("uploading file %v: %v", key, err)

@@ -225,6 +229,9 @@ func (s *ShardStore) createWithCalcSha256(stream io.Reader) (types.FileInfo, err
Key: aws.String(key),
Body: counter,
})
if stgglb.Stats.HubStorageTransfer != nil {
stgglb.Stats.HubStorageTransfer.RecordUpload(s.Detail.Storage.StorageID, counter.Count(), err == nil)
}
if err != nil {
log.Warnf("uploading file %v: %v", key, err)

@@ -320,7 +327,11 @@ func (s *ShardStore) Open(opt types.OpenOption) (io.ReadCloser, error) {
return nil, err
}

return resp.Body, nil
return io2.CounterCloser(resp.Body, func(cnt int64, err error) {
if stgglb.Stats.HubStorageTransfer != nil {
stgglb.Stats.HubStorageTransfer.RecordDownload(s.Detail.Storage.StorageID, cnt, err == nil || err == io.EOF)
}
}), nil
}

func (s *ShardStore) Info(hash cdssdk.FileHash) (types.FileInfo, error) {


+ 1
- 1
common/pkgs/storage/types/shard_store.go View File

@@ -55,7 +55,7 @@ type Stats struct {
// 存储服务状态,如果状态正常,此值应该是StatusOK
Status Status
// 文件总数
FileCount int
FileCount int64
// 存储空间总大小
TotalSize int64
// 已使用的存储空间大小,可以超过存储空间总大小


+ 11
- 0
common/pkgs/sysevent/config.go View File

@@ -1,8 +1,19 @@
package sysevent

import "gitlink.org.cn/cloudream/common/pkgs/mq"

type Config struct {
Address string `json:"address"`
Account string `json:"account"`
Password string `json:"password"`
VHost string `json:"vhost"`
}

// ConfigFromMQConfig builds a sysevent Config that reuses an existing
// RabbitMQ connection configuration.
func ConfigFromMQConfig(mqCfg mq.Config) Config {
	var cfg Config
	cfg.Address = mqCfg.Address
	cfg.Account = mqCfg.Account
	cfg.Password = mqCfg.Password
	cfg.VHost = mqCfg.VHost
	return cfg
}

+ 9
- 4
common/pkgs/sysevent/publisher.go View File

@@ -2,10 +2,12 @@ package sysevent

import (
"fmt"
"time"

"github.com/streadway/amqp"
"gitlink.org.cn/cloudream/common/pkgs/async"
"gitlink.org.cn/cloudream/common/utils/serder"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
)

type PublisherEvent interface{}
@@ -107,10 +109,13 @@ func (p *Publisher) Start() *async.UnboundChannel[PublisherEvent] {
return ch
}

// Publish 发布事件,自动补齐时间戳和源信息
func (p *Publisher) Publish(evt SysEvent) {
// TODO 补齐时间戳和源信息
p.eventChan.Send(evt)
// Publish 发布事件,会自动补齐必要信息
func (p *Publisher) Publish(eventBody stgmod.SysEventBody) {
p.eventChan.Send(stgmod.SysEvent{
Timestamp: time.Now(),
Source: p.thisSource,
Body: eventBody,
})
}

// PublishRaw 完全原样发布事件,不补齐任何信息


+ 6
- 2
common/pkgs/sysevent/sysevent.go View File

@@ -1,9 +1,13 @@
package sysevent

import (
stgmod "gitlink.org.cn/cloudream/storage/common/models"
)

const (
SysEventQueueName = "SysEventQueue"
)

type SysEvent = any // TODO 换成具体的类型
type SysEvent = stgmod.SysEvent

type Source = any // TODO 换成具体的类型
type Source = stgmod.SysEventSource

+ 14
- 0
common/pkgs/sysevent/watcher.go View File

@@ -113,9 +113,23 @@ func (w *WatcherHost) AddWatcher(watcher Watcher) {
w.watchers = append(w.watchers, watcher)
}

// AddWatcherFn registers fn as a watcher by wrapping it in an adapter that
// implements the Watcher interface. The adapter is returned so the caller
// can later pass it to RemoveWatcher.
func (w *WatcherHost) AddWatcherFn(fn func(event SysEvent)) Watcher {
	watcher := &fnWatcher{fn: fn}
	w.AddWatcher(watcher)
	return watcher
}

// RemoveWatcher unregisters a previously added watcher. The watcher list is
// guarded by w.lock.
func (w *WatcherHost) RemoveWatcher(watcher Watcher) {
	w.lock.Lock()
	defer w.lock.Unlock()

	w.watchers = lo2.Remove(w.watchers, watcher)
}

// fnWatcher adapts a plain function to the Watcher interface.
type fnWatcher struct {
	fn func(event SysEvent)
}

// OnEvent forwards the event to the wrapped function.
func (w *fnWatcher) OnEvent(event SysEvent) {
	w.fn(event)
}

+ 1
- 1
common/pkgs/uploader/create_load.go View File

@@ -96,7 +96,7 @@ func (u *CreateLoadUploader) Commit() (CreateLoadResult, error) {
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

updateResp, err := coorCli.UpdatePackage(coormq.NewUpdatePackage(u.pkg.PackageID, u.successes, nil))
updateResp, err := coorCli.UpdatePackage(coormq.NewUpdatePackage(u.pkg.PackageID, u.successes))
if err != nil {
return CreateLoadResult{}, fmt.Errorf("updating package: %w", err)
}


+ 1
- 1
common/pkgs/uploader/update.go View File

@@ -100,7 +100,7 @@ func (w *UpdateUploader) Commit() (UpdateResult, error) {
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

updateResp, err := coorCli.UpdatePackage(coormq.NewUpdatePackage(w.pkgID, w.successes, nil))
updateResp, err := coorCli.UpdatePackage(coormq.NewUpdatePackage(w.pkgID, w.successes))
if err != nil {
return UpdateResult{}, fmt.Errorf("updating package: %w", err)
}


+ 47
- 1
coordinator/internal/cmd/serve.go View File

@@ -1,13 +1,16 @@
package cmd

import (
"context"
"fmt"
"os"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gitlink.org.cn/cloudream/storage/coordinator/internal/config"
mymq "gitlink.org.cn/cloudream/storage/coordinator/internal/mq"
)
@@ -30,7 +33,15 @@ func serve(configPath string) {
logger.Fatalf("new db2 failed, err: %s", err.Error())
}

coorSvr, err := coormq.NewServer(mymq.NewService(db2), config.Cfg().RabbitMQ)
// 初始化系统事件发布器
evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &stgmod.SourceCoordinator{})
if err != nil {
logger.Errorf("new sysevent publisher: %v", err)
os.Exit(1)
}
go servePublisher(evtPub)

coorSvr, err := coormq.NewServer(mymq.NewService(db2, evtPub), config.Cfg().RabbitMQ)
if err != nil {
logger.Fatalf("new coordinator server failed, err: %s", err.Error())
}
@@ -46,6 +57,41 @@ func serve(configPath string) {
<-forever
}

// servePublisher starts the system-event publisher and drains its status
// channel, logging publish failures. When the publisher exits (or the
// channel errors out) the whole process is terminated.
func servePublisher(evtPub *sysevent.Publisher) {
	logger.Info("start serving sysevent publisher")

	ch := evtPub.Start()

loop:
	for {
		val, err := ch.Receive().Wait(context.Background())
		if err != nil {
			// Channel-level failure: stop serving entirely.
			logger.Errorf("sysevent publisher stopped with error: %s", err.Error())
			break
		}

		switch val := val.(type) {
		case sysevent.PublishError:
			// A single event failed to publish; keep serving.
			logger.Errorf("publishing event: %v", val)

		case sysevent.PublisherExited:
			if val.Err != nil {
				logger.Errorf("publisher exited with error: %v", val.Err)
			} else {
				logger.Info("publisher exited")
			}
			break loop

		case sysevent.OtherError:
			logger.Errorf("sysevent: %v", val)
		}
	}
	logger.Info("sysevent publisher stopped")

	// TODO For now the process is simply terminated here.
	os.Exit(1)
}

func serveCoorServer(server *coormq.Server, cfg mq.Config) {
logger.Info("start serving command server")



+ 9
- 0
coordinator/internal/mq/bucket.go View File

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"

stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gorm.io/gorm"

@@ -93,6 +94,10 @@ func (svc *Service) CreateBucket(msg *coormq.CreateBucket) (*coormq.CreateBucket
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

svc.evtPub.Publish(&stgmod.BodyNewBucket{
Info: bucket,
})

return mq.ReplyOK(coormq.NewCreateBucketResp(bucket))
}

@@ -133,5 +138,9 @@ func (svc *Service) DeleteBucket(msg *coormq.DeleteBucket) (*coormq.DeleteBucket
return nil, mq.Failed(errorcode.OperationFailed, "delete bucket failed")
}

svc.evtPub.Publish(&stgmod.BodyBucketDeleted{
BucketID: msg.BucketID,
})

return mq.ReplyOK(coormq.NewDeleteBucketResp())
}

+ 142
- 4
coordinator/internal/mq/object.go View File

@@ -3,8 +3,10 @@ package mq
import (
"errors"
"fmt"
"time"

"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2/model"
"gorm.io/gorm"

"github.com/samber/lo"
@@ -192,8 +194,95 @@ func (svc *Service) GetObjectDetails(msg *coormq.GetObjectDetails) (*coormq.GetO
}

func (svc *Service) UpdateObjectRedundancy(msg *coormq.UpdateObjectRedundancy) (*coormq.UpdateObjectRedundancyResp, *mq.CodeMessage) {
err := svc.db2.DoTx(func(tx db2.SQLContext) error {
return svc.db2.Object().BatchUpdateRedundancy(tx, msg.Updatings)
err := svc.db2.DoTx(func(ctx db2.SQLContext) error {
db := svc.db2
objs := msg.Updatings

nowTime := time.Now()
objIDs := make([]cdssdk.ObjectID, 0, len(objs))
for _, obj := range objs {
objIDs = append(objIDs, obj.ObjectID)
}

avaiIDs, err := db.Object().BatchTestObjectID(ctx, objIDs)
if err != nil {
return fmt.Errorf("batch test object id: %w", err)
}

// 过滤掉已经不存在的对象。
// 注意,objIDs没有被过滤,因为后续逻辑不过滤也不会出错
objs = lo.Filter(objs, func(obj coormq.UpdatingObjectRedundancy, _ int) bool {
return avaiIDs[obj.ObjectID]
})

dummyObjs := make([]cdssdk.Object, 0, len(objs))
for _, obj := range objs {
dummyObjs = append(dummyObjs, cdssdk.Object{
ObjectID: obj.ObjectID,
Redundancy: obj.Redundancy,
CreateTime: nowTime, // 实际不会更新,只因为不能是0值
UpdateTime: nowTime,
})
}

err = db.Object().BatchUpdateColumns(ctx, dummyObjs, []string{"Redundancy", "UpdateTime"})
if err != nil {
return fmt.Errorf("batch update object redundancy: %w", err)
}

// 删除原本所有的编码块记录,重新添加
err = db.ObjectBlock().BatchDeleteByObjectID(ctx, objIDs)
if err != nil {
return fmt.Errorf("batch delete object blocks: %w", err)
}

// 删除原本Pin住的Object。暂不考虑FileHash没有变化的情况
err = db.PinnedObject().BatchDeleteByObjectID(ctx, objIDs)
if err != nil {
return fmt.Errorf("batch delete pinned object: %w", err)
}

blocks := make([]stgmod.ObjectBlock, 0, len(objs))
for _, obj := range objs {
blocks = append(blocks, obj.Blocks...)
}
err = db.ObjectBlock().BatchCreate(ctx, blocks)
if err != nil {
return fmt.Errorf("batch create object blocks: %w", err)
}

caches := make([]model.Cache, 0, len(objs))
for _, obj := range objs {
for _, blk := range obj.Blocks {
caches = append(caches, model.Cache{
FileHash: blk.FileHash,
StorageID: blk.StorageID,
CreateTime: nowTime,
Priority: 0,
})
}
}
err = db.Cache().BatchCreate(ctx, caches)
if err != nil {
return fmt.Errorf("batch create object caches: %w", err)
}

pinneds := make([]cdssdk.PinnedObject, 0, len(objs))
for _, obj := range objs {
for _, p := range obj.PinnedAt {
pinneds = append(pinneds, cdssdk.PinnedObject{
ObjectID: obj.ObjectID,
StorageID: p,
CreateTime: nowTime,
})
}
}
err = db.PinnedObject().BatchTryCreate(ctx, pinneds)
if err != nil {
return fmt.Errorf("batch create pinned objects: %w", err)
}

return nil
})
if err != nil {
logger.Warnf("batch updating redundancy: %s", err.Error())
@@ -275,6 +364,8 @@ func pickByObjectIDs[T any](objs []T, objIDs []cdssdk.ObjectID, getID func(T) cd

func (svc *Service) MoveObjects(msg *coormq.MoveObjects) (*coormq.MoveObjectsResp, *mq.CodeMessage) {
var sucs []cdssdk.ObjectID
var evt []*stgmod.BodyObjectInfoUpdated

err := svc.db2.DoTx(func(tx db2.SQLContext) error {
msg.Movings = sort2.Sort(msg.Movings, func(o1, o2 cdsapi.MovingObject) int {
return sort2.Cmp(o1.ObjectID, o2.ObjectID)
@@ -336,6 +427,11 @@ func (svc *Service) MoveObjects(msg *coormq.MoveObjects) (*coormq.MoveObjectsRes
}

sucs = lo.Map(newObjs, func(obj cdssdk.Object, _ int) cdssdk.ObjectID { return obj.ObjectID })
evt = lo.Map(newObjs, func(obj cdssdk.Object, _ int) *stgmod.BodyObjectInfoUpdated {
return &stgmod.BodyObjectInfoUpdated{
Object: obj,
}
})
return nil
})
if err != nil {
@@ -343,6 +439,10 @@ func (svc *Service) MoveObjects(msg *coormq.MoveObjects) (*coormq.MoveObjectsRes
return nil, mq.Failed(errorcode.OperationFailed, "move objects failed")
}

for _, e := range evt {
svc.evtPub.Publish(e)
}

return mq.ReplyOK(coormq.RespMoveObjects(sucs))
}

@@ -453,8 +553,15 @@ func (svc *Service) checkPathChangedObjects(tx db2.SQLContext, userID cdssdk.Use
}

func (svc *Service) DeleteObjects(msg *coormq.DeleteObjects) (*coormq.DeleteObjectsResp, *mq.CodeMessage) {
var sucs []cdssdk.ObjectID
err := svc.db2.DoTx(func(tx db2.SQLContext) error {
err := svc.db2.Object().BatchDelete(tx, msg.ObjectIDs)
avaiIDs, err := svc.db2.Object().BatchTestObjectID(tx, msg.ObjectIDs)
if err != nil {
return fmt.Errorf("batch testing object id: %w", err)
}
sucs = lo.Keys(avaiIDs)

err = svc.db2.Object().BatchDelete(tx, msg.ObjectIDs)
if err != nil {
return fmt.Errorf("batch deleting objects: %w", err)
}
@@ -481,7 +588,13 @@ func (svc *Service) DeleteObjects(msg *coormq.DeleteObjects) (*coormq.DeleteObje
return nil, mq.Failed(errorcode.OperationFailed, "batch delete objects failed")
}

return mq.ReplyOK(coormq.RespDeleteObjects())
for _, objID := range sucs {
svc.evtPub.Publish(&stgmod.BodyObjectDeleted{
ObjectID: objID,
})
}

return mq.ReplyOK(coormq.RespDeleteObjects(sucs))
}

func (svc *Service) CloneObjects(msg *coormq.CloneObjects) (*coormq.CloneObjectsResp, *mq.CodeMessage) {
@@ -494,6 +607,8 @@ func (svc *Service) CloneObjects(msg *coormq.CloneObjects) (*coormq.CloneObjects
Clonings map[string]CloningObject
}

var evt []*stgmod.BodyNewOrUpdateObject

// TODO 要检查用户是否有Object、Package的权限
clonings := make(map[cdssdk.PackageID]*PackageClonings)
for i, cloning := range msg.Clonings {
@@ -600,6 +715,25 @@ func (svc *Service) CloneObjects(msg *coormq.CloneObjects) (*coormq.CloneObjects
for i, cloning := range avaiClonings {
ret[cloning.OrgIndex] = &newObjs[i]
}

for i, cloning := range avaiClonings {
var evtBlks []stgmod.BlockDistributionObjectInfo
blkType := getBlockTypeFromRed(newObjs[i].Redundancy)

oldBlks := avaiDetailsMap[cloning.Cloning.ObjectID].Blocks
for _, blk := range oldBlks {
evtBlks = append(evtBlks, stgmod.BlockDistributionObjectInfo{
BlockType: blkType,
Index: blk.Index,
StorageID: blk.StorageID,
})
}

evt = append(evt, &stgmod.BodyNewOrUpdateObject{
Info: newObjs[i],
BlockDistribution: evtBlks,
})
}
return nil
})

@@ -608,5 +742,9 @@ func (svc *Service) CloneObjects(msg *coormq.CloneObjects) (*coormq.CloneObjects
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

for _, e := range evt {
svc.evtPub.Publish(e)
}

return mq.ReplyOK(coormq.RespCloneObjects(ret))
}

+ 37
- 14
coordinator/internal/mq/package.go View File

@@ -79,6 +79,10 @@ func (svc *Service) CreatePackage(msg *coormq.CreatePackage) (*coormq.CreatePack
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

svc.evtPub.Publish(&stgmod.BodyNewPackage{
Info: pkg,
})

return mq.ReplyOK(coormq.NewCreatePackageResp(pkg))
}

@@ -90,21 +94,11 @@ func (svc *Service) UpdatePackage(msg *coormq.UpdatePackage) (*coormq.UpdatePack
return fmt.Errorf("getting package by id: %w", err)
}

// 先执行删除操作
if len(msg.Deletes) > 0 {
if err := svc.db2.Object().BatchDelete(tx, msg.Deletes); err != nil {
return fmt.Errorf("deleting objects: %w", err)
}
}

// 再执行添加操作
if len(msg.Adds) > 0 {
ad, err := svc.db2.Object().BatchAdd(tx, msg.PackageID, msg.Adds)
if err != nil {
return fmt.Errorf("adding objects: %w", err)
}
added = ad
ad, err := svc.db2.Object().BatchAdd(tx, msg.PackageID, msg.Adds)
if err != nil {
return fmt.Errorf("adding objects: %w", err)
}
added = ad

return nil
})
@@ -113,6 +107,26 @@ func (svc *Service) UpdatePackage(msg *coormq.UpdatePackage) (*coormq.UpdatePack
return nil, mq.Failed(errorcode.OperationFailed, "update package failed")
}

addedMp := make(map[string]cdssdk.Object)
for _, obj := range added {
addedMp[obj.Path] = obj
}

for _, add := range msg.Adds {
var blks []stgmod.BlockDistributionObjectInfo
for _, stgID := range add.StorageIDs {
blks = append(blks, stgmod.BlockDistributionObjectInfo{
BlockType: stgmod.BlockTypeRaw,
StorageID: stgID,
})
}

svc.evtPub.Publish(&stgmod.BodyNewOrUpdateObject{
Info: addedMp[add.Path],
BlockDistribution: blks,
})
}

return mq.ReplyOK(coormq.NewUpdatePackageResp(added))
}

@@ -137,6 +151,10 @@ func (svc *Service) DeletePackage(msg *coormq.DeletePackage) (*coormq.DeletePack
return nil, mq.Failed(errorcode.OperationFailed, "delete package failed")
}

svc.evtPub.Publish(&stgmod.BodyPackageDeleted{
PackageID: msg.PackageID,
})

return mq.ReplyOK(coormq.NewDeletePackageResp())
}

@@ -203,6 +221,11 @@ func (svc *Service) ClonePackage(msg *coormq.ClonePackage) (*coormq.ClonePackage
return nil, mq.Failed(errorcode.OperationFailed, err.Error())
}

svc.evtPub.Publish(&stgmod.BodyPackageCloned{
SourcePackageID: msg.PackageID,
NewPackage: pkg,
})

return mq.ReplyOK(coormq.RespClonePackage(pkg))
}



+ 6
- 3
coordinator/internal/mq/service.go View File

@@ -2,14 +2,17 @@ package mq

import (
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
)

type Service struct {
db2 *db2.DB
db2 *db2.DB
evtPub *sysevent.Publisher
}

func NewService(db2 *db2.DB) *Service {
func NewService(db2 *db2.DB, evtPub *sysevent.Publisher) *Service {
return &Service{
db2: db2,
db2: db2,
evtPub: evtPub,
}
}

+ 23
- 0
coordinator/internal/mq/utils.go View File

@@ -0,0 +1,23 @@
package mq

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
)

// getBlockTypeFromRed maps a redundancy strategy to the block-type string
// used in system events. Unknown strategies map to the empty string.
func getBlockTypeFromRed(red cdssdk.Redundancy) string {
	switch red.(type) {
	case *cdssdk.NoneRedundancy:
		return stgmod.BlockTypeRaw
	case *cdssdk.ECRedundancy, *cdssdk.LRCRedundancy:
		// LRC blocks are reported as EC blocks in events.
		return stgmod.BlockTypeEC
	case *cdssdk.SegmentRedundancy:
		return stgmod.BlockTypeSegment
	default:
		return ""
	}
}

+ 15
- 0
datamap/.env View File

@@ -0,0 +1,15 @@
# 数据库配置
DB_HOST=175.178.223.172
DB_PORT=3306
DB_USER=root
DB_PASSWORD=
DB_NAME=storage_datamap

# RabbitMQ 配置
RABBITMQ_HOST=175.178.223.172
RABBITMQ_PORT=5672
RABBITMQ_USER=guest
RABBITMQ_PASSWORD=

# 服务配置
SERVER_PORT=8080

+ 54
- 0
datamap/internal/config/config.go View File

@@ -0,0 +1,54 @@
package config

import (
"github.com/spf13/viper"
)

type Config struct {
Database DatabaseConfig
RabbitMQ RabbitMQConfig
Server ServerConfig
}

type DatabaseConfig struct {
Host string
Port string
User string
Password string
DBName string
}

type RabbitMQConfig struct {
Host string
Port string
User string
Password string
}

type ServerConfig struct {
Port string
}

// LoadConfig reads service configuration from a ".env" file in the current
// working directory, falling back to process environment variables for any
// key the file does not provide.
//
// Fix: the previous implementation pointed viper at a hard-coded absolute
// path on one developer's Windows machine, which made the service silently
// run with an empty configuration everywhere else.
func LoadConfig() *Config {
	viper.SetConfigFile(".env")
	viper.SetConfigType("env")
	// Allow every key to also be supplied via environment variables.
	viper.AutomaticEnv()
	// Best effort: a missing .env file is acceptable when the environment
	// variables are set, so the read error is deliberately non-fatal.
	_ = viper.ReadInConfig()

	return &Config{
		Database: DatabaseConfig{
			Host:     viper.GetString("DB_HOST"),
			Port:     viper.GetString("DB_PORT"),
			User:     viper.GetString("DB_USER"),
			Password: viper.GetString("DB_PASSWORD"),
			DBName:   viper.GetString("DB_NAME"),
		},
		RabbitMQ: RabbitMQConfig{
			Host:     viper.GetString("RABBITMQ_HOST"),
			Port:     viper.GetString("RABBITMQ_PORT"),
			User:     viper.GetString("RABBITMQ_USER"),
			Password: viper.GetString("RABBITMQ_PASSWORD"),
		},
		Server: ServerConfig{
			Port: viper.GetString("SERVER_PORT"),
		},
	}
}

+ 32
- 0
datamap/internal/db/db.go View File

@@ -0,0 +1,32 @@
package db

import (
"fmt"
"gitlink.org.cn/cloudream/storage/datamap/internal/config"
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/schema"
)

// InitDB opens a GORM MySQL connection built from cfg and returns the
// handle. Table names are used verbatim (pluralization disabled).
func InitDB(cfg config.DatabaseConfig) (*gorm.DB, error) {
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
		cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.DBName)
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
		NamingStrategy: schema.NamingStrategy{
			SingularTable: true, // disable automatic table-name pluralization
		},
	})
	if err != nil {
		return nil, err
	}

	//// Schema auto-migration (currently disabled).
	//db.AutoMigrate(
	//	&models.Hub{},
	//	&models.Storage{},
	//	&models.HubRequest{},
	//	&models.BlockDistribution{},
	//)

	return db, nil
}

+ 174
- 0
datamap/internal/handlers/handlers.go View File

@@ -0,0 +1,174 @@
package handlers

import (
"github.com/gin-gonic/gin"
"gitlink.org.cn/cloudream/storage/datamap/internal/models"
"gorm.io/gorm"
"net/http"
"strconv"
)

// DB 全局数据库连接实例
var DB *gorm.DB

// SetDB injects the shared database connection used by all handlers.
func SetDB(db *gorm.DB) {
	DB = db
}

// GetHubInfo renders the hub interaction graph: one node per hub and per
// storage, and one edge per recorded hub request. Responds with a
// HubRelationship JSON document.
func GetHubInfo(c *gin.Context) {
	repoHub := models.NewHubRepository(DB)
	repoStorage := models.NewStorageRepository(DB)
	repoHubReq := models.NewHubRequestRepository(DB)

	nodes := make([]models.Node, 0)
	edges := make([]models.Edge, 0)

	// Add all node information. Fix: these repository errors used to be
	// silently discarded, making a database failure indistinguishable from
	// an empty graph.
	hubs, err := repoHub.GetAllHubs()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	storages, err := repoStorage.GetAllStorages()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	for _, hub := range hubs {
		nodes = append(nodes, models.Node{
			ID:       "hub" + strconv.FormatInt(int64(hub.HubID), 10),
			NodeType: "hub",
			Name:     hub.Name,
			Address:  hub.Address,
		})
	}
	for _, storage := range storages {
		nodes = append(nodes, models.Node{
			ID:           "storage" + strconv.FormatInt(int64(storage.StorageID), 10),
			NodeType:     "storage",
			Name:         storage.StorageName,
			DataCount:    storage.DataCount,
			NewDataCount: storage.NewDataCount,
			Timestamp:    storage.Timestamp,
		})
	}

	// Add all edge information.
	hubReqs, err := repoHubReq.GetAllHubRequests()
	if err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}
	for _, hubReq := range hubReqs {
		edges = append(edges, models.Edge{
			SourceType:         hubReq.SourceType,
			SourceID:           hubReq.SourceType + strconv.FormatInt(int64(hubReq.SourceID), 10),
			TargetType:         hubReq.TargetType,
			TargetID:           hubReq.TargetType + strconv.FormatInt(int64(hubReq.TargetID), 10),
			DataTransferCount:  hubReq.DataTransferCount,
			RequestCount:       hubReq.RequestCount,
			FailedRequestCount: hubReq.FailedRequestCount,
			AvgTransferCount:   hubReq.AvgTransferCount,
			MaxTransferCount:   hubReq.MaxTransferCount,
			MinTransferCount:   hubReq.MinTransferCount,
			StartTimestamp:     hubReq.StartTimestamp,
			EndTimestamp:       hubReq.EndTimestamp,
		})
	}

	c.JSON(http.StatusOK, models.HubRelationship{
		Nodes: nodes,
		Edges: edges,
	})
}

// containsCombo reports whether combos already holds an entry matching both
// the given ID and combo type.
func containsCombo(combos []models.Combo, targetID string, targetComboType string) bool {
	for i := range combos {
		c := &combos[i]
		if c.ID == targetID && c.ComboType == targetComboType {
			return true
		}
	}
	return false
}

// GetDataTransfer reports how one object's blocks/replicas are distributed
// across storages, and which storage-to-storage transfers were recorded for
// it. Responds with an ObjectDistribution document; an unknown objectID
// yields an empty JSON array.
func GetDataTransfer(c *gin.Context) {

	repoObject := models.NewObjectRepository(DB)
	repoBlockDistribution := models.NewBlockDistributionRepository(DB)
	repoStorageTrans := models.NewStorageTransferCountRepository(DB)

	// First check whether the object exists.
	// NOTE(review): ParseInt and repository errors are ignored here, so a
	// malformed objectID degrades to "object not found" — confirm intended.
	objectIDStr := c.Param("objectID")
	objectID, _ := strconv.ParseInt(objectIDStr, 10, 64)
	object, _ := repoObject.GetObjectByID(objectID)
	if object == nil {
		c.JSON(http.StatusOK, []interface{}{})
		return
	}

	nodes := make([]models.DistNode, 0)
	combos := make([]models.Combo, 0)
	edges := make([]models.DistEdge, 0)

	// Look up every block or replica of this object across all storages.
	blocks, _ := repoBlockDistribution.GetBlockDistributionByObjectID(objectID)
	for _, block := range blocks {
		// nodes --------- block
		// Add the node for this block.
		node := models.DistNode{
			// block id
			ID: strconv.FormatInt(block.BlockID, 10),
			// storage id
			ComboID: "storage" + strconv.FormatInt(block.StorageID, 10),
			// block index
			Label: block.Type + strconv.FormatInt(block.Index, 10),
			// block type
			NodeType: block.Type,
		}
		nodes = append(nodes, node)

		// combos ------- state or storage
		// Add the storage combo once per storage.
		if !containsCombo(combos, "storage"+strconv.FormatInt(block.StorageID, 10), "storage") {
			combo := models.Combo{
				ID:        "storage" + strconv.FormatInt(block.StorageID, 10),
				Label:     "存储中心" + strconv.FormatInt(block.StorageID, 10),
				ParentId:  "state" + strconv.Itoa(block.Status),
				ComboType: "storage",
			}
			combos = append(combos, combo)
		}
		// Add the state combo once per status value.
		if !containsCombo(combos, "state"+strconv.Itoa(block.Status), "state") {
			var statusStr string
			switch block.Status {
			case 0:
				statusStr = "实时情况"
			case 1:
				statusStr = block.Timestamp.Format("2006-01-02") + "布局调整后"
			case 2:
				statusStr = block.Timestamp.Format("2006-01-02") + "布局调整前"
			case 3:
				statusStr = block.Timestamp.Format("2006-01-02") + "布局调整后"
			default:
				statusStr = "未知状态"
			}
			combo := models.Combo{
				ID:        "state" + strconv.Itoa(block.Status),
				Label:     statusStr,
				ComboType: "state",
			}
			combos = append(combos, combo)
		}
	}
	// edges: data transfers between storages.
	relations, _ := repoStorageTrans.GetStorageTransferCountByObjectID(objectID)
	for _, relation := range relations {
		edge := models.DistEdge{
			Source: "storage" + strconv.FormatInt(relation.SourceStorageID, 10),
			Target: "storage" + strconv.FormatInt(relation.TargetStorageID, 10),
		}
		edges = append(edges, edge)
	}
	result := models.ObjectDistribution{
		Nodes:  nodes,
		Combos: combos,
		Edges:  edges,
	}
	c.JSON(http.StatusOK, result)
}

+ 187
- 0
datamap/internal/models/blockdistribution.go View File

@@ -0,0 +1,187 @@
package models

import (
"errors"
"log"
"strconv"
"time"

stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gorm.io/gorm"
)

// BlockDistribution records where one block (replica or EC chunk) of an
// object is stored, together with the lifecycle status of that placement.
type BlockDistribution struct {
	BlockID   int64  `gorm:"column:BlockID; primaryKey; type:bigint; autoIncrement" json:"blockID"`
	ObjectID  int64  `gorm:"column:ObjectID; type:bigint; not null" json:"objectID"`
	Type      string `gorm:"column:Type; type:varchar(1024); not null" json:"type"` // block type string
	Index     int64  `gorm:"column:Index; type:bigint; not null" json:"index"`      // block index within the object
	StorageID int64  `gorm:"column:StorageID; type:bigint; not null" json:"storageID"`
	Status    int    `gorm:"column:Status; type:tinyint; not null" json:"status"` // see the Status* constants
	// BUG FIX: column type was misspelled "datatime", which is not a valid
	// SQL type; corrected to "datetime".
	Timestamp time.Time `gorm:"column:Timestamp; type:datetime; not null" json:"timestamp"`
}

// TableName maps the model to the blockdistribution table.
func (BlockDistribution) TableName() string {
	return "blockdistribution"
}

// BlockDistributionRepository provides persistence operations for
// BlockDistribution rows.
type BlockDistributionRepository struct {
	repo *GormRepository
}

// NewBlockDistributionRepository builds a repository bound to the given DB handle.
func NewBlockDistributionRepository(db *gorm.DB) *BlockDistributionRepository {
	base := NewGormRepository(db)
	return &BlockDistributionRepository{repo: base}
}

// CreateBlockDistribution inserts a new block placement record.
func (r *BlockDistributionRepository) CreateBlockDistribution(block *BlockDistribution) error {
	return r.repo.Create(block)
}

// UpdateBlockDistribution saves all fields of an existing record.
func (r *BlockDistributionRepository) UpdateBlockDistribution(block *BlockDistribution) error {
	return r.repo.Update(block)
}

// GetAllBlocks returns every block placement record in the table.
func (r *BlockDistributionRepository) GetAllBlocks() ([]BlockDistribution, error) {
	var out []BlockDistribution
	if err := r.repo.GetAll(&out); err != nil {
		return nil, err
	}
	return out, nil
}

// GetBlockDistributionByObjectID returns all block placements of an object.
// An object with no blocks yields an empty slice, not an error.
func (r *BlockDistributionRepository) GetBlockDistributionByObjectID(objectID int64) ([]BlockDistribution, error) {
	var blocks []BlockDistribution
	query := "SELECT * FROM blockdistribution WHERE ObjectID = ?"
	// BUG FIX: Raw().Scan() never returns gorm.ErrRecordNotFound (an empty
	// result is simply an empty slice), so the old not-found check was dead
	// code and genuine query errors were silently dropped. Propagate them.
	if err := r.repo.db.Raw(query, objectID).Scan(&blocks).Error; err != nil {
		return nil, err
	}
	return blocks, nil
}

// GetStorageIDsByObjectID returns the distinct storage IDs that hold at
// least one block of the given object.
func (r *BlockDistributionRepository) GetStorageIDsByObjectID(objectID int64) ([]int64, error) {
	var storageIDs []int64
	query := "SELECT distinct storageID FROM blockdistribution WHERE ObjectID = ?"
	if err := r.repo.db.Raw(query, objectID).Scan(&storageIDs).Error; err != nil {
		return nil, err
	}
	return storageIDs, nil
}

// GetBlockDistributionByIndex fetches a single block placement identified by
// (ObjectID, Index, StorageID). The underlying GORM error — including
// gorm.ErrRecordNotFound — is returned unchanged so callers can test it
// with errors.Is, as the watchers in this package do.
func (r *BlockDistributionRepository) GetBlockDistributionByIndex(objectID int64, index int64, storageID int64) (BlockDistribution, error) {
	var block BlockDistribution
	query := "SELECT * FROM blockdistribution WHERE ObjectID = ? AND `Index` = ? AND StorageID = ?"
	// BUG FIX: the original called db.Exec(), which executes the statement
	// without fetching rows, so First() never saw the result. It also
	// replaced ErrRecordNotFound with a plain errors.New, which broke the
	// callers' errors.Is(gorm.ErrRecordNotFound) checks.
	if err := r.repo.db.Raw(query, objectID, index, storageID).First(&block).Error; err != nil {
		return BlockDistribution{}, err
	}
	return block, nil
}

// DeleteBlockDistribution removes the block placement identified by
// (ObjectID, Index, StorageID). Deleting a non-existent row is not an error.
func (r *BlockDistributionRepository) DeleteBlockDistribution(objectID int64, index int64, storageID int64) error {
	const query = "DELETE FROM blockdistribution WHERE ObjectID = ? AND `Index` = ? AND StorageID = ?"
	return r.repo.db.Exec(query, objectID, index, storageID).Error
}

// BlockDistributionWatcher mirrors block-distribution sysevents into the
// local object, blockdistribution and storagetransfercount tables.
type BlockDistributionWatcher struct {
	Name string
}

// OnEvent handles a *stgmod.BodyBlockDistribution event: it upserts the
// object row, upserts one row per reported block placement, and appends one
// storagetransfercount row per reported data transfer. Any other body type
// is ignored.
func (w *BlockDistributionWatcher) OnEvent(event sysevent.SysEvent) {
	body, ok := event.Body.(*stgmod.BodyBlockDistribution)
	if !ok {
		return
	}

	repoObject := NewObjectRepository(DB)
	repoBlock := NewBlockDistributionRepository(DB)
	repoStorage := NewStorageTransferCountRepository(DB)

	// Upsert the object row and mark it "after yesterday's adjustment".
	object, err := repoObject.GetObjectByID(int64(body.ObjectID))
	// Parse errors are deliberately ignored; malformed numbers become 0.
	faultTolerance, _ := strconv.ParseFloat(body.FaultTolerance, 64)
	redundancy, _ := strconv.ParseFloat(body.Redundancy, 64)
	avgAccessCost, _ := strconv.ParseFloat(body.AvgAccessCost, 64)
	if errors.Is(err, gorm.ErrRecordNotFound) {
		err := repoObject.CreateObject(&Object{
			ObjectID:       body.ObjectID,
			PackageID:      body.PackageID,
			Path:           body.Path,
			Size:           body.Size,
			FileHash:       body.FileHash,
			Status:         StatusYesterdayAfter,
			FaultTolerance: faultTolerance,
			Redundancy:     redundancy,
			AvgAccessCost:  avgAccessCost,
			Timestamp:      time.Now(),
		})
		if err != nil {
			log.Printf("Error create object: %v", err)
		}
	} else {
		// NOTE(review): if err is a non-NotFound failure, object is nil here
		// and the next line panics — confirm and guard if needed.
		object.Status = StatusYesterdayAfter
		err = repoObject.UpdateObject(object)
		if err != nil {
			log.Printf("Error update object: %v", err)
		}
	}

	// Upsert each reported block placement.
	for _, blockDist := range body.BlockDistribution {
		blockIndex, _ := strconv.ParseInt(blockDist.Index, 10, 64)
		blockStorageID, _ := strconv.ParseInt(blockDist.StorageID, 10, 64)
		// NOTE(review): this := shadows the loop variable blockDist with the
		// row fetched from the DB, so in the not-found branch below the
		// zero-valued result supplies BlockID/ObjectID/Type. Also,
		// GetBlockDistributionByIndex maps ErrRecordNotFound to a plain
		// errors.New, so the create branch may be unreachable — verify both.
		blockDist, err := repoBlock.GetBlockDistributionByIndex(int64(body.ObjectID), blockIndex, blockStorageID)
		if errors.Is(err, gorm.ErrRecordNotFound) {
			err := repoBlock.CreateBlockDistribution(&BlockDistribution{
				BlockID:   blockDist.BlockID,
				ObjectID:  blockDist.ObjectID,
				Type:      blockDist.Type,
				Index:     blockIndex,
				StorageID: blockStorageID,
				Status:    StatusYesterdayAfter,
				Timestamp: time.Now(),
			})
			if err != nil {
				log.Printf("Error create BlockDistribution: %v", err)
			}
		} else {
			err := repoBlock.UpdateBlockDistribution(&BlockDistribution{
				BlockID:   blockDist.BlockID,
				ObjectID:  blockDist.ObjectID,
				Type:      blockDist.Type,
				Index:     blockIndex,
				StorageID: blockStorageID,
				Status:    StatusYesterdayAfter,
				Timestamp: time.Now(),
			})
			if err != nil {
				log.Printf("Error update BlockDistribution: %v", err)
			}
		}
	}
	// Append one storagetransfercount row per reported transfer.
	for _, dataTransfer := range body.DataTransfers {
		sourceStorageID, _ := strconv.ParseInt(string(dataTransfer.SourceStorageID), 10, 64)
		targetStorageID, _ := strconv.ParseInt(string(dataTransfer.TargetStorageID), 10, 64)
		dataTransferCount, _ := strconv.ParseInt(dataTransfer.TransferBytes, 10, 64)

		err := repoStorage.CreateStorageTransferCount(&StorageTransferCount{
			ObjectID:          int64(body.ObjectID),
			Status:            StatusTodayBeforeYesterday,
			SourceStorageID:   sourceStorageID,
			TargetStorageID:   targetStorageID,
			DataTransferCount: dataTransferCount,
			Timestamp:         time.Now(),
		})
		if err != nil {
			log.Printf("Error create StorageTransferCount : %v", err)
		}
	}
}

+ 234
- 0
datamap/internal/models/blocktransfer.go View File

@@ -0,0 +1,234 @@
package models

import (
"errors"
"log"
"strconv"
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gorm.io/gorm"
)

// StorageTransferCount is one recorded data transfer of an object's blocks
// between two storages.
type StorageTransferCount struct {
	RelationshipID    int64 `gorm:"column:RelationshipID; primaryKey; type:bigint; autoIncrement" json:"relationshipID"`
	ObjectID          int64 `gorm:"column:ObjectID; type:bigint; not null" json:"objectID"`
	Status            int64 `gorm:"column:Status; type:bigint; not null" json:"status"`                       // state on the left end of the edge
	SourceStorageID   int64 `gorm:"column:SourceStorageID; type:bigint; not null" json:"sourceStorageID"`     // source storage ID
	TargetStorageID   int64 `gorm:"column:TargetStorageID; type:bigint; not null" json:"targetStorageID"`     // target storage ID
	DataTransferCount int64 `gorm:"column:DataTransferCount; type:bigint; not null" json:"dataTransferCount"` // transferred volume
	// BUG FIX: column type was misspelled "datatime", which is not a valid
	// SQL type; corrected to "datetime".
	Timestamp time.Time `gorm:"column:Timestamp; type:datetime; not null" json:"timestamp"` // when the change finished
}

// TableName maps the model to the storagetransfercount table.
func (StorageTransferCount) TableName() string {
	return "storagetransfercount"
}

// StorageTransferCountRepository provides persistence operations for
// StorageTransferCount rows.
type StorageTransferCountRepository struct {
	repo *GormRepository
}

// NewStorageTransferCountRepository builds a repository bound to the given DB handle.
func NewStorageTransferCountRepository(db *gorm.DB) *StorageTransferCountRepository {
	return &StorageTransferCountRepository{repo: NewGormRepository(db)}
}

// CreateStorageTransferCount inserts a new transfer record.
func (r *StorageTransferCountRepository) CreateStorageTransferCount(storageTransferCount *StorageTransferCount) error {
	return r.repo.Create(storageTransferCount)
}

// UpdateStorageTransferCount saves all fields of an existing record.
func (r *StorageTransferCountRepository) UpdateStorageTransferCount(storageTransferCount *StorageTransferCount) error {
	return r.repo.Update(storageTransferCount)
}

// GetStorageTransferCountByID fetches one record by primary key.
func (r *StorageTransferCountRepository) GetStorageTransferCountByID(id int) (*StorageTransferCount, error) {
	out := new(StorageTransferCount)
	if err := r.repo.GetByID(uint(id), out); err != nil {
		return nil, err
	}
	return out, nil
}

// GetStorageTransferCountByObjectID returns all transfer records of an object.
func (r *StorageTransferCountRepository) GetStorageTransferCountByObjectID(objectID int64) ([]StorageTransferCount, error) {
	var out []StorageTransferCount
	query := "SELECT * FROM storagetransfercount WHERE ObjectID = ?"
	if err := r.repo.db.Raw(query, objectID).Scan(&out).Error; err != nil {
		return nil, err
	}
	return out, nil
}

// GetAllStorageTransferCounts returns every transfer record.
func (r *StorageTransferCountRepository) GetAllStorageTransferCounts() ([]StorageTransferCount, error) {
	var out []StorageTransferCount
	if err := r.repo.GetAll(&out); err != nil {
		return nil, err
	}
	return out, nil
}

// BlockTransferWatcher applies block-change sysevents (clone, en/decode,
// delete) to the blockdistribution, storage and storagetransfercount tables.
type BlockTransferWatcher struct {
	Name string
}

// OnEvent processes a *stgmod.BodyBlockTransfer event, dispatching on the
// concrete type of each entry in body.BlockChanges.
//
// NOTE(review): this function does not compile as written — index,
// sourceStorageID, targetStorageID and newDataCount are referenced below but
// their derivations are commented out (see the TODO in the clone case). They
// must be extracted from each concrete change type before this file builds.
func (w *BlockTransferWatcher) OnEvent(event sysevent.SysEvent) {
	body, ok := event.Body.(*stgmod.BodyBlockTransfer)
	if !ok {
		return
	}

	repoDist := NewBlockDistributionRepository(DB)
	repoStorage := NewStorageRepository(DB)
	repoStorageTrans := NewStorageTransferCountRepository(DB)
	repoObject := NewObjectRepository(DB)

	for _, change := range body.BlockChanges {

		// Parse/lookup errors are ignored; a malformed ID becomes 0.
		// NOTE(review): object may be nil on lookup failure and is
		// dereferenced in the en/decode case below — confirm and guard.
		objectID, _ := strconv.ParseInt(string(body.ObjectID), 10, 64)
		object, _ := repoObject.GetObjectByID(objectID)
		// index, _ := strconv.ParseInt(change.Index, 10, 64)
		// sourceStorageID, _ := strconv.ParseInt(string(change.SourceStorageID), 10, 64)
		// targetStorageID, _ := strconv.ParseInt(string(change.TargetStorageID), 10, 64)
		// newDataCount, _ := strconv.ParseInt(change.DataTransferCount, 10, 64)

		switch change := change.(type) {
		case *stgmod.BlockChangeClone: // block copied to another storage
			// TODO derive index, sourceStorageID, targetStorageID and
			// newDataCount from change (same applies to the cases below).

			// Look up the source block placement already known to the DB.
			blockSource, errSource := repoDist.GetBlockDistributionByIndex(objectID, index, sourceStorageID)
			// No record yet: persist the source placement.
			// NOTE(review): GetBlockDistributionByIndex maps
			// gorm.ErrRecordNotFound to a plain errors.New, so this branch
			// appears unreachable — verify.
			if errors.Is(errSource, gorm.ErrRecordNotFound) {
				err := repoDist.CreateBlockDistribution(&BlockDistribution{
					ObjectID:  objectID,
					Type:      change.BlockType,
					Index:     index,
					StorageID: sourceStorageID,
					Status:    StatusNow,
					Timestamp: time.Now(),
				})
				if err != nil {
					log.Printf("Error create source blockdistribution: %v", err)
				}
			} else {
				// Source known: add a placement row for the copy on the
				// target storage.
				err := repoDist.CreateBlockDistribution(&BlockDistribution{
					ObjectID:  blockSource.ObjectID,
					Type:      change.BlockType,
					Index:     index,
					StorageID: targetStorageID,
					Status:    StatusNow,
					Timestamp: time.Now(),
				})
				if err != nil {
					log.Printf("Error update blockdistribution: %v", err)
				}
				// Credit the copied bytes to the target storage's counter.
				storageOld, err := repoStorage.GetStorageByID(targetStorageID)
				if errors.Is(err, gorm.ErrRecordNotFound) {
					err = repoStorage.CreateStorage(&Storage{
						StorageID: cdssdk.StorageID(targetStorageID),
						DataCount: newDataCount,
						Timestamp: time.Now(),
					})
					if err != nil {
						log.Printf("Error increase datacount in targetstorage: %v", err)
					}
				} else {
					err = repoStorage.UpdateStorage(&Storage{
						StorageID: cdssdk.StorageID(targetStorageID),
						DataCount: storageOld.DataCount + newDataCount,
						Timestamp: time.Now(),
					})
					if err != nil {
						log.Printf("Error increase datacount in targetstorage: %v", err)
					}
				}

			}
			// Record the transfer itself.
			// NOTE(review): when the not-found branch above ran, blockSource
			// is zero-valued here and Status records 0 — confirm intended.
			err := repoStorageTrans.CreateStorageTransferCount(&StorageTransferCount{
				ObjectID:          objectID,
				Status:            int64(blockSource.Status),
				SourceStorageID:   sourceStorageID,
				TargetStorageID:   targetStorageID,
				DataTransferCount: newDataCount,
				Timestamp:         time.Now(),
			})
			if err != nil {
				log.Printf("Error create StorageTransferCount : %v", err)
			}
		case *stgmod.BlockChangeEnDecode: // blocks re-encoded / decoded
			// Drop every source block placement.
			for _, sourceBlock := range change.SourceBlocks {
				sourceBlockIndex, _ := strconv.ParseInt(sourceBlock.Index, 10, 64)
				err := repoDist.DeleteBlockDistribution(objectID, sourceBlockIndex, sourceStorageID)
				if err != nil {
					log.Printf("Error delete blockdistribution: %v", err)
				}
			}
			// Insert every target block placement.
			for _, targetBlock := range change.TargetBlocks {
				storageID, _ := strconv.ParseInt(string(targetBlock.StorageID), 10, 64)
				err := repoDist.CreateBlockDistribution(&BlockDistribution{
					ObjectID: objectID,
					Type:     targetBlock.BlockType,
					Index:    index,
					// stored directly on the target storage
					StorageID: storageID,
					Status:    StatusNow,
					Timestamp: time.Now(),
				})
				if err != nil {
					log.Printf("Error create blockdistribution: %v", err)
				}
			}
			// Record the transfer itself.
			err := repoStorageTrans.CreateStorageTransferCount(&StorageTransferCount{
				ObjectID:          objectID,
				Status:            int64(object.Status),
				SourceStorageID:   sourceStorageID,
				TargetStorageID:   targetStorageID,
				DataTransferCount: newDataCount,
				Timestamp:         time.Now(),
			})
			if err != nil {
				log.Printf("Error create StorageTransferCount : %v", err)
			}

		case *stgmod.BlockChangeDeleted: // block removed from a storage
			storageID, _ := strconv.ParseInt(string(change.StorageID), 10, 64)
			changeIndex, _ := strconv.ParseInt(change.Index, 10, 64)
			err := repoDist.DeleteBlockDistribution(objectID, changeIndex, storageID)
			if err != nil {
				log.Printf("Error delete blockdistribution: %v", err)
			}

		// case *stgmod.BlockChangeUpdated: // update (kept disabled)
		//	for _, blockUpdate := range change.Blocks {
		//		// Look up the stored BlockDistribution row.
		//		blockIndex, _ := strconv.ParseInt(blockUpdate.Index, 10, 64)
		//		blockOld, err := repoDist.GetBlockDistributionByIndex(objectID, blockIndex, sourceStorageID)
		//		newStorageID, _ := strconv.ParseInt(string(blockUpdate.StorageID), 10, 64)
		//		err = repoDist.UpdateBlockDistribution(&BlockDistribution{
		//			BlockID:   blockOld.BlockID,
		//			ObjectID:  blockOld.ObjectID,
		//			Type:      blockUpdate.BlockType,
		//			Index:     blockIndex,
		//			StorageID: newStorageID,
		//			Status:    StatusNow,
		//			Timestamp: time.Now(),
		//		})
		//		if err != nil {
		//			log.Printf("Error delete blockdistribution: %v", err)
		//		}
		//	}

		default:
			break
		}
	}
}

+ 52
- 0
datamap/internal/models/hub.go View File

@@ -0,0 +1,52 @@
package models

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gorm.io/gorm"
)

// Hub is one hub node registered in the system, mirrored from hub
// lifecycle sysevents by HubInfoWatcher.
type Hub struct {
	HubID   cdssdk.HubID          `gorm:"column:HubID; primaryKey; type:bigint; autoIncrement" json:"hubID"`
	Name    string                `gorm:"column:Name; type:varchar(255); not null" json:"name"`
	Address cdssdk.HubAddressInfo `gorm:"column:Address; type:json; " json:"address"`
}

// TableName maps the model to the hub table.
func (Hub) TableName() string { return "hub" }

// HubRepository provides persistence operations for Hub rows.
type HubRepository struct {
	repo *GormRepository
}

// NewHubRepository builds a repository bound to the given DB handle.
func NewHubRepository(db *gorm.DB) *HubRepository {
	return &HubRepository{repo: NewGormRepository(db)}
}

// CreateHub inserts a new hub row.
func (r *HubRepository) CreateHub(hub *Hub) error { return r.repo.Create(hub) }

// UpdateHub saves all fields of an existing hub row.
func (r *HubRepository) UpdateHub(hub *Hub) error { return r.repo.Update(hub) }

// DeleteHub removes the row whose primary key matches hub.HubID.
func (r *HubRepository) DeleteHub(hub *Hub) error {
	return r.repo.Delete(hub, uint(hub.HubID))
}

// GetHubByID fetches a single hub by primary key.
func (r *HubRepository) GetHubByID(id int) (*Hub, error) {
	hub := new(Hub)
	if err := r.repo.GetByID(uint(id), hub); err != nil {
		return nil, err
	}
	return hub, nil
}

// GetAllHubs returns every hub row.
func (r *HubRepository) GetAllHubs() ([]Hub, error) {
	var out []Hub
	if err := r.repo.GetAll(&out); err != nil {
		return nil, err
	}
	return out, nil
}

+ 85
- 0
datamap/internal/models/hubinfo.go View File

@@ -0,0 +1,85 @@
package models

import (
	"encoding/json"
	"log"

	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	stgmod "gitlink.org.cn/cloudream/storage/common/models"
	"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
)

// LocalHub wraps cdssdk.Hub so that this package can attach its own JSON
// unmarshalling behaviour.
type LocalHub struct {
	cdssdk.Hub
}

// ConcreteHubType is a concrete hub address holding a plain string.
type ConcreteHubType struct {
	Address string
}

// GetStorageType returns the raw address string.
func (c ConcreteHubType) GetStorageType() string { return c.Address }

// String returns the raw address string.
func (c ConcreteHubType) String() string { return c.Address }

// UnmarshalJSON decodes a hub JSON document whose "address" field is a plain
// string, converting it into a ConcreteHubType on the embedded Hub.
func (s *LocalHub) UnmarshalJSON(data []byte) error {
	// The alias type drops LocalHub's methods so json.Unmarshal does not
	// recurse back into this UnmarshalJSON.
	type plain LocalHub
	tmp := struct {
		Address string `json:"address"`
		*plain
	}{plain: (*plain)(s)}

	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	s.Address = ConcreteHubType{Address: tmp.Address}
	return nil
}

// HubInfoWatcher mirrors hub lifecycle sysevents (create / update / delete)
// into the local hub table.
type HubInfoWatcher struct {
	Name string
}

// OnEvent applies the event body to the hub table; events with any other
// body type are ignored.
func (w *HubInfoWatcher) OnEvent(event sysevent.SysEvent) {

	repo := NewHubRepository(DB)

	switch body := event.Body.(type) {
	case *stgmod.BodyNewHub:
		// BUG FIX: repository errors were silently discarded in all three
		// cases; log them like the other watchers in this package do.
		if err := repo.CreateHub(&Hub{
			HubID:   body.Info.HubID,
			Name:    body.Info.Name,
			Address: body.Info.Address,
		}); err != nil {
			log.Printf("Error create hub: %v", err)
		}

	case *stgmod.BodyHubUpdated:
		if err := repo.UpdateHub(&Hub{
			HubID:   body.Info.HubID,
			Name:    body.Info.Name,
			Address: body.Info.Address,
		}); err != nil {
			log.Printf("Error update hub: %v", err)
		}

	case *stgmod.BodyHubDeleted:
		if err := repo.DeleteHub(&Hub{
			HubID: body.HubID,
		}); err != nil {
			log.Printf("Error delete hub: %v", err)
		}
	}
}

+ 157
- 0
datamap/internal/models/hubrequest.go View File

@@ -0,0 +1,157 @@
package models

import (
"fmt"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gorm.io/gorm"
"log"
"time"
)

// HubRequest is one aggregated transfer-statistics record between two
// endpoints; SourceType/TargetType are "hub" or "storage" as written by the
// watchers below.
// TODO: distinguish source and target endpoint ID types more strictly.
type HubRequest struct {
	RequestID          int64        `gorm:"column:RequestID; primaryKey; type:bigint; autoIncrement" json:"RequestID"`
	SourceType         string       `gorm:"column:SourceType; type:varchar(255); not null" json:"sourceType"`
	SourceID           cdssdk.HubID `gorm:"column:SourceID; type:bigint; not null" json:"sourceID"`
	TargetType         string       `gorm:"column:TargetType; type:varchar(255); not null" json:"targetType"`
	TargetID           cdssdk.HubID `gorm:"column:TargetID; type:bigint; not null" json:"targetID"`
	DataTransferCount  int64        `gorm:"column:DataTransferCount; type:bigint; not null" json:"dataTransferCount"`
	RequestCount       int64        `gorm:"column:RequestCount; type:bigint; not null" json:"requestCount"`
	FailedRequestCount int64        `gorm:"column:FailedRequestCount; type:bigint; not null" json:"failedRequestCount"`
	AvgTransferCount   int64        `gorm:"column:AvgTransferCount; type:bigint; not null" json:"avgTransferCount"`
	MaxTransferCount   int64        `gorm:"column:MaxTransferCount; type:bigint; not null" json:"maxTransferCount"`
	MinTransferCount   int64        `gorm:"column:MinTransferCount; type:bigint; not null" json:"minTransferCount"`
	// BUG FIX: column type was misspelled "datatime" (not a valid SQL type);
	// corrected to "datetime" on both timestamp columns.
	StartTimestamp time.Time `gorm:"column:StartTimestamp; type:datetime; not null" json:"startTimestamp"`
	EndTimestamp   time.Time `gorm:"column:EndTimestamp; type:datetime; not null" json:"endTimestamp"`
}

// TableName maps the model to the hubrequest table.
func (HubRequest) TableName() string { return "hubrequest" }

// HubRequestRepository provides persistence operations for HubRequest rows.
type HubRequestRepository struct {
	repo *GormRepository
}

// NewHubRequestRepository builds a repository bound to the given DB handle.
func NewHubRequestRepository(db *gorm.DB) *HubRequestRepository {
	return &HubRequestRepository{repo: NewGormRepository(db)}
}

// CreateHubRequest inserts a new statistics row.
func (r *HubRequestRepository) CreateHubRequest(request *HubRequest) error {
	return r.repo.Create(request)
}

// GetHubRequestByHubID returns all statistics rows whose source endpoint is
// the given ID.
func (r *HubRequestRepository) GetHubRequestByHubID(hubId int64) ([]HubRequest, error) {
	var hubRequests []HubRequest
	// BUG FIX: the table has no SourceHubID column — the model maps the
	// source field to SourceID — so the original query could never match.
	query := "SELECT * FROM hubrequest WHERE SourceID = ?"
	if err := r.repo.db.Raw(query, hubId).Scan(&hubRequests).Error; err != nil {
		return nil, err
	}
	return hubRequests, nil
}

// GetAllHubRequests returns every row of the hubrequest table.
func (r *HubRequestRepository) GetAllHubRequests() ([]HubRequest, error) {
	var hubRequests []HubRequest
	if err := r.repo.GetAll(&hubRequests); err != nil {
		return nil, err
	}
	return hubRequests, nil
}

// HubTransferStatsWatcher persists hub-to-hub transfer statistics events as
// hubrequest rows.
type HubTransferStatsWatcher struct {
	Name string
}

// OnEvent stores the Send-direction statistics of a hubTransferStats event
// as one hub→hub hubrequest row. Other categories / body types are only
// reported, not processed.
func (w *HubTransferStatsWatcher) OnEvent(event sysevent.SysEvent) {
	if event.Category != "hubTransferStats" {
		fmt.Printf("Watcher %s received an event with category %s\n", w.Name, event.Category)
		return
	}
	stats, ok := event.Body.(*stgmod.BodyHubTransferStats)
	if !ok {
		// BUG FIX: the message previously named *BodyStorageInfo, which is
		// not the type this watcher expects.
		fmt.Printf("Watcher %s: Unexpected Body type, expected *BodyHubTransferStats, got %T\n", w.Name, event.Body)
		return
	}

	repo := NewHubRequestRepository(DB)
	hubRequest := &HubRequest{
		SourceType:         "hub",
		SourceID:           stats.SourceHubID,
		TargetType:         "hub",
		TargetID:           stats.TargetHubID,
		DataTransferCount:  stats.Send.TotalTransfer,
		RequestCount:       stats.Send.RequestCount,
		FailedRequestCount: stats.Send.FailedRequestCount,
		AvgTransferCount:   stats.Send.AvgTransfer,
		MaxTransferCount:   stats.Send.MaxTransfer,
		MinTransferCount:   stats.Send.MinTransfer,
		StartTimestamp:     stats.StartTimestamp,
		EndTimestamp:       stats.EndTimestamp,
	}

	// BUG FIX: the log message said "update" for an insert.
	if err := repo.CreateHubRequest(hubRequest); err != nil {
		log.Printf("Error create hubrequest: %v", err)
	}
}

// HubStorageTransferStatsWatcher persists hub-to-storage transfer statistics
// events as a pair of hubrequest rows (send and receive directions).
type HubStorageTransferStatsWatcher struct {
	Name string
}

// OnEvent stores one hub→storage row (Send direction) and one storage→hub
// row (Receive direction) for a hubStorageTransferStats event. Other
// categories / body types are only reported, not processed.
func (w *HubStorageTransferStatsWatcher) OnEvent(event sysevent.SysEvent) {
	if event.Category != "hubStorageTransferStats" {
		fmt.Printf("Watcher %s received an event with category %s\n", w.Name, event.Category)
		return
	}
	stats, ok := event.Body.(*stgmod.BodyHubStorageTransferStats)
	if !ok {
		// BUG FIX: the message previously named *BodyStorageInfo, which is
		// not the type this watcher expects.
		fmt.Printf("Watcher %s: Unexpected Body type, expected *BodyHubStorageTransferStats, got %T\n", w.Name, event.Body)
		return
	}

	repo := NewHubRequestRepository(DB)

	// Storage IDs reuse the HubID column type, mirroring the schema.
	hubRequestSend := &HubRequest{
		SourceType:         "hub",
		SourceID:           stats.HubID,
		TargetType:         "storage",
		TargetID:           cdssdk.HubID(stats.StorageID),
		DataTransferCount:  stats.Send.TotalTransfer,
		RequestCount:       stats.Send.RequestCount,
		FailedRequestCount: stats.Send.FailedRequestCount,
		AvgTransferCount:   stats.Send.AvgTransfer,
		MaxTransferCount:   stats.Send.MaxTransfer,
		MinTransferCount:   stats.Send.MinTransfer,
		StartTimestamp:     stats.StartTimestamp,
		EndTimestamp:       stats.EndTimestamp,
	}
	// BUG FIX: the log messages said "update" for inserts.
	if err := repo.CreateHubRequest(hubRequestSend); err != nil {
		log.Printf("Error create hubrequest: %v", err)
	}

	hubRequestReceive := &HubRequest{
		SourceType:         "storage",
		SourceID:           cdssdk.HubID(stats.StorageID),
		TargetType:         "hub",
		TargetID:           stats.HubID,
		DataTransferCount:  stats.Receive.TotalTransfer,
		RequestCount:       stats.Receive.RequestCount,
		FailedRequestCount: stats.Receive.FailedRequestCount,
		AvgTransferCount:   stats.Receive.AvgTransfer,
		MaxTransferCount:   stats.Receive.MaxTransfer,
		MinTransferCount:   stats.Receive.MinTransfer,
		StartTimestamp:     stats.StartTimestamp,
		EndTimestamp:       stats.EndTimestamp,
	}
	if err := repo.CreateHubRequest(hubRequestReceive); err != nil {
		log.Printf("Error create hubrequest: %v", err)
	}
}

+ 124
- 0
datamap/internal/models/models.go View File

@@ -0,0 +1,124 @@
package models

import (
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gorm.io/gorm"
"time"
)

// DB is the package-wide GORM handle shared by every repository and watcher
// in this package; it must be set via InitDB before any of them run.
var DB *gorm.DB

// InitDB stores the shared database connection for this package.
func InitDB(db *gorm.DB) {
	DB = db
}

// ID and status helper types used by the models in this package.
type RequestID int64
type BlockID int64
type RelationshipID int64
type Status int

// Lifecycle states of an object/block snapshot.
const (
	StatusNow                  = 0 // current real-time state
	StatusYesterdayAfter       = 1 // yesterday, after layout adjustment
	StatusYesterdayBefore      = 2 // yesterday, before layout adjustment
	// NOTE(review): the name suggests "today before yesterday" but the
	// original comment described "two days ago, after adjustment" — confirm.
	StatusTodayBeforeYesterday = 3 // two days ago, after layout adjustment
)

// Hub relationship graph returned by the API.

type HubRelationship struct {
	Nodes []Node `json:"nodes"` // node list
	Edges []Edge `json:"edges"` // edge list
}

// Node is one hub or storage vertex of the relationship graph.
type Node struct {
	ID           string                `json:"id"`           // node/center ID
	NodeType     string                `json:"nodeType"`     // node type: storage or hub
	Name         string                `json:"name"`         // node/center name
	Address      cdssdk.HubAddressInfo `json:"address"`      // address
	DataCount    int64                 `json:"dataCount"`    // total data volume (block count)
	NewDataCount int64                 `json:"newdataCount"` // newly added data volume (block count)
	Timestamp    time.Time             `json:"timestamp"`    // timestamp
}

// Edge is one directed transfer-statistics edge of the relationship graph.
type Edge struct {
	SourceType         string    `json:"sourceType"`         // source node type
	SourceID           string    `json:"source"`             // source node ID
	TargetType         string    `json:"targetType"`         // target node type
	TargetID           string    `json:"target"`             // target node ID
	DataTransferCount  int64     `json:"dataTransferCount"`  // transferred data volume
	RequestCount       int64     `json:"requestCount"`       // request count
	FailedRequestCount int64     `json:"failedRequestCount"` // failed request count
	AvgTransferCount   int64     `json:"avgTransferCount"`   // average transfer volume
	MaxTransferCount   int64     `json:"maxTransferCount"`   // maximum transfer volume
	MinTransferCount   int64     `json:"minTransferCount"`   // minimum transfer volume
	StartTimestamp     time.Time `json:"startTimestamp"`     // window start timestamp
	EndTimestamp       time.Time `json:"endTimestamp"`       // window end timestamp
}

// Object block distribution graph returned by the API.
type ObjectDistribution struct {
	Nodes  []DistNode `json:"nodes"`
	Combos []Combo    `json:"combos"`
	Edges  []DistEdge `json:"edges"`
}

// DistNode is one block/replica node; ComboID nests it under its storage combo.
type DistNode struct {
	ID       string `json:"id"`
	ComboID  string `json:"comboId"`
	Label    string `json:"label"`
	NodeType string `json:"nodeType"`
}

// Combo is a grouping box: a storage (nested under a state via ParentId) or a state.
type Combo struct {
	ID        string `json:"id"`
	Label     string `json:"label"`
	ParentId  string `json:"parentId"`
	ComboType string `json:"comboType"`
}

// DistEdge is a storage-to-storage transfer edge.
type DistEdge struct {
	Source string `json:"source"`
	Target string `json:"target"`
}

// 对象块分布结构
//type ObjectDistribution struct {
// ObjectID cdssdk.ObjectID `json:"objectID"` // 对象 ID
// PackageID cdssdk.PackageID `json:"packageID"` // 包 ID
// Path string `json:"path"` // 路径
// Size int64 `json:"size"` // 大小
// FileHash string `json:"fileHash"` // 文件哈希
// States []State `json:"states"` // 各阶段状态信息(只需要传1、2、3阶段)
// Relationships []Relationship `json:"relationships"` // 节点间传输量
// Timestamp time.Time `json:"timestamp"` // 请求中的时间戳
//}
//
//type State struct {
// Timestamp time.Time `json:"timestamp"` // 时间戳
// Status string `json:"status"` // 状态
// FaultTolerance string `json:"faultTolerance"` // 容灾度(仅布局调整后)
// Redundancy string `json:"redundancy"` // 冗余度(仅布局调整后)
// AvgAccessCost float64 `json:"avgAccessCost"` // 平均访问开销(仅布局调整前)
// BlockDistributions []BlockDist `json:"blockDistributions"` // 块分布情况
//}
//
//type BlockDist struct {
// StorageID cdssdk.StorageID `json:"storageID"` // 中心ID
// Blocks []Block `json:"blocks"` // 该中心的所有块
//}
//
//type Block struct {
// Type string `json:"type"` // 块类型
// Index string `json:"index"` // 块编号
// ID string `json:"id"` // 块ID
//}
//
//type Relationship struct {
// Status Status `json:"status"` // 连线左侧的状态
// SourceStorageID string `json:"sourceStorageID"` // 源存储节点 ID
// TargetStorageID string `json:"targetStorageID"` // 目标存储节点 ID
// DataTransferCount string `json:"dataTransferCount"` // 数据传输量
// Timestamp time.Time `json:"timestamp"` // 变化结束时间戳
//}

+ 86
- 0
datamap/internal/models/object.go View File

@@ -0,0 +1,86 @@
package models

import (
"fmt"
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gorm.io/gorm"
)

// Object is the tracked metadata and layout status of one stored object.
type Object struct {
	ObjectID       cdssdk.ObjectID  `gorm:"column:ObjectID; primaryKey; type:bigint; autoIncrement" json:"objectID"`
	PackageID      cdssdk.PackageID `gorm:"column:PackageID; type:bigint; not null" json:"packageID"`
	Path           string           `gorm:"column:Path; type:varchar(1024); not null" json:"path"`
	Size           int64            `gorm:"column:Size; type:bigint; not null" json:"size"`
	FileHash       string           `gorm:"column:FileHash; type:varchar(255); not null" json:"fileHash"`
	Status         Status           `gorm:"column:Status; type:tinyint; not null" json:"status"` // see the Status* constants
	FaultTolerance float64          `gorm:"column:faultTolerance; type:float; not null" json:"faultTolerance"`
	Redundancy     float64          `gorm:"column:redundancy; type:float; not null" json:"redundancy"`
	AvgAccessCost  float64          `gorm:"column:avgAccessCost; type:float; not null" json:"avgAccessCost"`
	// BUG FIX: column type was misspelled "datatime" (not a valid SQL type);
	// corrected to "datetime".
	Timestamp time.Time `gorm:"column:Timestamp; type:datetime; not null" json:"timestamp"`
}

// TableName maps the model to the object table.
func (Object) TableName() string {
	return "object"
}

// ObjectRepository provides persistence operations for Object rows.
type ObjectRepository struct {
	repo *GormRepository
}

// NewObjectRepository builds a repository bound to the given DB handle.
func NewObjectRepository(db *gorm.DB) *ObjectRepository {
	return &ObjectRepository{repo: NewGormRepository(db)}
}

// CreateObject inserts a new object row.
func (r *ObjectRepository) CreateObject(object *Object) error {
	return r.repo.Create(object)
}

// GetObjectByID fetches a single object row by its primary key, propagating
// the underlying GORM error unchanged.
func (r *ObjectRepository) GetObjectByID(objectID int64) (*Object, error) {
	const query = "SELECT * FROM object WHERE ObjectID = ?"
	obj := new(Object)
	if err := r.repo.db.Raw(query, objectID).First(obj).Error; err != nil {
		return nil, err
	}
	return obj, nil
}

// UpdateObject saves all fields of an existing object row.
func (r *ObjectRepository) UpdateObject(object *Object) error {
	return r.repo.Update(object)
}

// GetAllObjects returns every object row.
func (r *ObjectRepository) GetAllObjects() ([]Object, error) {
	var out []Object
	if err := r.repo.GetAll(&out); err != nil {
		return nil, err
	}
	return out, nil
}

// ObjectWatcher consumes objectChange sysevents. It currently only validates
// the event shape; no persistence is performed yet.
type ObjectWatcher struct {
	Name string
}

// OnEvent checks that an objectChange event carries a
// *stgmod.BodyNewOrUpdateObject body and reports anything unexpected.
func (w *ObjectWatcher) OnEvent(event sysevent.SysEvent) {

	if event.Category != "objectChange" {
		fmt.Printf("Watcher %s received an event with category %s\n", w.Name, event.Category)
		return
	}
	if _, ok := event.Body.(*stgmod.BodyNewOrUpdateObject); !ok {
		// BUG FIX: the message previously named *ObjectInfo, which is not
		// the type this watcher expects.
		fmt.Printf("Watcher %s: Unexpected Body type, expected *BodyNewOrUpdateObject, got %T\n", w.Name, event.Body)
		return
	}
	// TODO: handle the event body; intentionally a no-op today.
}

+ 43
- 0
datamap/internal/models/repository.go View File

@@ -0,0 +1,43 @@
package models

import (
"gorm.io/gorm"
)

// Repository is the minimal generic CRUD contract implemented by
// GormRepository.
type Repository interface {
	Create(value interface{}) error
	Update(value interface{}) error
	Delete(value interface{}, id uint) error
	GetByID(id uint, out interface{}) error
	GetAll(out interface{}) error
}

// GormRepository implements the generic Repository contract on top of GORM.
type GormRepository struct {
	db *gorm.DB
}

// NewGormRepository wraps the given DB handle.
func NewGormRepository(db *gorm.DB) *GormRepository {
	return &GormRepository{db: db}
}

// Create inserts value as a new row.
func (r *GormRepository) Create(value interface{}) error { return r.db.Create(value).Error }

// Update saves every field of value.
func (r *GormRepository) Update(value interface{}) error { return r.db.Save(value).Error }

// Delete removes the row of value's type with the given primary key.
func (r *GormRepository) Delete(value interface{}, id uint) error {
	return r.db.Delete(value, id).Error
}

// GetByID loads the row with the given primary key into out.
func (r *GormRepository) GetByID(id uint, out interface{}) error { return r.db.First(out, id).Error }

// GetAll loads every row of out's element type into out.
func (r *GormRepository) GetAll(out interface{}) error { return r.db.Find(out).Error }

+ 51
- 0
datamap/internal/models/storageStats.go View File

@@ -0,0 +1,51 @@
package models

import (
"errors"
"fmt"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gorm.io/gorm"
"log"
)

// StorageStatsWatcher keeps the storage table's data counters in sync with
// periodic storageStats sysevents.
type StorageStatsWatcher struct {
	Name string
}

// OnEvent upserts the storage row for the reported StorageID: on first sight
// it inserts the row with NewDataCount 0; afterwards it stores the delta
// since the previous report in NewDataCount. Other categories / body types
// are only reported, not processed.
func (w *StorageStatsWatcher) OnEvent(event sysevent.SysEvent) {
	if event.Category != "storageStats" {
		fmt.Printf("Watcher %s received an event with category %s\n", w.Name, event.Category)
		return
	}
	stats, ok := event.Body.(*stgmod.BodyStorageStats)
	if !ok {
		// BUG FIX: the message previously named *BodyStorageInfo, which is
		// not the type this watcher expects.
		fmt.Printf("Watcher %s: Unexpected Body type, expected *BodyStorageStats, got %T\n", w.Name, event.Body)
		return
	}

	repo := NewStorageRepository(DB)
	storage, err := repo.GetStorageByID(int64(stats.StorageID))
	switch {
	case errors.Is(err, gorm.ErrRecordNotFound):
		// First report for this storage: start counting from here.
		newStorage := &Storage{
			StorageID:    stats.StorageID,
			DataCount:    stats.DataCount,
			NewDataCount: 0,
		}
		// BUG FIX: this error was previously ignored.
		if err := repo.CreateStorage(newStorage); err != nil {
			log.Printf("Error create storage: %v", err)
		}
	case err != nil:
		log.Printf("Error querying storage: %v", err)
	default:
		// Record the growth since the last report, then the new total.
		storage.NewDataCount = stats.DataCount - storage.DataCount
		storage.DataCount = stats.DataCount
		if err := repo.UpdateStorage(storage); err != nil {
			log.Printf("Error update storage: %v", err)
		}
	}
}

+ 110
- 0
datamap/internal/models/storageinfo.go View File

@@ -0,0 +1,110 @@
package models

import (
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gorm.io/gorm"
)

// Storage mirrors one row of the "storage" table: storage identity and
// the running data counters maintained by StorageStatsWatcher.
type Storage struct {
	StorageID    cdssdk.StorageID `gorm:"column:StorageID; primaryKey; type:bigint; autoIncrement" json:"storageID"`
	StorageName  string           `gorm:"column:StorageName; type:varchar(1024); not null" json:"storageName"`
	HubID        cdssdk.HubID     `gorm:"column:HubID; type:bigint; not null" json:"hubID"`
	DataCount    int64            `gorm:"column:DataCount; type:bigint; not null" json:"dataCount"`
	NewDataCount int64            `gorm:"column:NewDataCount; type:bigint; not null" json:"newDataCount"`
	// Fixed: column type was misspelled "datatime", which MySQL rejects
	// when the table is migrated.
	Timestamp time.Time `gorm:"column:Timestamp; type:datetime; not null" json:"timestamp"`
}

// TableName maps the model to the "storage" table.
func (Storage) TableName() string { return "storage" }

// StorageRepository layers Storage-specific data-access helpers on top
// of the generic GormRepository.
type StorageRepository struct {
	repo *GormRepository
}

// NewStorageRepository builds a StorageRepository bound to db.
func NewStorageRepository(db *gorm.DB) *StorageRepository {
	return &StorageRepository{
		repo: NewGormRepository(db),
	}
}

// CreateStorage inserts a new storage row.
func (r *StorageRepository) CreateStorage(storage *Storage) error {
	return r.repo.Create(storage)
}

// UpdateStorage saves every field of storage (upsert by primary key).
func (r *StorageRepository) UpdateStorage(storage *Storage) error {
	return r.repo.Update(storage)
}
// DeleteStorage removes the row keyed by storage.StorageID.
// NOTE(review): StorageID is narrowed from int64 to uint here, which
// truncates on 32-bit platforms for very large IDs — confirm acceptable.
func (r *StorageRepository) DeleteStorage(storage *Storage) error {
	return r.repo.Delete(storage, uint(storage.StorageID))
}

// GetStorageByID loads the storage row with the given primary key; the
// underlying First call yields gorm.ErrRecordNotFound when absent.
func (r *StorageRepository) GetStorageByID(id int64) (*Storage, error) {
	storage := new(Storage)
	if err := r.repo.GetByID(uint(id), storage); err != nil {
		return nil, err
	}
	return storage, nil
}

// GetStorageByHubID returns the first storage row whose HubID equals
// hubId.
// NOTE(review): Raw+Scan returns a zero-value Storage rather than
// gorm.ErrRecordNotFound when no row matches — callers should check
// StorageID before trusting the result.
func (r *StorageRepository) GetStorageByHubID(hubId int64) (*Storage, error) {
	storage := new(Storage)
	err := r.repo.db.Raw("SELECT * FROM storage WHERE HubID = ?", hubId).Scan(storage).Error
	if err != nil {
		return nil, err
	}
	return storage, nil
}

// GetAllStorages returns every row of the storage table.
func (r *StorageRepository) GetAllStorages() ([]Storage, error) {
	var storages []Storage
	if err := r.repo.GetAll(&storages); err != nil {
		return nil, err
	}
	return storages, nil
}

// StorageInfoWatcher consumes storage lifecycle system events
// (new/updated/deleted) and mirrors them into the storage table.
// Name identifies this watcher in diagnostics.
type StorageInfoWatcher struct {
	Name string
}

// OnEvent mirrors storage lifecycle events into the storage table:
// insert on BodyNewStorage, save on BodyStorageUpdated, delete on
// BodyStorageDeleted. Bodies of any other type are ignored.
// NOTE(review): repository errors are silently dropped here (the
// original behavior) — consider logging them.
func (w *StorageInfoWatcher) OnEvent(event sysevent.SysEvent) {
	repo := NewStorageRepository(DB)

	switch body := event.Body.(type) {
	case *stgmod.BodyNewStorage:
		record := &Storage{
			StorageID:   body.Info.StorageID,
			StorageName: body.Info.Name,
			HubID:       body.Info.MasterHub,
			Timestamp:   time.Now(),
		}
		if err := repo.CreateStorage(record); err != nil {
			return
		}

	case *stgmod.BodyStorageUpdated:
		record := &Storage{
			StorageID:   body.Info.StorageID,
			StorageName: body.Info.Name,
			HubID:       body.Info.MasterHub,
			Timestamp:   time.Now(),
		}
		if err := repo.UpdateStorage(record); err != nil {
			return
		}

	case *stgmod.BodyStorageDeleted:
		if err := repo.DeleteStorage(&Storage{StorageID: body.StorageID}); err != nil {
			return
		}
	}
}

+ 117
- 0
datamap/internal/mq/mq.go View File

@@ -0,0 +1,117 @@
package mq

import (
"fmt"
jsoniter "github.com/json-iterator/go"
"github.com/streadway/amqp"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/datamap/internal/config"
"gitlink.org.cn/cloudream/storage/datamap/internal/models"
"log"
)

// InitMQ dials RabbitMQ with the credentials in cfg and spawns
// background consumers for all datamap queues. The returned connection
// must stay open for the consumers to keep running.
func InitMQ(cfg config.RabbitMQConfig) (*amqp.Connection, error) {
	url := fmt.Sprintf("amqp://%s:%s@%s:%s/", cfg.User, cfg.Password, cfg.Host, cfg.Port)

	conn, err := amqp.Dial(url)
	if err != nil {
		return nil, err
	}

	// Start consuming from every datamap queue in the background.
	go listenQueues(conn)

	return conn, nil
}

// listenQueues spawns one consumer goroutine per datamap queue; every
// delivery is dispatched to processMessage. Each goroutine exits when
// its channel cannot be opened, the consume registration fails, or the
// connection closes.
func listenQueues(conn *amqp.Connection) {
	for _, queue := range []string{
		"datamap_hubinfo",
		"datamap_storageinfo",
		"datamap_storagestats",
		"datamap_hubtransferstats",
		"datamap_hubstoragetransferstats",
		"datamap_blocktransfer",
		"datamap_blockdistribution",
	} {
		go consumeQueue(conn, queue)
	}
}

// consumeQueue opens a channel, starts an auto-ack consumer on q, and
// forwards each message body to processMessage until the delivery
// stream closes.
func consumeQueue(conn *amqp.Connection, q string) {
	ch, err := conn.Channel()
	if err != nil {
		log.Printf("Failed to open channel for queue %s: %v", q, err)
		return
	}
	defer ch.Close()

	msgs, err := ch.Consume(q, "", true, false, false, false, nil)
	if err != nil {
		log.Printf("Failed to register consumer for queue %s: %v", q, err)
		return
	}

	for msg := range msgs {
		processMessage(q, msg.Body)
	}
}

// processMessage decodes a raw message from the named queue into its
// corresponding model type and hands it to the matching
// models.ProcessXxx handler. Messages that fail to unmarshal, and
// messages from unknown queues, are logged and dropped.
//
// Consistency fix: the original mixed two error-handling styles (inline
// `if err := …` for some queues, separate statements for others); all
// cases now use the inline form. Log messages are unchanged.
func processMessage(queue string, body []byte) {
	switch queue {
	case "datamap_hubinfo":
		var data stgmod.HubInfo
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal HubInfo: %v, body: %s", err, body)
			return
		}
		models.ProcessHubInfo(data)
	case "datamap_storageinfo":
		var data stgmod.StorageInfo
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal StorageInfo: %v, body: %s", err, body)
			return
		}
		models.ProcessStorageInfo(data)
	case "datamap_storagestats":
		var data stgmod.StorageStats
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal StorageStats: %v, body: %s", err, body)
			return
		}
		models.ProcessStorageStats(data)
	case "datamap_hubtransferstats":
		var data stgmod.HubTransferStats
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal HubTransferStats: %v, body: %s", err, body)
			return
		}
		models.ProcessHubTransfer(data)
	case "datamap_hubstoragetransferstats":
		var data stgmod.HubStorageTransferStats
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal HubStorageTransferStats: %v, body: %s", err, body)
			return
		}
		models.ProcessHubStorageTransfer(data)
	case "datamap_blocktransfer":
		var data stgmod.BlockTransfer
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal BlockTransfer: %v, body: %s", err, body)
			return
		}
		models.ProcessBlockTransfer(data)
	case "datamap_blockdistribution":
		var data stgmod.BlockDistribution
		if err := jsoniter.Unmarshal(body, &data); err != nil {
			log.Printf("Failed to unmarshal BlockDistribution: %v, body: %s", err, body)
			return
		}
		models.ProcessBlockDistribution(data)
	default:
		log.Printf("Unknown queue: %s", queue)
	}
}

+ 23
- 0
datamap/internal/server/server.go View File

@@ -0,0 +1,23 @@
package server

import (
"github.com/gin-gonic/gin"
"github.com/streadway/amqp"
"gitlink.org.cn/cloudream/storage/datamap/internal/handlers"
"gorm.io/gorm"
"log"
)

// StartServer registers the HTTP handlers against db and serves them on
// :8080; it blocks until the server exits and fatals on failure.
// NOTE(review): the mq connection is accepted but not used here —
// presumably kept alive by the caller; confirm intent.
func StartServer(db *gorm.DB, mq *amqp.Connection) {
	handlers.SetDB(db)

	router := gin.Default()
	router.GET("/hubInfo", handlers.GetHubInfo)
	router.GET("/dataTransfer/:objectID", handlers.GetDataTransfer)

	if err := router.Run(":8080"); err != nil {
		log.Fatalf("Failed to start server: %v", err)
	}
}

+ 32
- 0
datamap/main.go View File

@@ -0,0 +1,32 @@
package main

import (
"gitlink.org.cn/cloudream/storage/datamap/internal/config"
"gitlink.org.cn/cloudream/storage/datamap/internal/db"
"gitlink.org.cn/cloudream/storage/datamap/internal/models"
"gitlink.org.cn/cloudream/storage/datamap/internal/mq"
"gitlink.org.cn/cloudream/storage/datamap/internal/server"
"log"
)

// main boots the datamap service: load config, connect the database,
// start the RabbitMQ consumers, then run the HTTP server (blocks).
func main() {
	cfg := config.LoadConfig()

	// Database connection, shared via models.InitDB.
	dbConn, err := db.InitDB(cfg.Database)
	if err != nil {
		log.Fatalf("Failed to initialize database: %v", err)
	}
	models.InitDB(dbConn)

	// RabbitMQ connection; InitMQ also starts the queue consumers.
	mqConn, err := mq.InitMQ(cfg.RabbitMQ)
	if err != nil {
		log.Fatalf("Failed to initialize RabbitMQ: %v", err)
	}

	// Gin HTTP server — does not return on success.
	server.StartServer(dbConn, mqConn)
}

+ 32
- 19
go.mod View File

@@ -11,7 +11,8 @@ require (
github.com/aws/aws-sdk-go-v2/credentials v1.17.47
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0
github.com/gin-gonic/gin v1.7.7
github.com/go-sql-driver/mysql v1.7.1
github.com/go-co-op/gocron/v2 v2.15.0
github.com/go-sql-driver/mysql v1.8.1
github.com/hashicorp/golang-lru/v2 v2.0.5
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.131
@@ -22,13 +23,16 @@ require (
github.com/samber/lo v1.38.1
github.com/smartystreets/goconvey v1.8.1
github.com/spf13/cobra v1.8.0
github.com/spf13/viper v1.19.0
gitlink.org.cn/cloudream/common v0.0.0
google.golang.org/grpc v1.57.0
google.golang.org/protobuf v1.31.0
gorm.io/gorm v1.25.7
golang.org/x/sync v0.7.0
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.33.0
gorm.io/gorm v1.25.11
)

require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect
@@ -38,11 +42,30 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect
github.com/aws/smithy-go v1.22.1 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.6.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tjfoc/gmsm v1.4.1 // indirect
go.mongodb.org/mongo-driver v1.12.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
@@ -58,13 +81,12 @@ require (
github.com/go-playground/validator/v10 v10.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/imdario/mergo v0.3.15 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/json-iterator/go v1.1.12
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
@@ -80,20 +102,11 @@ require (
github.com/streadway/amqp v1.1.0
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/zyedidia/generic v1.2.1 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.etcd.io/etcd/api/v3 v3.5.12 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect
go.etcd.io/etcd/client/v3 v3.5.12 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.23.0 // indirect
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect
golang.org/x/net v0.21.0 // indirect
golang.org/x/sync v0.1.0
golang.org/x/sys v0.20.0 // indirect
golang.org/x/text v0.15.0 // indirect
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
gorm.io/driver/mysql v1.5.7
)

+ 78
- 33
go.sum View File

@@ -1,4 +1,6 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ=
github.com/antonfisher/nested-logrus-formatter v1.3.1/go.mod h1:6WTfyWFkBc9+zyBaKIqRrg/KwMqBbodBjgbHjDz7zjA=
@@ -37,15 +39,22 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs=
github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
github.com/go-co-op/gocron/v2 v2.15.0 h1:Kpvo71VSihE+RImmpA+3ta5CcMhoRzMGw4dJawrj4zo=
github.com/go-co-op/gocron/v2 v2.15.0/go.mod h1:ZF70ZwEqz0OO4RBXE1sNxnANy/zvwLcattWEFsqpKig=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
@@ -56,8 +65,8 @@ github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn
github.com/go-playground/validator/v10 v10.8.0 h1:1kAa0fCrnpv+QYdkdcRzrRM7AyYs5o8+jZdJCz9xj6k=
github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -82,11 +91,11 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -96,6 +105,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4=
github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible h1:XQVXdk+WAJ4fSNB6mMRuYNvFWou7BZs6SZB925hPrnk=
github.com/huaweicloud/huaweicloud-sdk-go-obs v3.24.9+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.1.131 h1:34E2+lzM/yi0GlYAEQEUuf4/3mAoAadA+7oaq9q3Mys=
@@ -112,6 +123,8 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
@@ -124,12 +137,18 @@ github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZX
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY=
github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -144,15 +163,26 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM=
github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
@@ -161,15 +191,24 @@ github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGB
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM=
github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -178,8 +217,12 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
@@ -195,18 +238,18 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc=
github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c=
go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A=
go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg=
go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw=
go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE=
go.mongodb.org/mongo-driver v1.12.0/go.mod h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
@@ -222,8 +265,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 h1:3xJIFvzUFbu4ls0BTBYcgbCGhA63eAOEMxIHugyXJqA=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -245,8 +288,9 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -254,8 +298,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -311,18 +356,16 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M=
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9 h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=
google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ=
google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -331,10 +374,11 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -346,7 +390,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.5.7 h1:MndhOPYOfEp2rHKgkZIhJ16eVUIRf2HmzgoPmh7FCWo=
gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM=
gorm.io/gorm v1.25.7 h1:VsD6acwRjz2zFxGO50gPO6AkNs7KKnvfzUjHQhZDz/A=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

+ 264
- 30
scanner/internal/event/check_package_redundancy.go View File

@@ -483,6 +483,7 @@ func (t *CheckPackageRedundancy) noneToRep(ctx ExecuteContext, obj stgmod.Object
}

var blocks []stgmod.ObjectBlock
var blockChgs []stgmod.BlockChange
for i, stg := range uploadStgs {
blocks = append(blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
@@ -490,8 +491,26 @@ func (t *CheckPackageRedundancy) noneToRep(ctx ExecuteContext, obj stgmod.Object
StorageID: stg.Storage.Storage.StorageID,
FileHash: ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
})
blockChgs = append(blockChgs, &stgmod.BlockChangeClone{
BlockType: stgmod.BlockTypeRaw,
SourceStorageID: obj.Blocks[0].StorageID,
TargetStorageID: stg.Storage.Storage.StorageID,
TransferBytes: 1,
})
}

// 删除原本的文件块
blockChgs = append(blockChgs, &stgmod.BlockChangeDeleted{
Index: 0,
StorageID: obj.Blocks[0].StorageID,
})

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: blockChgs,
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: red,
@@ -532,6 +551,8 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD
}

var blocks []stgmod.ObjectBlock
var evtTargetBlocks []stgmod.Block
var evtBlockTrans []stgmod.DataTransfer
for i := 0; i < red.N; i++ {
blocks = append(blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
@@ -539,8 +560,39 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD
StorageID: uploadStgs[i].Storage.Storage.StorageID,
FileHash: ioRet[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
})
evtTargetBlocks = append(evtTargetBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeEC,
Index: i,
StorageID: uploadStgs[i].Storage.Storage.StorageID,
})
evtBlockTrans = append(evtBlockTrans, stgmod.DataTransfer{
SourceStorageID: obj.Blocks[0].StorageID,
TargetStorageID: uploadStgs[i].Storage.Storage.StorageID,
TransferBytes: 1,
})
}

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: []stgmod.BlockChange{
&stgmod.BlockChangeEnDecode{
SourceBlocks: []stgmod.Block{{
BlockType: stgmod.BlockTypeRaw,
StorageID: obj.Blocks[0].StorageID,
}},
TargetBlocks: evtTargetBlocks,
DataTransfers: evtBlockTrans,
},

// 删除原本的文件块
&stgmod.BlockChangeDeleted{
Index: 0,
StorageID: obj.Blocks[0].StorageID,
},
},
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: red,
@@ -548,7 +600,7 @@ func (t *CheckPackageRedundancy) noneToEC(ctx ExecuteContext, obj stgmod.ObjectD
}, nil
}

func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, uploadStorages []*StorageLoadInfo, allStgs map[cdssdk.StorageID]*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.ObjectDetail, red *cdssdk.LRCRedundancy, uploadStgs []*StorageLoadInfo, allStgs map[cdssdk.StorageID]*StorageLoadInfo) (*coormq.UpdatingObjectRedundancy, error) {
if len(obj.Blocks) == 0 {
return nil, fmt.Errorf("object is not cached on any storages, cannot change its redundancy to ec")
}
@@ -563,7 +615,7 @@ func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.Object

var toes []ioswitchlrc.To
for i := 0; i < red.N; i++ {
toes = append(toes, ioswitchlrc.NewToStorage(*uploadStorages[i].Storage.MasterHub, uploadStorages[i].Storage.Storage, i, fmt.Sprintf("%d", i)))
toes = append(toes, ioswitchlrc.NewToStorage(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage.Storage, i, fmt.Sprintf("%d", i)))
}

plans := exec.NewPlanBuilder()
@@ -580,15 +632,48 @@ func (t *CheckPackageRedundancy) noneToLRC(ctx ExecuteContext, obj stgmod.Object
}

var blocks []stgmod.ObjectBlock
var evtTargetBlocks []stgmod.Block
var evtBlockTrans []stgmod.DataTransfer
for i := 0; i < red.N; i++ {
blocks = append(blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
Index: i,
StorageID: uploadStorages[i].Storage.Storage.StorageID,
StorageID: uploadStgs[i].Storage.Storage.StorageID,
FileHash: ioRet[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
})
evtTargetBlocks = append(evtTargetBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeEC,
Index: i,
StorageID: uploadStgs[i].Storage.Storage.StorageID,
})
evtBlockTrans = append(evtBlockTrans, stgmod.DataTransfer{
SourceStorageID: obj.Blocks[0].StorageID,
TargetStorageID: uploadStgs[i].Storage.Storage.StorageID,
TransferBytes: 1,
})
}

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: []stgmod.BlockChange{
&stgmod.BlockChangeEnDecode{
SourceBlocks: []stgmod.Block{{
BlockType: stgmod.BlockTypeRaw,
StorageID: obj.Blocks[0].StorageID,
}},
TargetBlocks: evtTargetBlocks,
DataTransfers: evtBlockTrans,
},

// 删除原本的文件块
&stgmod.BlockChangeDeleted{
Index: 0,
StorageID: obj.Blocks[0].StorageID,
},
},
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: red,
@@ -634,6 +719,8 @@ func (t *CheckPackageRedundancy) noneToSeg(ctx ExecuteContext, obj stgmod.Object
}

var blocks []stgmod.ObjectBlock
var evtTargetBlocks []stgmod.Block
var evtBlockTrans []stgmod.DataTransfer
for i, stg := range uploadStgs {
blocks = append(blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
@@ -641,8 +728,39 @@ func (t *CheckPackageRedundancy) noneToSeg(ctx ExecuteContext, obj stgmod.Object
StorageID: stg.Storage.Storage.StorageID,
FileHash: ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
})
evtTargetBlocks = append(evtTargetBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeSegment,
Index: i,
StorageID: uploadStgs[i].Storage.Storage.StorageID,
})
evtBlockTrans = append(evtBlockTrans, stgmod.DataTransfer{
SourceStorageID: obj.Blocks[0].StorageID,
TargetStorageID: uploadStgs[i].Storage.Storage.StorageID,
TransferBytes: 1,
})
}

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: []stgmod.BlockChange{
&stgmod.BlockChangeEnDecode{
SourceBlocks: []stgmod.Block{{
BlockType: stgmod.BlockTypeRaw,
StorageID: obj.Blocks[0].StorageID,
}},
TargetBlocks: evtTargetBlocks,
DataTransfers: evtBlockTrans,
},

// 删除原本的文件块
&stgmod.BlockChangeDeleted{
Index: 0,
StorageID: obj.Blocks[0].StorageID,
},
},
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: red,
@@ -687,6 +805,7 @@ func (t *CheckPackageRedundancy) repToRep(ctx ExecuteContext, obj stgmod.ObjectD
}

var blocks []stgmod.ObjectBlock
var blockChgs []stgmod.BlockChange
for i, stg := range uploadStgs {
blocks = append(blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
@@ -694,8 +813,26 @@ func (t *CheckPackageRedundancy) repToRep(ctx ExecuteContext, obj stgmod.ObjectD
StorageID: stg.Storage.Storage.StorageID,
FileHash: ret[fmt.Sprintf("%d", i)].(*ops2.FileHashValue).Hash,
})
blockChgs = append(blockChgs, &stgmod.BlockChangeClone{
BlockType: stgmod.BlockTypeRaw,
SourceStorageID: obj.Blocks[0].StorageID,
TargetStorageID: stg.Storage.Storage.StorageID,
TransferBytes: 1,
})
}

// 删除原本的文件块
blockChgs = append(blockChgs, &stgmod.BlockChangeDeleted{
Index: 0,
StorageID: obj.Blocks[0].StorageID,
})

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: blockChgs,
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: red,
@@ -739,26 +876,21 @@ func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDe
// 如果选择的备份节点都是同一个,那么就只要上传一次
uploadStgs = lo.UniqBy(uploadStgs, func(item *StorageLoadInfo) cdssdk.StorageID { return item.Storage.Storage.StorageID })

// 每个被选节点都在自己节点上重建原始数据
planBlder := exec.NewPlanBuilder()
for i := range uploadStgs {
ft := ioswitch2.NewFromTo()
ft.ECParam = srcRed
ft := ioswitch2.NewFromTo()
ft.ECParam = srcRed

for i2, block := range chosenBlocks {
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2], ioswitch2.ECStream(block.Index)))
}
for i, block := range chosenBlocks {
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i].MasterHub, chosenBlockStg[i], ioswitch2.ECStream(block.Index)))
}

len := obj.Object.Size
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.RawStream(), fmt.Sprintf("%d", i), math2.Range{
Offset: 0,
Length: &len,
}))
for i := range uploadStgs {
ft.AddTo(ioswitch2.NewToShardStoreWithRange(*uploadStgs[i].Storage.MasterHub, uploadStgs[i].Storage, ioswitch2.RawStream(), fmt.Sprintf("%d", i), math2.NewRange(0, obj.Object.Size)))
}

err := parser.Parse(ft, planBlder)
if err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}
err := parser.Parse(ft, planBlder)
if err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

// TODO 添加依赖
@@ -770,6 +902,7 @@ func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDe
}

var blocks []stgmod.ObjectBlock

for i := range uploadStgs {
blocks = append(blocks, stgmod.ObjectBlock{
ObjectID: obj.Object.ObjectID,
@@ -779,6 +912,55 @@ func (t *CheckPackageRedundancy) ecToRep(ctx ExecuteContext, obj stgmod.ObjectDe
})
}

var evtSrcBlocks []stgmod.Block
var evtTargetBlocks []stgmod.Block
for i2, block := range chosenBlocks {
evtSrcBlocks = append(evtSrcBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeEC,
Index: block.Index,
StorageID: chosenBlockStg[i2].Storage.StorageID,
})
}

for _, stg := range uploadStgs {
evtTargetBlocks = append(evtTargetBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeRaw,
Index: 0,
StorageID: stg.Storage.Storage.StorageID,
})
}

var evtBlockTrans []stgmod.DataTransfer
for _, stg := range uploadStgs {
for i2 := range chosenBlocks {
evtBlockTrans = append(evtBlockTrans, stgmod.DataTransfer{
SourceStorageID: chosenBlockStg[i2].Storage.StorageID,
TargetStorageID: stg.Storage.Storage.StorageID,
TransferBytes: 1,
})
}
}

var blockChgs []stgmod.BlockChange
blockChgs = append(blockChgs, &stgmod.BlockChangeEnDecode{
SourceBlocks: evtSrcBlocks,
TargetBlocks: evtTargetBlocks,
DataTransfers: evtBlockTrans,
})

for _, block := range obj.Blocks {
blockChgs = append(blockChgs, &stgmod.BlockChangeDeleted{
Index: block.Index,
StorageID: block.StorageID,
})
}

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: blockChgs,
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: tarRed,
@@ -817,6 +999,22 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet
// 目前EC的参数都相同,所以可以不用重建出完整数据然后再分块,可以直接构建出目的节点需要的块
planBlder := exec.NewPlanBuilder()

var evtSrcBlocks []stgmod.Block
var evtTargetBlocks []stgmod.Block

ft := ioswitch2.NewFromTo()
ft.ECParam = srcRed

for i, block := range chosenBlocks {
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i].MasterHub, chosenBlockStg[i], ioswitch2.ECStream(block.Index)))

evtSrcBlocks = append(evtSrcBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeEC,
Index: block.Index,
StorageID: chosenBlockStg[i].Storage.StorageID,
})
}

var newBlocks []stgmod.ObjectBlock
shouldUpdateBlocks := false
for i, stg := range uploadStorages {
@@ -838,24 +1036,23 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet
shouldUpdateBlocks = true

// 否则就要重建出这个节点需要的块

ft := ioswitch2.NewFromTo()
ft.ECParam = srcRed
for i2, block := range chosenBlocks {
ft.AddFrom(ioswitch2.NewFromShardstore(block.FileHash, *chosenBlockStg[i2].MasterHub, chosenBlockStg[i2], ioswitch2.ECStream(block.Index)))
}

// 输出只需要自己要保存的那一块
ft.AddTo(ioswitch2.NewToShardStore(*stg.Storage.MasterHub, stg.Storage, ioswitch2.ECStream(i), fmt.Sprintf("%d", i)))

err := parser.Parse(ft, planBlder)
if err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}
evtTargetBlocks = append(evtTargetBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeEC,
Index: i,
StorageID: stg.Storage.Storage.StorageID,
})

newBlocks = append(newBlocks, newBlock)
}

err := parser.Parse(ft, planBlder)
if err != nil {
return nil, fmt.Errorf("parsing plan: %w", err)
}

// 如果没有任何Plan,Wait会直接返回成功
execCtx := exec.NewExecContext()
exec.SetValueByType(execCtx, ctx.Args.StgMgr)
@@ -877,6 +1074,41 @@ func (t *CheckPackageRedundancy) ecToEC(ctx ExecuteContext, obj stgmod.ObjectDet
newBlocks[idx].FileHash = v.(*ops2.FileHashValue).Hash
}

var evtBlockTrans []stgmod.DataTransfer
for _, src := range evtSrcBlocks {
for _, tar := range evtTargetBlocks {
evtBlockTrans = append(evtBlockTrans, stgmod.DataTransfer{
SourceStorageID: src.StorageID,
TargetStorageID: tar.StorageID,
TransferBytes: 1,
})
}
}

var blockChgs []stgmod.BlockChange
for _, block := range obj.Blocks {
keep := lo.ContainsBy(newBlocks, func(newBlock stgmod.ObjectBlock) bool {
return newBlock.Index == block.Index && newBlock.StorageID == block.StorageID
})
if !keep {
blockChgs = append(blockChgs, &stgmod.BlockChangeDeleted{
Index: block.Index,
StorageID: block.StorageID,
})
}
}
blockChgs = append(blockChgs, &stgmod.BlockChangeEnDecode{
SourceBlocks: evtSrcBlocks,
TargetBlocks: evtTargetBlocks,
DataTransfers: evtBlockTrans,
})

ctx.Args.EvtPub.Publish(&stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: blockChgs,
})

return &coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: tarRed,
@@ -915,6 +1147,8 @@ func (t *CheckPackageRedundancy) lrcToLRC(ctx ExecuteContext, obj stgmod.ObjectD
}
}

// TODO 产生BlockTransfer事件

if canGroupReconstruct {
// return t.groupReconstructLRC(obj, lostBlocks, lostBlockGrps, blocksGrpByIndex, srcRed, uploadStorages)
}


+ 176
- 16
scanner/internal/event/clean_pinned.go View File

@@ -117,6 +117,8 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
planBld := exec.NewPlanBuilder()
planningStgIDs := make(map[cdssdk.StorageID]bool)

var sysEvents []stgmod.SysEventBody

// 对于rep对象,统计出所有对象块分布最多的两个节点,用这两个节点代表所有rep对象块的分布,去进行退火算法
var repObjectsUpdating []coormq.UpdatingObjectRedundancy
repMostHubIDs := t.summaryRepObjectBlockNodes(repObjects)
@@ -128,6 +130,7 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
})
for _, obj := range repObjects {
repObjectsUpdating = append(repObjectsUpdating, t.makePlansForRepObject(allStgInfos, solu, obj, planBld, planningStgIDs))
sysEvents = append(sysEvents, t.generateSysEventForRepObject(solu, obj)...)
}

// 对于ec对象,则每个对象单独进行退火算法
@@ -141,6 +144,7 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
blocks: obj.Blocks,
})
ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allStgInfos, solu, obj, planBld, planningStgIDs))
sysEvents = append(sysEvents, t.generateSysEventForECObject(solu, obj)...)
}

ioSwRets, err := t.executePlans(execCtx, planBld, planningStgIDs)
@@ -161,6 +165,10 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
log.Warnf("changing object redundancy: %s", err.Error())
return
}

for _, e := range sysEvents {
execCtx.Args.EvtPub.Publish(e)
}
}
}

@@ -227,9 +235,12 @@ type annealingState struct {
maxScore float64 // 搜索过程中得到过的最大分数
maxScoreRmBlocks []bool // 最大分数对应的删除方案

rmBlocks []bool // 当前删除方案
inversedIndex int // 当前删除方案是从上一次的方案改动哪个flag而来的
lastScore float64 // 上一次方案的分数
rmBlocks []bool // 当前删除方案
inversedIndex int // 当前删除方案是从上一次的方案改动哪个flag而来的
lastDisasterTolerance float64 // 上一次方案的容灾度
lastSpaceCost float64 // 上一次方案的冗余度
lastMinAccessCost float64 // 上一次方案的最小访问费用
lastScore float64 // 上一次方案的分数
}

type objectBlock struct {
@@ -464,8 +475,11 @@ type combinatorialTreeNode struct {
}

type annealingSolution struct {
blockList []objectBlock // 所有节点的块分布情况
rmBlocks []bool // 要删除哪些块
blockList []objectBlock // 所有节点的块分布情况
rmBlocks []bool // 要删除哪些块
disasterTolerance float64 // 本方案的容灾度
spaceCost float64 // 本方案的冗余度
minAccessCost float64 // 本方案的最小访问费用
}

func (t *CleanPinned) startAnnealing(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, readerStgIDs []cdssdk.StorageID, object annealingObject) annealingSolution {
@@ -529,8 +543,11 @@ func (t *CleanPinned) startAnnealing(allStgInfos map[cdssdk.StorageID]*stgmod.St
}
// fmt.Printf("final: %v\n", state.maxScoreRmBlocks)
return annealingSolution{
blockList: state.blockList,
rmBlocks: state.maxScoreRmBlocks,
blockList: state.blockList,
rmBlocks: state.maxScoreRmBlocks,
disasterTolerance: state.lastDisasterTolerance,
spaceCost: state.lastSpaceCost,
minAccessCost: state.lastMinAccessCost,
}
}

@@ -640,6 +657,10 @@ func (t *CleanPinned) calcScore(state *annealingState) float64 {
ac := t.calcMinAccessCost(state)
sc := t.calcSpaceCost(state)

state.lastDisasterTolerance = dt
state.lastMinAccessCost = ac
state.lastSpaceCost = sc

dtSc := 1.0
if dt < 1 {
dtSc = 0
@@ -730,6 +751,11 @@ func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*st
Redundancy: obj.Object.Redundancy,
}

ft := ioswitch2.NewFromTo()

fromStg := allStgInfos[obj.Blocks[0].StorageID]
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg.MasterHub, *fromStg, ioswitch2.RawStream()))

for i, f := range solu.rmBlocks {
hasCache := lo.ContainsBy(obj.Blocks, func(b stgmod.ObjectBlock) bool { return b.StorageID == solu.blockList[i].StorageID }) ||
lo.ContainsBy(obj.PinnedAt, func(n cdssdk.StorageID) bool { return n == solu.blockList[i].StorageID })
@@ -738,18 +764,9 @@ func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*st
if !willRm {
// 如果对象在退火后要保留副本的节点没有副本,则需要在这个节点创建副本
if !hasCache {
ft := ioswitch2.NewFromTo()

fromStg := allStgInfos[obj.Blocks[0].StorageID]
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *fromStg.MasterHub, *fromStg, ioswitch2.RawStream()))
toStg := allStgInfos[solu.blockList[i].StorageID]
ft.AddTo(ioswitch2.NewToShardStore(*toStg.MasterHub, *toStg, ioswitch2.RawStream(), fmt.Sprintf("%d.0", obj.Object.ObjectID)))

err := parser.Parse(ft, planBld)
if err != nil {
// TODO 错误处理
continue
}
planningHubIDs[solu.blockList[i].StorageID] = true
}
entry.Blocks = append(entry.Blocks, stgmod.ObjectBlock{
@@ -761,9 +778,72 @@ func (t *CleanPinned) makePlansForRepObject(allStgInfos map[cdssdk.StorageID]*st
}
}

err := parser.Parse(ft, planBld)
if err != nil {
// TODO 错误处理
}

return entry
}

func (t *CleanPinned) generateSysEventForRepObject(solu annealingSolution, obj stgmod.ObjectDetail) []stgmod.SysEventBody {
var blockChgs []stgmod.BlockChange

for i, f := range solu.rmBlocks {
hasCache := lo.ContainsBy(obj.Blocks, func(b stgmod.ObjectBlock) bool { return b.StorageID == solu.blockList[i].StorageID }) ||
lo.ContainsBy(obj.PinnedAt, func(n cdssdk.StorageID) bool { return n == solu.blockList[i].StorageID })
willRm := f

if !willRm {
// 如果对象在退火后要保留副本的节点没有副本,则需要在这个节点创建副本
if !hasCache {
blockChgs = append(blockChgs, &stgmod.BlockChangeClone{
BlockType: stgmod.BlockTypeRaw,
SourceStorageID: obj.Blocks[0].StorageID,
TargetStorageID: solu.blockList[i].StorageID,
})
}
} else {
blockChgs = append(blockChgs, &stgmod.BlockChangeDeleted{
Index: 0,
StorageID: solu.blockList[i].StorageID,
})
}
}

transEvt := &stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: blockChgs,
}

var blockDist []stgmod.BlockDistributionObjectInfo
for i, f := range solu.rmBlocks {
if !f {
blockDist = append(blockDist, stgmod.BlockDistributionObjectInfo{
BlockType: stgmod.BlockTypeRaw,
Index: 0,
StorageID: solu.blockList[i].StorageID,
})
}
}

distEvt := &stgmod.BodyBlockDistribution{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
Path: obj.Object.Path,
Size: obj.Object.Size,
FileHash: obj.Object.FileHash,
FaultTolerance: solu.disasterTolerance,
Redundancy: solu.spaceCost,
AvgAccessCost: 0, // TODO 计算平均访问代价,从日常访问数据中统计
BlockDistribution: blockDist,
// TODO 不好计算传输量
}

return []stgmod.SysEventBody{transEvt, distEvt}
}

func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stgmod.StorageDetail, solu annealingSolution, obj stgmod.ObjectDetail, planBld *exec.PlanBuilder, planningHubIDs map[cdssdk.StorageID]bool) coormq.UpdatingObjectRedundancy {
entry := coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
@@ -797,6 +877,7 @@ func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stg
ecRed := obj.Object.Redundancy.(*cdssdk.ECRedundancy)

for id, idxs := range reconstrct {
// 依次生成每个节点上的执行计划,因为如果放到一个计划里一起生成,不能保证每个节点上的块用的都是本节点上的副本
ft := ioswitch2.NewFromTo()
ft.ECParam = ecRed
ft.AddFrom(ioswitch2.NewFromShardstore(obj.Object.FileHash, *allStgInfos[id].MasterHub, *allStgInfos[id], ioswitch2.RawStream()))
@@ -816,6 +897,85 @@ func (t *CleanPinned) makePlansForECObject(allStgInfos map[cdssdk.StorageID]*stg
return entry
}

func (t *CleanPinned) generateSysEventForECObject(solu annealingSolution, obj stgmod.ObjectDetail) []stgmod.SysEventBody {
var blockChgs []stgmod.BlockChange

reconstrct := make(map[cdssdk.StorageID]*[]int)
for i, f := range solu.rmBlocks {
block := solu.blockList[i]
if !f {
// 如果这个块是影子块,那么就要从完整对象里重建这个块
if !block.HasEntity {
re, ok := reconstrct[block.StorageID]
if !ok {
re = &[]int{}
reconstrct[block.StorageID] = re
}

*re = append(*re, block.Index)
}
} else {
blockChgs = append(blockChgs, &stgmod.BlockChangeDeleted{
Index: block.Index,
StorageID: block.StorageID,
})
}
}

// 由于每一个需要被重建的块都是从同中心的副本里构建出来的,所以对于每一个中心都要产生一个BlockChangeEnDecode
for id, idxs := range reconstrct {
var tarBlocks []stgmod.Block
for _, idx := range *idxs {
tarBlocks = append(tarBlocks, stgmod.Block{
BlockType: stgmod.BlockTypeEC,
Index: idx,
StorageID: id,
})
}
blockChgs = append(blockChgs, &stgmod.BlockChangeEnDecode{
SourceBlocks: []stgmod.Block{{
BlockType: stgmod.BlockTypeRaw,
Index: 0,
StorageID: id, // 影子块的原始对象就在同一个节点上
}},
TargetBlocks: tarBlocks,
// 传输量为0
})
}

transEvt := &stgmod.BodyBlockTransfer{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
BlockChanges: blockChgs,
}

var blockDist []stgmod.BlockDistributionObjectInfo
for i, f := range solu.rmBlocks {
if !f {
blockDist = append(blockDist, stgmod.BlockDistributionObjectInfo{
BlockType: stgmod.BlockTypeEC,
Index: solu.blockList[i].Index,
StorageID: solu.blockList[i].StorageID,
})
}
}

distEvt := &stgmod.BodyBlockDistribution{
ObjectID: obj.Object.ObjectID,
PackageID: obj.Object.PackageID,
Path: obj.Object.Path,
Size: obj.Object.Size,
FileHash: obj.Object.FileHash,
FaultTolerance: solu.disasterTolerance,
Redundancy: solu.spaceCost,
AvgAccessCost: 0, // TODO 计算平均访问代价,从日常访问数据中统计
BlockDistribution: blockDist,
// TODO 不好计算传输量
}

return []stgmod.SysEventBody{transEvt, distEvt}
}

func (t *CleanPinned) executePlans(ctx ExecuteContext, planBld *exec.PlanBuilder, planningStgIDs map[cdssdk.StorageID]bool) (map[string]exec.VarValue, error) {
// 统一加锁,有重复也没关系
lockBld := reqbuilder.NewBuilder()


+ 4
- 1
scanner/internal/event/event.go View File

@@ -10,12 +10,14 @@ import (
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
scevt "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner/event"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
)

type ExecuteArgs struct {
DB *db2.DB
DistLock *distlock.Service
StgMgr *agtpool.AgentPool
EvtPub *sysevent.Publisher
}

type Executor = event.Executor[ExecuteArgs]
@@ -26,11 +28,12 @@ type Event = event.Event[ExecuteArgs]

type ExecuteOption = event.ExecuteOption

func NewExecutor(db *db2.DB, distLock *distlock.Service, stgAgts *agtpool.AgentPool) Executor {
func NewExecutor(db *db2.DB, distLock *distlock.Service, stgAgts *agtpool.AgentPool, evtPub *sysevent.Publisher) Executor {
return event.NewExecutor(ExecuteArgs{
DB: db,
DistLock: distLock,
StgMgr: stgAgts,
EvtPub: evtPub,
})
}



+ 47
- 1
scanner/main.go View File

@@ -1,16 +1,19 @@
package main

import (
"context"
"fmt"
"os"

"gitlink.org.cn/cloudream/common/pkgs/logger"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
stgmod "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/db2"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
scmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner"
"gitlink.org.cn/cloudream/storage/common/pkgs/storage/agtpool"
"gitlink.org.cn/cloudream/storage/common/pkgs/sysevent"
"gitlink.org.cn/cloudream/storage/scanner/internal/config"
"gitlink.org.cn/cloudream/storage/scanner/internal/event"
"gitlink.org.cn/cloudream/storage/scanner/internal/mq"
@@ -50,8 +53,16 @@ func main() {
// 启动存储服务管理器
stgAgts := agtpool.NewPool()

// 初始化系统事件发布器
evtPub, err := sysevent.NewPublisher(sysevent.ConfigFromMQConfig(config.Cfg().RabbitMQ), &stgmod.SourceScanner{})
if err != nil {
logger.Errorf("new sysevent publisher: %v", err)
os.Exit(1)
}
go servePublisher(evtPub)

// 启动事件执行器
eventExecutor := event.NewExecutor(db, distlockSvc, stgAgts)
eventExecutor := event.NewExecutor(db, distlockSvc, stgAgts, evtPub)
go serveEventExecutor(&eventExecutor)

agtSvr, err := scmq.NewServer(mq.NewService(&eventExecutor), config.Cfg().RabbitMQ)
@@ -88,6 +99,41 @@ func serveEventExecutor(executor *event.Executor) {
os.Exit(1)
}

func servePublisher(evtPub *sysevent.Publisher) {
logger.Info("start serving sysevent publisher")

ch := evtPub.Start()

loop:
for {
val, err := ch.Receive().Wait(context.Background())
if err != nil {
logger.Errorf("sysevent publisher stopped with error: %s", err.Error())
break
}

switch val := val.(type) {
case sysevent.PublishError:
logger.Errorf("publishing event: %v", val)

case sysevent.PublisherExited:
if val.Err != nil {
logger.Errorf("publisher exited with error: %v", val.Err)
} else {
logger.Info("publisher exited")
}
break loop

case sysevent.OtherError:
logger.Errorf("sysevent: %v", val)
}
}
logger.Info("sysevent publisher stopped")

// TODO 仅简单结束了程序
os.Exit(1)
}

func serveScannerServer(server *scmq.Server) {
logger.Info("start serving scanner server")



Loading…
Cancel
Save