Browse Source

Merge pull request '修复测试后发现的问题' (#22) from feature_gxh into master

gitlink
Sydonian 1 year ago
parent
commit
aa4f41c498
51 changed files with 1171 additions and 546 deletions
  1. +2
    -0
      agent/internal/config/config.go
  2. +11
    -0
      agent/internal/grpc/ping.go
  3. +54
    -9
      agent/internal/task/create_package.go
  4. +8
    -5
      agent/internal/task/task.go
  5. +39
    -3
      agent/main.go
  6. +3
    -3
      client/internal/cmdline/bucket.go
  7. +6
    -1
      client/internal/cmdline/cache.go
  8. +5
    -0
      client/internal/cmdline/commandline.go
  9. +63
    -0
      client/internal/cmdline/object.go
  10. +18
    -101
      client/internal/cmdline/package.go
  11. +12
    -2
      client/internal/cmdline/storage.go
  12. +8
    -6
      client/internal/config/config.go
  13. +61
    -0
      client/internal/http/bucket.go
  14. +1
    -1
      client/internal/http/cache.go
  15. +51
    -1
      client/internal/http/object.go
  16. +11
    -58
      client/internal/http/package.go
  17. +15
    -11
      client/internal/http/server.go
  18. +1
    -1
      client/internal/http/storage.go
  19. +17
    -0
      client/internal/services/object.go
  20. +15
    -31
      client/internal/services/package.go
  21. +0
    -35
      client/internal/task/create_package.go
  22. +6
    -3
      client/internal/task/task.go
  23. +0
    -36
      client/internal/task/update_package.go
  24. +37
    -0
      client/internal/task/upload_objects.go
  25. +6
    -1
      client/main.go
  26. +3
    -0
      common/assets/confs/agent.config.json
  27. +3
    -0
      common/assets/confs/client.config.json
  28. +9
    -6
      common/assets/scripts/create_database.sql
  29. +0
    -88
      common/pkgs/cmd/update_package.go
  30. +38
    -25
      common/pkgs/cmd/upload_objects.go
  31. +219
    -0
      common/pkgs/connectivity/collector.go
  32. +5
    -0
      common/pkgs/connectivity/config.go
  33. +11
    -0
      common/pkgs/db/cache.go
  34. +3
    -11
      common/pkgs/db/model/model.go
  35. +40
    -0
      common/pkgs/db/node_connectivity.go
  36. +37
    -60
      common/pkgs/db/object.go
  37. +12
    -0
      common/pkgs/db/object_block.go
  38. +17
    -1
      common/pkgs/db/pinned_object.go
  39. +2
    -2
      common/pkgs/db/utils.go
  40. +136
    -28
      common/pkgs/grpc/agent/agent.pb.go
  41. +5
    -0
      common/pkgs/grpc/agent/agent.proto
  42. +39
    -1
      common/pkgs/grpc/agent/agent_grpc.pb.go
  43. +5
    -0
      common/pkgs/grpc/agent/client.go
  44. +53
    -0
      common/pkgs/mq/coordinator/node.go
  45. +13
    -9
      common/pkgs/mq/coordinator/package.go
  46. +49
    -0
      coordinator/internal/mq/node.go
  47. +4
    -1
      scanner/internal/event/agent_cache_gc.go
  48. +4
    -2
      scanner/internal/event/agent_check_cache.go
  49. +4
    -1
      scanner/internal/event/agent_storage_gc.go
  50. +5
    -2
      scanner/internal/event/check_package_redundancy.go
  51. +5
    -1
      scanner/internal/event/clean_pinned.go

+ 2
- 0
agent/internal/config/config.go View File

@@ -6,6 +6,7 @@ import (
log "gitlink.org.cn/cloudream/common/pkgs/logger" log "gitlink.org.cn/cloudream/common/pkgs/logger"
c "gitlink.org.cn/cloudream/common/utils/config" c "gitlink.org.cn/cloudream/common/utils/config"
stgmodels "gitlink.org.cn/cloudream/storage/common/models" stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/grpc" "gitlink.org.cn/cloudream/storage/common/pkgs/grpc"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
) )
@@ -19,6 +20,7 @@ type Config struct {
RabbitMQ stgmq.Config `json:"rabbitMQ"` RabbitMQ stgmq.Config `json:"rabbitMQ"`
IPFS ipfs.Config `json:"ipfs"` IPFS ipfs.Config `json:"ipfs"`
DistLock distlock.Config `json:"distlock"` DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
} }


var cfg Config var cfg Config


+ 11
- 0
agent/internal/grpc/ping.go View File

@@ -0,0 +1,11 @@
package grpc

import (
"context"

agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
)

func (s *Service) Ping(context.Context, *agtrpc.PingReq) (*agtrpc.PingResp, error) {
return &agtrpc.PingResp{}, nil
}

+ 54
- 9
agent/internal/task/create_package.go View File

@@ -1,26 +1,39 @@
package task package task


import ( import (
"fmt"
"time" "time"


"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/task" "gitlink.org.cn/cloudream/common/pkgs/task"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/cmd" "gitlink.org.cn/cloudream/storage/common/pkgs/cmd"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator" "gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
"gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
) )


type CreatePackageResult = cmd.CreatePackageResult
type CreatePackageResult struct {
PackageID cdssdk.PackageID
Objects []cmd.ObjectUploadResult
}


type CreatePackage struct { type CreatePackage struct {
cmd cmd.CreatePackage

Result *CreatePackageResult
userID cdssdk.UserID
bucketID cdssdk.BucketID
name string
objIter iterator.UploadingObjectIterator
nodeAffinity *cdssdk.NodeID
Result *CreatePackageResult
} }


func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage { func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage {
return &CreatePackage{ return &CreatePackage{
cmd: *cmd.NewCreatePackage(userID, bucketID, name, objIter, nodeAffinity),
userID: userID,
bucketID: bucketID,
name: name,
objIter: objIter,
nodeAffinity: nodeAffinity,
} }
} }


@@ -29,12 +42,44 @@ func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, c
log.Debugf("begin") log.Debugf("begin")
defer log.Debugf("end") defer log.Debugf("end")


ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{
Distlock: ctx.distlock,
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
err = fmt.Errorf("new coordinator client: %w", err)
log.Warn(err.Error())
complete(err, CompleteOption{
RemovingDelay: time.Minute,
})
return
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

createResp, err := coorCli.CreatePackage(coordinator.NewCreatePackage(t.userID, t.bucketID, t.name))
if err != nil {
err = fmt.Errorf("creating package: %w", err)
log.Error(err.Error())
complete(err, CompleteOption{
RemovingDelay: time.Minute,
})
return
}

uploadRet, err := cmd.NewUploadObjects(t.userID, createResp.PackageID, t.objIter, t.nodeAffinity).Execute(&cmd.UploadObjectsContext{
Distlock: ctx.distlock,
Connectivity: ctx.connectivity,
}) })
t.Result = ret
if err != nil {
err = fmt.Errorf("uploading objects: %w", err)
log.Error(err.Error())
complete(err, CompleteOption{
RemovingDelay: time.Minute,
})
return
}

t.Result.PackageID = createResp.PackageID
t.Result.Objects = uploadRet.Objects


complete(err, CompleteOption{
complete(nil, CompleteOption{
RemovingDelay: time.Minute, RemovingDelay: time.Minute,
}) })
} }

+ 8
- 5
agent/internal/task/task.go View File

@@ -3,12 +3,14 @@ package task
import ( import (
"gitlink.org.cn/cloudream/common/pkgs/distlock" "gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/pkgs/task" "gitlink.org.cn/cloudream/common/pkgs/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
) )


type TaskContext struct { type TaskContext struct {
distlock *distlock.Service
sw *ioswitch.Switch
distlock *distlock.Service
sw *ioswitch.Switch
connectivity *connectivity.Collector
} }


// 需要在Task结束后主动调用,completing函数将在Manager加锁期间被调用, // 需要在Task结束后主动调用,completing函数将在Manager加锁期间被调用,
@@ -23,9 +25,10 @@ type Task = task.Task[TaskContext]


type CompleteOption = task.CompleteOption type CompleteOption = task.CompleteOption


func NewManager(distlock *distlock.Service, sw *ioswitch.Switch) Manager {
func NewManager(distlock *distlock.Service, sw *ioswitch.Switch, connectivity *connectivity.Collector) Manager {
return task.NewManager(TaskContext{ return task.NewManager(TaskContext{
distlock: distlock,
sw: sw,
distlock: distlock,
sw: sw,
connectivity: connectivity,
}) })
} }

+ 39
- 3
agent/main.go View File

@@ -7,9 +7,11 @@ import (
"sync" "sync"


log "gitlink.org.cn/cloudream/common/pkgs/logger" log "gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/agent/internal/config" "gitlink.org.cn/cloudream/storage/agent/internal/config"
"gitlink.org.cn/cloudream/storage/agent/internal/task" "gitlink.org.cn/cloudream/storage/agent/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
@@ -20,6 +22,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"


agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"


grpcsvc "gitlink.org.cn/cloudream/storage/agent/internal/grpc" grpcsvc "gitlink.org.cn/cloudream/storage/agent/internal/grpc"
cmdsvc "gitlink.org.cn/cloudream/storage/agent/internal/mq" cmdsvc "gitlink.org.cn/cloudream/storage/agent/internal/mq"
@@ -49,6 +52,41 @@ func main() {
stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{}) stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{})
stgglb.InitIPFSPool(&config.Cfg().IPFS) stgglb.InitIPFSPool(&config.Cfg().IPFS)


// 启动网络连通性检测,并就地检测一次
conCol := connectivity.NewCollector(&config.Cfg().Connectivity, func(collector *connectivity.Collector) {
log := log.WithField("Connectivity", "")

coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
log.Warnf("acquire coordinator mq failed, err: %s", err.Error())
return
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

cons := collector.GetAll()
nodeCons := make([]cdssdk.NodeConnectivity, 0, len(cons))
for _, con := range cons {
var delay *float32
if con.Delay != nil {
v := float32(con.Delay.Microseconds()) / 1000
delay = &v
}

nodeCons = append(nodeCons, cdssdk.NodeConnectivity{
FromNodeID: *stgglb.Local.NodeID,
ToNodeID: con.ToNodeID,
Delay: delay,
TestTime: con.TestTime,
})
}

_, err = coorCli.UpdateNodeConnectivities(coormq.ReqUpdateNodeConnectivities(nodeCons))
if err != nil {
log.Warnf("update node connectivities: %v", err)
}
})
conCol.CollectInPlace()

distlock, err := distlock.NewService(&config.Cfg().DistLock) distlock, err := distlock.NewService(&config.Cfg().DistLock)
if err != nil { if err != nil {
log.Fatalf("new ipfs failed, err: %s", err.Error()) log.Fatalf("new ipfs failed, err: %s", err.Error())
@@ -60,7 +98,7 @@ func main() {
wg := sync.WaitGroup{} wg := sync.WaitGroup{}
wg.Add(4) wg.Add(4)


taskMgr := task.NewManager(distlock, &sw)
taskMgr := task.NewManager(distlock, &sw, &conCol)


// 启动命令服务器 // 启动命令服务器
// TODO 需要设计AgentID持久化机制 // TODO 需要设计AgentID持久化机制
@@ -74,8 +112,6 @@ func main() {


go serveAgentServer(agtSvr, &wg) go serveAgentServer(agtSvr, &wg)


// go reportStatus(&wg) //网络延迟感知

//面向客户端收发数据 //面向客户端收发数据
listenAddr := config.Cfg().GRPC.MakeListenAddress() listenAddr := config.Cfg().GRPC.MakeListenAddress()
lis, err := net.Listen("tcp", listenAddr) lis, err := net.Listen("tcp", listenAddr)


+ 3
- 3
client/internal/cmdline/bucket.go View File

@@ -8,7 +8,7 @@ import (
) )


func BucketListUserBuckets(ctx CommandContext) error { func BucketListUserBuckets(ctx CommandContext) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)


buckets, err := ctx.Cmdline.Svc.BucketSvc().GetUserBuckets(userID) buckets, err := ctx.Cmdline.Svc.BucketSvc().GetUserBuckets(userID)
if err != nil { if err != nil {
@@ -29,7 +29,7 @@ func BucketListUserBuckets(ctx CommandContext) error {
} }


func BucketCreateBucket(ctx CommandContext, bucketName string) error { func BucketCreateBucket(ctx CommandContext, bucketName string) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)


bucketID, err := ctx.Cmdline.Svc.BucketSvc().CreateBucket(userID, bucketName) bucketID, err := ctx.Cmdline.Svc.BucketSvc().CreateBucket(userID, bucketName)
if err != nil { if err != nil {
@@ -41,7 +41,7 @@ func BucketCreateBucket(ctx CommandContext, bucketName string) error {
} }


func BucketDeleteBucket(ctx CommandContext, bucketID cdssdk.BucketID) error { func BucketDeleteBucket(ctx CommandContext, bucketID cdssdk.BucketID) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)


err := ctx.Cmdline.Svc.BucketSvc().DeleteBucket(userID, bucketID) err := ctx.Cmdline.Svc.BucketSvc().DeleteBucket(userID, bucketID)
if err != nil { if err != nil {


+ 6
- 1
client/internal/cmdline/cache.go View File

@@ -8,7 +8,12 @@ import (
) )


func CacheMovePackage(ctx CommandContext, packageID cdssdk.PackageID, nodeID cdssdk.NodeID) error { func CacheMovePackage(ctx CommandContext, packageID cdssdk.PackageID, nodeID cdssdk.NodeID) error {
taskID, err := ctx.Cmdline.Svc.CacheSvc().StartCacheMovePackage(0, packageID, nodeID)
startTime := time.Now()
defer func() {
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

taskID, err := ctx.Cmdline.Svc.CacheSvc().StartCacheMovePackage(1, packageID, nodeID)
if err != nil { if err != nil {
return fmt.Errorf("start cache moving package: %w", err) return fmt.Errorf("start cache moving package: %w", err)
} }


+ 5
- 0
client/internal/cmdline/commandline.go View File

@@ -38,3 +38,8 @@ func (c *Commandline) DispatchCommand(allArgs []string) {
os.Exit(1) os.Exit(1)
} }
} }

func MustAddCmd(fn any, prefixWords ...string) any {
commands.MustAdd(fn, prefixWords...)
return nil
}

+ 63
- 0
client/internal/cmdline/object.go View File

@@ -0,0 +1,63 @@
package cmdline

import (
"fmt"
"os"
"path/filepath"
"time"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
)

var _ = MustAddCmd(func(ctx CommandContext, packageID cdssdk.PackageID, rootPath string, nodeAffinity []cdssdk.NodeID) error {
startTime := time.Now()
defer func() {
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

userID := cdssdk.UserID(1)

var uploadFilePathes []string
err := filepath.WalkDir(rootPath, func(fname string, fi os.DirEntry, err error) error {
if err != nil {
return nil
}

if !fi.IsDir() {
uploadFilePathes = append(uploadFilePathes, fname)
}

return nil
})
if err != nil {
return fmt.Errorf("open directory %s failed, err: %w", rootPath, err)
}

var nodeAff *cdssdk.NodeID
if len(nodeAffinity) > 0 {
n := cdssdk.NodeID(nodeAffinity[0])
nodeAff = &n
}

objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes)
taskID, err := ctx.Cmdline.Svc.ObjectSvc().StartUploading(userID, packageID, objIter, nodeAff)
if err != nil {
return fmt.Errorf("update objects to package %d failed, err: %w", packageID, err)
}

for {
complete, _, err := ctx.Cmdline.Svc.ObjectSvc().WaitUploading(taskID, time.Second*5)
if complete {
if err != nil {
return fmt.Errorf("uploading objects: %w", err)
}

return nil
}

if err != nil {
return fmt.Errorf("wait updating: %w", err)
}
}
}, "obj", "upload")

+ 18
- 101
client/internal/cmdline/package.go View File

@@ -13,7 +13,7 @@ import (
) )


func PackageListBucketPackages(ctx CommandContext, bucketID cdssdk.BucketID) error { func PackageListBucketPackages(ctx CommandContext, bucketID cdssdk.BucketID) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)


packages, err := ctx.Cmdline.Svc.BucketSvc().GetBucketPackages(userID, bucketID) packages, err := ctx.Cmdline.Svc.BucketSvc().GetBucketPackages(userID, bucketID)
if err != nil { if err != nil {
@@ -34,13 +34,20 @@ func PackageListBucketPackages(ctx CommandContext, bucketID cdssdk.BucketID) err
} }


func PackageDownloadPackage(ctx CommandContext, packageID cdssdk.PackageID, outputDir string) error { func PackageDownloadPackage(ctx CommandContext, packageID cdssdk.PackageID, outputDir string) error {
startTime := time.Now()
defer func() {
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

userID := cdssdk.UserID(1)

err := os.MkdirAll(outputDir, os.ModePerm) err := os.MkdirAll(outputDir, os.ModePerm)
if err != nil { if err != nil {
return fmt.Errorf("create output directory %s failed, err: %w", outputDir, err) return fmt.Errorf("create output directory %s failed, err: %w", outputDir, err)
} }


// 下载文件 // 下载文件
objIter, err := ctx.Cmdline.Svc.PackageSvc().DownloadPackage(0, packageID)
objIter, err := ctx.Cmdline.Svc.PackageSvc().DownloadPackage(userID, packageID)
if err != nil { if err != nil {
return fmt.Errorf("download object failed, err: %w", err) return fmt.Errorf("download object failed, err: %w", err)
} }
@@ -91,108 +98,20 @@ func PackageDownloadPackage(ctx CommandContext, packageID cdssdk.PackageID, outp
return nil return nil
} }


func PackageCreatePackage(ctx CommandContext, name string, rootPath string, bucketID cdssdk.BucketID, nodeAffinity []cdssdk.NodeID) error {
rootPath = filepath.Clean(rootPath)

var uploadFilePathes []string
err := filepath.WalkDir(rootPath, func(fname string, fi os.DirEntry, err error) error {
if err != nil {
return nil
}

if !fi.IsDir() {
uploadFilePathes = append(uploadFilePathes, fname)
}

return nil
})
if err != nil {
return fmt.Errorf("open directory %s failed, err: %w", rootPath, err)
}

var nodeAff *cdssdk.NodeID
if len(nodeAffinity) > 0 {
n := cdssdk.NodeID(nodeAffinity[0])
nodeAff = &n
}

objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes)
taskID, err := ctx.Cmdline.Svc.PackageSvc().StartCreatingPackage(0, bucketID, name, objIter, nodeAff)

if err != nil {
return fmt.Errorf("upload file data failed, err: %w", err)
}

for {
complete, uploadObjectResult, err := ctx.Cmdline.Svc.PackageSvc().WaitCreatingPackage(taskID, time.Second*5)
if complete {
if err != nil {
return fmt.Errorf("uploading package: %w", err)
}

tb := table.NewWriter()

tb.AppendHeader(table.Row{"Path", "ObjectID"})
for i := 0; i < len(uploadObjectResult.ObjectResults); i++ {
tb.AppendRow(table.Row{
uploadObjectResult.ObjectResults[i].Info.Path,
uploadObjectResult.ObjectResults[i].ObjectID,
})
}
fmt.Print(tb.Render())
fmt.Printf("\n%v", uploadObjectResult.PackageID)
return nil
}

if err != nil {
return fmt.Errorf("wait uploading: %w", err)
}
}
}

func PackageUpdatePackage(ctx CommandContext, packageID cdssdk.PackageID, rootPath string) error {
//userID := int64(0)

var uploadFilePathes []string
err := filepath.WalkDir(rootPath, func(fname string, fi os.DirEntry, err error) error {
if err != nil {
return nil
}

if !fi.IsDir() {
uploadFilePathes = append(uploadFilePathes, fname)
}

return nil
})
if err != nil {
return fmt.Errorf("open directory %s failed, err: %w", rootPath, err)
}
func PackageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string) error {
userID := cdssdk.UserID(1)


objIter := iterator.NewUploadingObjectIterator(rootPath, uploadFilePathes)
taskID, err := ctx.Cmdline.Svc.PackageSvc().StartUpdatingPackage(0, packageID, objIter)
pkgID, err := ctx.Cmdline.Svc.PackageSvc().Create(userID, bucketID, name)
if err != nil { if err != nil {
return fmt.Errorf("update package %d failed, err: %w", packageID, err)
return err
} }


for {
complete, _, err := ctx.Cmdline.Svc.PackageSvc().WaitUpdatingPackage(taskID, time.Second*5)
if complete {
if err != nil {
return fmt.Errorf("updating package: %w", err)
}

return nil
}

if err != nil {
return fmt.Errorf("wait updating: %w", err)
}
}
fmt.Printf("%v\n", pkgID)
return nil
} }


func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error { func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)
err := ctx.Cmdline.Svc.PackageSvc().DeletePackage(userID, packageID) err := ctx.Cmdline.Svc.PackageSvc().DeletePackage(userID, packageID)
if err != nil { if err != nil {
return fmt.Errorf("delete package %d failed, err: %w", packageID, err) return fmt.Errorf("delete package %d failed, err: %w", packageID, err)
@@ -201,7 +120,7 @@ func PackageDeletePackage(ctx CommandContext, packageID cdssdk.PackageID) error
} }


func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error { func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)
resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedNodes(userID, packageID) resp, err := ctx.Cmdline.Svc.PackageSvc().GetCachedNodes(userID, packageID)
fmt.Printf("resp: %v\n", resp) fmt.Printf("resp: %v\n", resp)
if err != nil { if err != nil {
@@ -211,7 +130,7 @@ func PackageGetCachedNodes(ctx CommandContext, packageID cdssdk.PackageID) error
} }


func PackageGetLoadedNodes(ctx CommandContext, packageID cdssdk.PackageID) error { func PackageGetLoadedNodes(ctx CommandContext, packageID cdssdk.PackageID) error {
userID := cdssdk.UserID(0)
userID := cdssdk.UserID(1)
nodeIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedNodes(userID, packageID) nodeIDs, err := ctx.Cmdline.Svc.PackageSvc().GetLoadedNodes(userID, packageID)
fmt.Printf("nodeIDs: %v\n", nodeIDs) fmt.Printf("nodeIDs: %v\n", nodeIDs)
if err != nil { if err != nil {
@@ -227,8 +146,6 @@ func init() {


commands.MustAdd(PackageCreatePackage, "pkg", "new") commands.MustAdd(PackageCreatePackage, "pkg", "new")


commands.MustAdd(PackageUpdatePackage, "pkg", "update")

commands.MustAdd(PackageDeletePackage, "pkg", "delete") commands.MustAdd(PackageDeletePackage, "pkg", "delete")


commands.MustAdd(PackageGetCachedNodes, "pkg", "cached") commands.MustAdd(PackageGetCachedNodes, "pkg", "cached")


+ 12
- 2
client/internal/cmdline/storage.go View File

@@ -8,7 +8,12 @@ import (
) )


func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageID cdssdk.StorageID) error { func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageID cdssdk.StorageID) error {
nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(0, packageID, storageID)
startTime := time.Now()
defer func() {
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageLoadPackage(1, packageID, storageID)
if err != nil { if err != nil {
return fmt.Errorf("start loading package to storage: %w", err) return fmt.Errorf("start loading package to storage: %w", err)
} }
@@ -31,7 +36,12 @@ func StorageLoadPackage(ctx CommandContext, packageID cdssdk.PackageID, storageI
} }


func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string) error { func StorageCreatePackage(ctx CommandContext, bucketID cdssdk.BucketID, name string, storageID cdssdk.StorageID, path string) error {
nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageCreatePackage(0, bucketID, name, storageID, path, nil)
startTime := time.Now()
defer func() {
fmt.Printf("%v\n", time.Since(startTime).Seconds())
}()

nodeID, taskID, err := ctx.Cmdline.Svc.StorageSvc().StartStorageCreatePackage(1, bucketID, name, storageID, path, nil)
if err != nil { if err != nil {
return fmt.Errorf("start storage uploading package: %w", err) return fmt.Errorf("start storage uploading package: %w", err)
} }


+ 8
- 6
client/internal/config/config.go View File

@@ -6,17 +6,19 @@ import (
"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/config" "gitlink.org.cn/cloudream/common/utils/config"
stgmodels "gitlink.org.cn/cloudream/storage/common/models" stgmodels "gitlink.org.cn/cloudream/storage/common/models"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq" stgmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq"
) )


type Config struct { type Config struct {
Local stgmodels.LocalMachineInfo `json:"local"`
AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"`
Logger logger.Config `json:"logger"`
RabbitMQ stgmq.Config `json:"rabbitMQ"`
IPFS *ipfs.Config `json:"ipfs"` // 此字段非空代表客户端上存在ipfs daemon
DistLock distlock.Config `json:"distlock"`
Local stgmodels.LocalMachineInfo `json:"local"`
AgentGRPC agtrpc.PoolConfig `json:"agentGRPC"`
Logger logger.Config `json:"logger"`
RabbitMQ stgmq.Config `json:"rabbitMQ"`
IPFS *ipfs.Config `json:"ipfs"` // 此字段非空代表客户端上存在ipfs daemon
DistLock distlock.Config `json:"distlock"`
Connectivity connectivity.Config `json:"connectivity"`
} }


var cfg Config var cfg Config


+ 61
- 0
client/internal/http/bucket.go View File

@@ -0,0 +1,61 @@
package http

import (
"net/http"

"github.com/gin-gonic/gin"
"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
)

type BucketService struct {
*Server
}

func (s *Server) Bucket() *BucketService {
return &BucketService{
Server: s,
}
}

func (s *BucketService) Create(ctx *gin.Context) {
log := logger.WithField("HTTP", "Bucket.Create")

var req cdssdk.BucketCreateReq
if err := ctx.ShouldBindJSON(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

bucketID, err := s.svc.BucketSvc().CreateBucket(req.UserID, req.BucketName)
if err != nil {
log.Warnf("creating bucket: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "create bucket failed"))
return
}

ctx.JSON(http.StatusOK, OK(cdssdk.BucketCreateResp{
BucketID: bucketID,
}))
}

func (s *BucketService) Delete(ctx *gin.Context) {
log := logger.WithField("HTTP", "Bucket.Delete")

var req cdssdk.BucketDeleteReq
if err := ctx.ShouldBindJSON(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

if err := s.svc.BucketSvc().DeleteBucket(req.UserID, req.BucketID); err != nil {
log.Warnf("deleting bucket: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "delete bucket failed"))
return
}

ctx.JSON(http.StatusOK, OK(nil))
}

+ 1
- 1
client/internal/http/cache.go View File

@@ -14,7 +14,7 @@ type CacheService struct {
*Server *Server
} }


func (s *Server) CacheSvc() *CacheService {
func (s *Server) Cache() *CacheService {
return &CacheService{ return &CacheService{
Server: s, Server: s,
} }


+ 51
- 1
client/internal/http/object.go View File

@@ -2,7 +2,9 @@ package http


import ( import (
"io" "io"
"mime/multipart"
"net/http" "net/http"
"time"


"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"gitlink.org.cn/cloudream/common/consts/errorcode" "gitlink.org.cn/cloudream/common/consts/errorcode"
@@ -15,12 +17,60 @@ type ObjectService struct {
*Server *Server
} }


func (s *Server) ObjectSvc() *ObjectService {
func (s *Server) Object() *ObjectService {
return &ObjectService{ return &ObjectService{
Server: s, Server: s,
} }
} }


type ObjectUploadReq struct {
Info cdssdk.ObjectUploadInfo `form:"info" binding:"required"`
Files []*multipart.FileHeader `form:"files"`
}

func (s *ObjectService) Upload(ctx *gin.Context) {
log := logger.WithField("HTTP", "Object.Upload")

var req ObjectUploadReq
if err := ctx.ShouldBind(&req); err != nil {
log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return
}

var err error

objIter := mapMultiPartFileToUploadingObject(req.Files)

taskID, err := s.svc.ObjectSvc().StartUploading(req.Info.UserID, req.Info.PackageID, objIter, req.Info.NodeAffinity)

if err != nil {
log.Warnf("start uploading object task: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "start uploading task failed"))
return
}

for {
complete, _, err := s.svc.ObjectSvc().WaitUploading(taskID, time.Second*5)
if complete {
if err != nil {
log.Warnf("uploading object: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "uploading object failed"))
return
}

ctx.JSON(http.StatusOK, OK(nil))
return
}

if err != nil {
log.Warnf("waiting task: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "wait uploading task failed"))
return
}
}
}

type ObjectDownloadReq struct { type ObjectDownloadReq struct {
UserID *cdssdk.UserID `form:"userID" binding:"required"` UserID *cdssdk.UserID `form:"userID" binding:"required"`
ObjectID *cdssdk.ObjectID `form:"objectID" binding:"required"` ObjectID *cdssdk.ObjectID `form:"objectID" binding:"required"`


+ 11
- 58
client/internal/http/package.go View File

@@ -3,7 +3,6 @@ package http
import ( import (
"mime/multipart" "mime/multipart"
"net/http" "net/http"
"time"


"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"gitlink.org.cn/cloudream/common/consts/errorcode" "gitlink.org.cn/cloudream/common/consts/errorcode"
@@ -19,7 +18,7 @@ type PackageService struct {
*Server *Server
} }


func (s *Server) PackageSvc() *PackageService {
func (s *Server) Package() *PackageService {
return &PackageService{ return &PackageService{
Server: s, Server: s,
} }
@@ -53,71 +52,25 @@ func (s *PackageService) Get(ctx *gin.Context) {
ctx.JSON(http.StatusOK, OK(PackageGetResp{Package: *pkg})) ctx.JSON(http.StatusOK, OK(PackageGetResp{Package: *pkg}))
} }


type PackageUploadReq struct {
Info PackageUploadInfo `form:"info" binding:"required"`
Files []*multipart.FileHeader `form:"files"`
}

type PackageUploadInfo struct {
UserID *cdssdk.UserID `json:"userID" binding:"required"`
BucketID *cdssdk.BucketID `json:"bucketID" binding:"required"`
Name string `json:"name" binding:"required"`
NodeAffinity *cdssdk.NodeID `json:"nodeAffinity"`
}

type PackageUploadResp struct {
PackageID cdssdk.PackageID `json:"packageID,string"`
}

func (s *PackageService) Upload(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.Upload")

var req PackageUploadReq
if err := ctx.ShouldBind(&req); err != nil {
func (s *PackageService) Create(ctx *gin.Context) {
log := logger.WithField("HTTP", "Package.Create")
var req cdssdk.PackageCreateReq
if err := ctx.ShouldBindJSON(&req); err != nil {
log.Warnf("binding body: %s", err.Error()) log.Warnf("binding body: %s", err.Error())
ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument")) ctx.JSON(http.StatusBadRequest, Failed(errorcode.BadArgument, "missing argument or invalid argument"))
return return
} }


s.uploadEC(ctx, &req)
}

func (s *PackageService) uploadEC(ctx *gin.Context, req *PackageUploadReq) {
log := logger.WithField("HTTP", "Package.Upload")

var err error

objIter := mapMultiPartFileToUploadingObject(req.Files)

taskID, err := s.svc.PackageSvc().StartCreatingPackage(*req.Info.UserID, *req.Info.BucketID, req.Info.Name, objIter, req.Info.NodeAffinity)

pkgID, err := s.svc.PackageSvc().Create(req.UserID, req.BucketID, req.Name)
if err != nil { if err != nil {
log.Warnf("start uploading ec package task: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "start uploading task failed"))
log.Warnf("creating package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "create package failed"))
return return
} }


for {
complete, createResult, err := s.svc.PackageSvc().WaitCreatingPackage(taskID, time.Second*5)
if complete {
if err != nil {
log.Warnf("uploading ec package: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "uploading ec package failed"))
return
}

ctx.JSON(http.StatusOK, OK(PackageUploadResp{
PackageID: createResult.PackageID,
}))
return
}

if err != nil {
log.Warnf("waiting task: %s", err.Error())
ctx.JSON(http.StatusOK, Failed(errorcode.OperationFailed, "wait uploading task failed"))
return
}
}
ctx.JSON(http.StatusOK, OK(cdssdk.PackageCreateResp{
PackageID: pkgID,
}))
} }


type PackageDeleteReq struct { type PackageDeleteReq struct {


+ 15
- 11
client/internal/http/server.go View File

@@ -39,18 +39,22 @@ func (s *Server) Serve() error {
} }


func (s *Server) initRouters() { func (s *Server) initRouters() {
s.engine.GET("/object/download", s.ObjectSvc().Download)
s.engine.GET(cdssdk.ObjectGetPackageObjectsPath, s.ObjectSvc().GetPackageObjects)
s.engine.GET(cdssdk.ObjectDownloadPath, s.Object().Download)
s.engine.POST(cdssdk.ObjectUploadPath, s.Object().Upload)
s.engine.GET(cdssdk.ObjectGetPackageObjectsPath, s.Object().GetPackageObjects)


s.engine.GET("/package/get", s.PackageSvc().Get)
s.engine.POST("/package/upload", s.PackageSvc().Upload)
s.engine.POST("/package/delete", s.PackageSvc().Delete)
s.engine.GET("/package/getCachedNodes", s.PackageSvc().GetCachedNodes)
s.engine.GET("/package/getLoadedNodes", s.PackageSvc().GetLoadedNodes)
s.engine.GET(cdssdk.PackageGetPath, s.Package().Get)
s.engine.POST(cdssdk.PackageCreatePath, s.Package().Create)
s.engine.POST("/package/delete", s.Package().Delete)
s.engine.GET("/package/getCachedNodes", s.Package().GetCachedNodes)
s.engine.GET("/package/getLoadedNodes", s.Package().GetLoadedNodes)


s.engine.POST("/storage/loadPackage", s.StorageSvc().LoadPackage)
s.engine.POST("/storage/createPackage", s.StorageSvc().CreatePackage)
s.engine.GET("/storage/getInfo", s.StorageSvc().GetInfo)
s.engine.POST("/storage/loadPackage", s.Storage().LoadPackage)
s.engine.POST("/storage/createPackage", s.Storage().CreatePackage)
s.engine.GET("/storage/getInfo", s.Storage().GetInfo)


s.engine.POST(cdssdk.CacheMovePackagePath, s.CacheSvc().MovePackage)
s.engine.POST(cdssdk.CacheMovePackagePath, s.Cache().MovePackage)

s.engine.POST(cdssdk.BucketCreatePath, s.Bucket().Create)
s.engine.POST(cdssdk.BucketDeletePath, s.Bucket().Delete)
} }

+ 1
- 1
client/internal/http/storage.go View File

@@ -14,7 +14,7 @@ type StorageService struct {
*Server *Server
} }


func (s *Server) StorageSvc() *StorageService {
func (s *Server) Storage() *StorageService {
return &StorageService{ return &StorageService{
Server: s, Server: s,
} }


+ 17
- 0
client/internal/services/object.go View File

@@ -3,10 +3,13 @@ package services
import ( import (
"fmt" "fmt"
"io" "io"
"time"


cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
mytask "gitlink.org.cn/cloudream/storage/client/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model" "gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
) )


@@ -18,6 +21,20 @@ func (svc *Service) ObjectSvc() *ObjectService {
return &ObjectService{Service: svc} return &ObjectService{Service: svc}
} }


func (svc *ObjectService) StartUploading(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) (string, error) {
tsk := svc.TaskMgr.StartNew(mytask.NewUploadObjects(userID, packageID, objIter, nodeAffinity))
return tsk.ID(), nil
}

func (svc *ObjectService) WaitUploading(taskID string, waitTimeout time.Duration) (bool, *mytask.UploadObjectsResult, error) {
tsk := svc.TaskMgr.FindByID(taskID)
if tsk.WaitTimeout(waitTimeout) {
updatePkgTask := tsk.Body().(*mytask.UploadObjects)
return true, updatePkgTask.Result, tsk.Error()
}
return false, nil, nil
}

func (svc *ObjectService) Download(userID cdssdk.UserID, objectID cdssdk.ObjectID) (io.ReadCloser, error) { func (svc *ObjectService) Download(userID cdssdk.UserID, objectID cdssdk.ObjectID) (io.ReadCloser, error) {
panic("not implement yet!") panic("not implement yet!")
} }


+ 15
- 31
client/internal/services/package.go View File

@@ -2,13 +2,10 @@ package services


import ( import (
"fmt" "fmt"
"time"


cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"


mytask "gitlink.org.cn/cloudream/storage/client/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgglb "gitlink.org.cn/cloudream/storage/common/globals"
agtcmd "gitlink.org.cn/cloudream/storage/common/pkgs/cmd"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model" "gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator" "gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
@@ -37,6 +34,21 @@ func (svc *PackageService) Get(userID cdssdk.UserID, packageID cdssdk.PackageID)
return &getResp.Package, nil return &getResp.Package, nil
} }


func (svc *PackageService) Create(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string) (cdssdk.PackageID, error) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil {
return 0, fmt.Errorf("new coordinator client: %w", err)
}
defer stgglb.CoordinatorMQPool.Release(coorCli)

resp, err := coorCli.CreatePackage(coormq.NewCreatePackage(userID, bucketID, name))
if err != nil {
return 0, fmt.Errorf("creating package: %w", err)
}

return resp.PackageID, nil
}

func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID) (iterator.DownloadingObjectIterator, error) { func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssdk.PackageID) (iterator.DownloadingObjectIterator, error) {
coorCli, err := stgglb.CoordinatorMQPool.Acquire() coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil { if err != nil {
@@ -56,34 +68,6 @@ func (svc *PackageService) DownloadPackage(userID cdssdk.UserID, packageID cdssd
return iter, nil return iter, nil
} }


func (svc *PackageService) StartCreatingPackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) (string, error) {
tsk := svc.TaskMgr.StartNew(mytask.NewCreatePackage(userID, bucketID, name, objIter, nodeAffinity))
return tsk.ID(), nil
}

func (svc *PackageService) WaitCreatingPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.CreatePackageResult, error) {
tsk := svc.TaskMgr.FindByID(taskID)
if tsk.WaitTimeout(waitTimeout) {
cteatePkgTask := tsk.Body().(*mytask.CreatePackage)
return true, cteatePkgTask.Result, tsk.Error()
}
return false, nil, nil
}

func (svc *PackageService) StartUpdatingPackage(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator) (string, error) {
tsk := svc.TaskMgr.StartNew(mytask.NewUpdatePackage(userID, packageID, objIter))
return tsk.ID(), nil
}

func (svc *PackageService) WaitUpdatingPackage(taskID string, waitTimeout time.Duration) (bool, *agtcmd.UpdatePackageResult, error) {
tsk := svc.TaskMgr.FindByID(taskID)
if tsk.WaitTimeout(waitTimeout) {
updatePkgTask := tsk.Body().(*mytask.UpdatePackage)
return true, updatePkgTask.Result, tsk.Error()
}
return false, nil, nil
}

func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) error { func (svc *PackageService) DeletePackage(userID cdssdk.UserID, packageID cdssdk.PackageID) error {
coorCli, err := stgglb.CoordinatorMQPool.Acquire() coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil { if err != nil {


+ 0
- 35
client/internal/task/create_package.go View File

@@ -1,35 +0,0 @@
package task

import (
	"time"

	"gitlink.org.cn/cloudream/common/pkgs/task"
	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	"gitlink.org.cn/cloudream/storage/common/pkgs/cmd"
	"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
)

// CreatePackageResult re-exports the underlying command's result type so
// callers of this package do not need to import common/pkgs/cmd directly.
type CreatePackageResult = cmd.CreatePackageResult

// CreatePackage is a task wrapper around cmd.CreatePackage: it creates a
// new package in a bucket and uploads the given objects into it.
type CreatePackage struct {
	cmd cmd.CreatePackage

	// Result is set by Execute; it is only meaningful after the task completed.
	Result *CreatePackageResult
}

// NewCreatePackage builds a CreatePackage task that creates a package called
// name in bucketID on behalf of userID and uploads the objects produced by
// objIter. nodeAffinity, when non-nil, hints which node should store the data.
func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage {
	return &CreatePackage{
		cmd: *cmd.NewCreatePackage(userID, bucketID, name, objIter, nodeAffinity),
	}
}

// Execute runs the wrapped command, records its result on the task, and
// signals completion; the finished task entry is removed after one minute.
func (t *CreatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
	// NOTE(review): the context type is named UpdatePackageContext even though
	// this is the create command — confirm the shared naming is intentional.
	ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{
		Distlock: ctx.distlock,
	})
	t.Result = ret

	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}

+ 6
- 3
client/internal/task/task.go View File

@@ -3,10 +3,12 @@ package task
import ( import (
"gitlink.org.cn/cloudream/common/pkgs/distlock" "gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/pkgs/task" "gitlink.org.cn/cloudream/common/pkgs/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
) )


type TaskContext struct { type TaskContext struct {
distlock *distlock.Service
distlock *distlock.Service
connectivity *connectivity.Collector
} }


// 需要在Task结束后主动调用,completing函数将在Manager加锁期间被调用, // 需要在Task结束后主动调用,completing函数将在Manager加锁期间被调用,
@@ -21,8 +23,9 @@ type Task = task.Task[TaskContext]


type CompleteOption = task.CompleteOption type CompleteOption = task.CompleteOption


func NewManager(distlock *distlock.Service) Manager {
func NewManager(distlock *distlock.Service, connectivity *connectivity.Collector) Manager {
return task.NewManager(TaskContext{ return task.NewManager(TaskContext{
distlock: distlock,
distlock: distlock,
connectivity: connectivity,
}) })
} }

+ 0
- 36
client/internal/task/update_package.go View File

@@ -1,36 +0,0 @@
package task

import (
	"time"

	"gitlink.org.cn/cloudream/common/pkgs/task"
	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
	"gitlink.org.cn/cloudream/storage/common/pkgs/cmd"
	"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
)

// UpdatePackageResult re-exports the underlying command's result type so
// callers of this package do not need to import common/pkgs/cmd directly.
type UpdatePackageResult = cmd.UpdatePackageResult

// UpdatePackage is a task wrapper around cmd.UpdatePackage: it uploads
// additional objects into an existing package.
type UpdatePackage struct {
	cmd cmd.UpdatePackage

	// Result is set by Execute; it is only meaningful after the task completed.
	Result *UpdatePackageResult
}

// NewUpdatePackage builds an UpdatePackage task that uploads the objects
// produced by objectIter into packageID on behalf of userID.
func NewUpdatePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator) *UpdatePackage {
	return &UpdatePackage{
		cmd: *cmd.NewUpdatePackage(userID, packageID, objectIter),
	}
}

// Execute runs the wrapped command, records its result on the task, and
// signals completion; the finished task entry is removed after one minute.
func (t *UpdatePackage) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
	ret, err := t.cmd.Execute(&cmd.UpdatePackageContext{
		Distlock: ctx.distlock,
	})

	t.Result = ret

	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}

+ 37
- 0
client/internal/task/upload_objects.go View File

@@ -0,0 +1,37 @@
package task

import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/task"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/cmd"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
)

// UploadObjectsResult re-exports the underlying command's result type so
// callers of this package do not need to import common/pkgs/cmd directly.
type UploadObjectsResult = cmd.UploadObjectsResult

// UploadObjects is a task wrapper around cmd.UploadObjects that uploads a
// stream of objects into an existing package.
type UploadObjects struct {
	cmd cmd.UploadObjects

	// Result is set by Execute; it is only meaningful after the task completed.
	Result *UploadObjectsResult
}

// NewUploadObjects builds an UploadObjects task that uploads the objects
// produced by objectIter into the package identified by packageID on behalf
// of userID. nodeAffinity, when non-nil, hints which node should receive the
// uploaded data.
func NewUploadObjects(userID cdssdk.UserID, packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *UploadObjects {
	c := cmd.NewUploadObjects(userID, packageID, objectIter, nodeAffinity)
	return &UploadObjects{cmd: *c}
}

// Execute runs the underlying upload command, records its result on the
// task, and signals completion. The finished task entry is kept for one
// minute before being removed from the manager.
func (t *UploadObjects) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
	execCtx := cmd.UploadObjectsContext{
		Distlock:     ctx.distlock,
		Connectivity: ctx.connectivity,
	}

	result, err := t.cmd.Execute(&execCtx)
	t.Result = result

	opt := CompleteOption{RemovingDelay: time.Minute}
	complete(err, opt)
}

+ 6
- 1
client/main.go View File

@@ -12,6 +12,7 @@ import (
"gitlink.org.cn/cloudream/storage/client/internal/services" "gitlink.org.cn/cloudream/storage/client/internal/services"
"gitlink.org.cn/cloudream/storage/client/internal/task" "gitlink.org.cn/cloudream/storage/client/internal/task"
stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock"
) )


@@ -37,6 +38,10 @@ func main() {
stgglb.InitIPFSPool(config.Cfg().IPFS) stgglb.InitIPFSPool(config.Cfg().IPFS)
} }


// 启动网络连通性检测,并就地检测一次
conCol := connectivity.NewCollector(&config.Cfg().Connectivity, nil)
conCol.CollectInPlace()

distlockSvc, err := distlock.NewService(&config.Cfg().DistLock) distlockSvc, err := distlock.NewService(&config.Cfg().DistLock)
if err != nil { if err != nil {
logger.Warnf("new distlock service failed, err: %s", err.Error()) logger.Warnf("new distlock service failed, err: %s", err.Error())
@@ -44,7 +49,7 @@ func main() {
} }
go serveDistLock(distlockSvc) go serveDistLock(distlockSvc)


taskMgr := task.NewManager(distlockSvc)
taskMgr := task.NewManager(distlockSvc, &conCol)


svc, err := services.NewService(distlockSvc, &taskMgr) svc, err := services.NewService(distlockSvc, &taskMgr)
if err != nil { if err != nil {


+ 3
- 0
common/assets/confs/agent.config.json View File

@@ -32,5 +32,8 @@
"etcdLockLeaseTimeSec": 5, "etcdLockLeaseTimeSec": 5,
"randomReleasingDelayMs": 3000, "randomReleasingDelayMs": 3000,
"serviceDescription": "I am a agent" "serviceDescription": "I am a agent"
},
"connectivity": {
"testInterval": 300
} }
} }

+ 3
- 0
common/assets/confs/client.config.json View File

@@ -25,5 +25,8 @@
"etcdLockLeaseTimeSec": 5, "etcdLockLeaseTimeSec": 5,
"randomReleasingDelayMs": 3000, "randomReleasingDelayMs": 3000,
"serviceDescription": "I am a client" "serviceDescription": "I am a client"
},
"connectivity": {
"testInterval": 300
} }
} }

+ 9
- 6
common/assets/scripts/create_database.sql View File

@@ -52,12 +52,13 @@ insert into
values values
(1, "HuaWei-Cloud", 1, "/", "Online"); (1, "HuaWei-Cloud", 1, "/", "Online");


create table NodeDelay (
SourceNodeID int not null comment '发起检测的节点ID',
DestinationNodeID int not null comment '被检测节点的ID',
DelayInMs int not null comment '发起节点与被检测节点间延迟(毫秒)',
primary key(SourceNodeID, DestinationNodeID)
) comment = '节点延迟表';
create table NodeConnectivity (
FromNodeID int not null comment '发起检测的节点ID',
ToNodeID int not null comment '被检测节点的ID',
Delay float comment '发起节点与被检测节点间延迟(毫秒),为null代表节点不可达',
TestTime timestamp comment '进行连通性测试的时间',
primary key(FromNodeID, ToNodeID)
) comment = '节点连通性表';


create table User ( create table User (
UserID int not null primary key comment '用户ID', UserID int not null primary key comment '用户ID',
@@ -122,6 +123,8 @@ create table Object (
Size bigint not null comment '对象大小(Byte)', Size bigint not null comment '对象大小(Byte)',
FileHash varchar(100) not null comment '完整对象的FileHash', FileHash varchar(100) not null comment '完整对象的FileHash',
Redundancy JSON not null comment '冗余策略', Redundancy JSON not null comment '冗余策略',
CreateTime timestamp not null comment '创建时间',
UpdateTime timestamp not null comment '更新时间',
UNIQUE KEY PackagePath (PackageID, Path) UNIQUE KEY PackagePath (PackageID, Path)
) comment = '对象表'; ) comment = '对象表';




+ 0
- 88
common/pkgs/cmd/update_package.go View File

@@ -1,88 +0,0 @@
package cmd

import (
	"fmt"

	"github.com/samber/lo"

	cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

	stgglb "gitlink.org.cn/cloudream/storage/common/globals"
	"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
	"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
	coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// UpdatePackage uploads additional objects into an existing package.
type UpdatePackage struct {
	userID     cdssdk.UserID
	packageID  cdssdk.PackageID
	objectIter iterator.UploadingObjectIterator
}

// UpdatePackageResult reports the per-object outcome of the upload.
type UpdatePackageResult struct {
	ObjectResults []ObjectUploadResult
}

// UpdateNodeInfo decorates an upload node with whether it already holds an
// older version of the object being uploaded.
type UpdateNodeInfo struct {
	UploadNodeInfo
	HasOldObject bool
}

// NewUpdatePackage builds an UpdatePackage command that uploads the objects
// produced by objIter into packageID on behalf of userID.
func NewUpdatePackage(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator) *UpdatePackage {
	return &UpdatePackage{
		userID:     userID,
		packageID:  packageID,
		objectIter: objIter,
	}
}

// Execute performs the upload: it resolves the user's nodes, locks their IPFS
// instances via the distributed lock service, uploads every object, and
// registers the results with the coordinator.
func (t *UpdatePackage) Execute(ctx *UpdatePackageContext) (*UpdatePackageResult, error) {
	defer t.objectIter.Close()

	// NOTE(review): the acquired coordinator client is never released back to
	// the pool in this function — confirm whether a deferred Release is missing.
	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		return nil, fmt.Errorf("new coordinator client: %w", err)
	}

	getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return nil, fmt.Errorf("getting user nodes: %w", err)
	}

	userNodes := lo.Map(getUserNodesResp.Nodes, func(node cdssdk.Node, index int) UploadNodeInfo {
		return UploadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == stgglb.Local.LocationID,
		}
	})

	// Lock the IPFS instance on every upload node.
	ipfsReqBlder := reqbuilder.NewBuilder()
	// If the local IPFS daemon is itself a node of the storage system, it must
	// also be locked when uploading from this machine.
	if stgglb.Local.NodeID != nil {
		ipfsReqBlder.IPFS().Buzy(*stgglb.Local.NodeID)
	}
	for _, node := range userNodes {
		if stgglb.Local.NodeID != nil && node.Node.NodeID == *stgglb.Local.NodeID {
			// Already locked above; skip to avoid locking the local node twice.
			continue
		}

		ipfsReqBlder.IPFS().Buzy(node.Node.NodeID)
	}
	// TODO also take Object Create locks, ideally several in a single request.
	// Holding the IPFS locks prevents freshly uploaded replicas from being
	// cleaned up mid-upload.
	ipfsMutex, err := ipfsReqBlder.MutexLock(ctx.Distlock)
	if err != nil {
		return nil, fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer ipfsMutex.Unlock()

	rets, err := uploadAndUpdatePackage(t.packageID, t.objectIter, userNodes, nil)
	if err != nil {
		return nil, err
	}

	return &UpdatePackageResult{
		ObjectResults: rets,
	}, nil
}

common/pkgs/cmd/create_package.go → common/pkgs/cmd/upload_objects.go View File

@@ -3,32 +3,34 @@ package cmd
import ( import (
"fmt" "fmt"
"io" "io"
"math"
"math/rand" "math/rand"
"time"


"github.com/samber/lo" "github.com/samber/lo"


"gitlink.org.cn/cloudream/common/pkgs/distlock" "gitlink.org.cn/cloudream/common/pkgs/distlock"
"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/sort2"


stgglb "gitlink.org.cn/cloudream/storage/common/globals" stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator" "gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
) )


type CreatePackage struct {
type UploadObjects struct {
userID cdssdk.UserID userID cdssdk.UserID
bucketID cdssdk.BucketID
name string
packageID cdssdk.PackageID
objectIter iterator.UploadingObjectIterator objectIter iterator.UploadingObjectIterator
nodeAffinity *cdssdk.NodeID nodeAffinity *cdssdk.NodeID
} }


type CreatePackageResult struct {
PackageID cdssdk.PackageID
ObjectResults []ObjectUploadResult
type UploadObjectsResult struct {
Objects []ObjectUploadResult
} }


type ObjectUploadResult struct { type ObjectUploadResult struct {
@@ -40,24 +42,25 @@ type ObjectUploadResult struct {


type UploadNodeInfo struct { type UploadNodeInfo struct {
Node cdssdk.Node Node cdssdk.Node
Delay time.Duration
IsSameLocation bool IsSameLocation bool
} }


type UpdatePackageContext struct {
Distlock *distlock.Service
type UploadObjectsContext struct {
Distlock *distlock.Service
Connectivity *connectivity.Collector
} }


func NewCreatePackage(userID cdssdk.UserID, bucketID cdssdk.BucketID, name string, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *CreatePackage {
return &CreatePackage{
func NewUploadObjects(userID cdssdk.UserID, packageID cdssdk.PackageID, objIter iterator.UploadingObjectIterator, nodeAffinity *cdssdk.NodeID) *UploadObjects {
return &UploadObjects{
userID: userID, userID: userID,
bucketID: bucketID,
name: name,
packageID: packageID,
objectIter: objIter, objectIter: objIter,
nodeAffinity: nodeAffinity, nodeAffinity: nodeAffinity,
} }
} }


func (t *CreatePackage) Execute(ctx *UpdatePackageContext) (*CreatePackageResult, error) {
func (t *UploadObjects) Execute(ctx *UploadObjectsContext) (*UploadObjectsResult, error) {
defer t.objectIter.Close() defer t.objectIter.Close()


coorCli, err := stgglb.CoordinatorMQPool.Acquire() coorCli, err := stgglb.CoordinatorMQPool.Acquire()
@@ -65,22 +68,29 @@ func (t *CreatePackage) Execute(ctx *UpdatePackageContext) (*CreatePackageResult
return nil, fmt.Errorf("new coordinator client: %w", err) return nil, fmt.Errorf("new coordinator client: %w", err)
} }


createPkgResp, err := coorCli.CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name))
if err != nil {
return nil, fmt.Errorf("creating package: %w", err)
}

getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID)) getUserNodesResp, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(t.userID))
if err != nil { if err != nil {
return nil, fmt.Errorf("getting user nodes: %w", err) return nil, fmt.Errorf("getting user nodes: %w", err)
} }


cons := ctx.Connectivity.GetAll()
userNodes := lo.Map(getUserNodesResp.Nodes, func(node cdssdk.Node, index int) UploadNodeInfo { userNodes := lo.Map(getUserNodesResp.Nodes, func(node cdssdk.Node, index int) UploadNodeInfo {
delay := time.Duration(math.MaxInt64)

con, ok := cons[node.NodeID]
if ok && con.Delay != nil {
delay = *con.Delay
}

return UploadNodeInfo{ return UploadNodeInfo{
Node: node, Node: node,
Delay: delay,
IsSameLocation: node.LocationID == stgglb.Local.LocationID, IsSameLocation: node.LocationID == stgglb.Local.LocationID,
} }
}) })
if len(userNodes) == 0 {
return nil, fmt.Errorf("user no available nodes")
}


// 给上传节点的IPFS加锁 // 给上传节点的IPFS加锁
ipfsReqBlder := reqbuilder.NewBuilder() ipfsReqBlder := reqbuilder.NewBuilder()
@@ -103,21 +113,20 @@ func (t *CreatePackage) Execute(ctx *UpdatePackageContext) (*CreatePackageResult
} }
defer ipfsMutex.Unlock() defer ipfsMutex.Unlock()


rets, err := uploadAndUpdatePackage(createPkgResp.PackageID, t.objectIter, userNodes, t.nodeAffinity)
rets, err := uploadAndUpdatePackage(t.packageID, t.objectIter, userNodes, t.nodeAffinity)
if err != nil { if err != nil {
return nil, err return nil, err
} }


return &CreatePackageResult{
PackageID: createPkgResp.PackageID,
ObjectResults: rets,
return &UploadObjectsResult{
Objects: rets,
}, nil }, nil
} }


// chooseUploadNode 选择一个上传文件的节点 // chooseUploadNode 选择一个上传文件的节点
// 1. 选择设置了亲和性的节点 // 1. 选择设置了亲和性的节点
// 2. 从与当前客户端相同地域的节点中随机选一个 // 2. 从与当前客户端相同地域的节点中随机选一个
// 3. 没有用的话从所有节点中随机选一个
// 3. 没有的话从所有节点选择延迟最低的节点
func chooseUploadNode(nodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) UploadNodeInfo { func chooseUploadNode(nodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) UploadNodeInfo {
if nodeAffinity != nil { if nodeAffinity != nil {
aff, ok := lo.Find(nodes, func(node UploadNodeInfo) bool { return node.Node.NodeID == *nodeAffinity }) aff, ok := lo.Find(nodes, func(node UploadNodeInfo) bool { return node.Node.NodeID == *nodeAffinity })
@@ -131,7 +140,10 @@ func chooseUploadNode(nodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) Uploa
return sameLocationNodes[rand.Intn(len(sameLocationNodes))] return sameLocationNodes[rand.Intn(len(sameLocationNodes))]
} }


return nodes[rand.Intn(len(nodes))]
// 选择延迟最低的节点
nodes = sort2.Sort(nodes, func(e1, e2 UploadNodeInfo) int { return sort2.Cmp(e1.Delay, e2.Delay) })

return nodes[0]
} }


func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userNodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) ([]ObjectUploadResult, error) { func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.UploadingObjectIterator, userNodes []UploadNodeInfo, nodeAffinity *cdssdk.NodeID) ([]ObjectUploadResult, error) {
@@ -158,6 +170,7 @@ func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.Uplo
err = func() error { err = func() error {
defer objInfo.File.Close() defer objInfo.File.Close()


uploadTime := time.Now()
fileHash, err := uploadFile(objInfo.File, uploadNode) fileHash, err := uploadFile(objInfo.File, uploadNode)
if err != nil { if err != nil {
return fmt.Errorf("uploading file: %w", err) return fmt.Errorf("uploading file: %w", err)
@@ -168,7 +181,7 @@ func uploadAndUpdatePackage(packageID cdssdk.PackageID, objectIter iterator.Uplo
Error: err, Error: err,
}) })


adds = append(adds, coormq.NewAddObjectEntry(objInfo.Path, objInfo.Size, fileHash, uploadNode.Node.NodeID))
adds = append(adds, coormq.NewAddObjectEntry(objInfo.Path, objInfo.Size, fileHash, uploadTime, uploadNode.Node.NodeID))
return nil return nil
}() }()
if err != nil { if err != nil {

+ 219
- 0
common/pkgs/connectivity/collector.go View File

@@ -0,0 +1,219 @@
package connectivity

import (
"math/rand"
"sync"
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
)

// Connectivity records the outcome of one connectivity test from this node
// to ToNodeID. Delay is the measured one-way delay; a nil Delay means the
// node was unreachable at TestTime.
type Connectivity struct {
	ToNodeID cdssdk.NodeID
	Delay    *time.Duration
	TestTime time.Time
}

// Collector periodically pings every known node and caches the latest
// Connectivity result per destination node.
type Collector struct {
	cfg         *Config
	onCollected func(collector *Collector) // optional callback invoked after each collection round
	collectNow  chan any                   // signal channel: request an immediate collection round
	close       chan any                   // signal channel: stop the background loop
	connectivities map[cdssdk.NodeID]Connectivity
	lock           *sync.RWMutex // guards connectivities
}

// NewCollector creates a Collector and immediately starts its background
// collection loop.
//
// NOTE(review): the background goroutine runs on a pointer to the local
// value while a copy is returned to the caller; the copy shares the
// channels, the lock and the initial map (all reference types). Verify
// that results collected in the background remain visible through copies.
func NewCollector(cfg *Config, onCollected func(collector *Collector)) Collector {
	rpt := Collector{
		cfg:            cfg,
		collectNow:     make(chan any),
		close:          make(chan any),
		connectivities: make(map[cdssdk.NodeID]Connectivity),
		lock:           &sync.RWMutex{},
		onCollected:    onCollected,
	}
	go rpt.serve()
	return rpt
}

// Get returns the latest connectivity record for nodeID, or nil if no test
// result for that node is cached. The returned value is a copy and is safe
// for the caller to retain.
func (r *Collector) Get(nodeID cdssdk.NodeID) *Connectivity {
	r.lock.RLock()
	defer r.lock.RUnlock()

	if con, ok := r.connectivities[nodeID]; ok {
		c := con
		return &c
	}
	return nil
}
// GetAll returns a snapshot copy of every cached connectivity record, keyed
// by destination node ID. Mutating the returned map does not affect the
// collector's internal state.
func (r *Collector) GetAll() map[cdssdk.NodeID]Connectivity {
	r.lock.RLock()
	defer r.lock.RUnlock()

	// Pre-size the copy so filling it does not trigger rehashing.
	ret := make(map[cdssdk.NodeID]Connectivity, len(r.connectivities))
	for k, v := range r.connectivities {
		ret[k] = v
	}

	return ret
}

// CollecNow triggers one collection round asynchronously. If the background
// loop is busy (or a trigger is already pending), the request is dropped
// rather than queued; the call never blocks.
//
// Deprecated: the name is misspelled; use CollectNow instead.
func (r *Collector) CollecNow() {
	select {
	case r.collectNow <- nil:
	default:
	}
}

// CollectNow triggers one collection round asynchronously without blocking;
// redundant triggers are dropped.
func (r *Collector) CollectNow() {
	r.CollecNow()
}

// CollectInPlace runs one collection round synchronously on the calling
// goroutine, blocking until every node has been tested.
func (r *Collector) CollectInPlace() {
	r.testing()
}

// Close asks the background loop to stop. The request is best-effort: if the
// loop is not currently waiting in its select, the signal is dropped. Close
// never blocks and is safe to call multiple times.
func (r *Collector) Close() {
	select {
	case r.close <- nil:
	default:
	}
}

// serve is the background loop: it runs a collection round on every ticker
// tick or explicit trigger, until a close signal arrives.
func (r *Collector) serve() {
	log := logger.WithType[Collector]("")
	log.Info("start connectivity reporter")

	// To keep nodes that start at the same time from pinging in lockstep,
	// the first round fires after a random delay in [0, TestInterval) seconds.
	startup := true
	firstReportDelay := time.Duration(float64(r.cfg.TestInterval) * float64(time.Second) * rand.Float64())
	ticker := time.NewTicker(firstReportDelay)

loop:
	for {
		select {
		case <-ticker.C:
			r.testing()
			if startup {
				// After the randomized first round, switch the ticker to the
				// regular test interval.
				startup = false
				ticker.Reset(time.Duration(r.cfg.TestInterval) * time.Second)
			}

		case <-r.collectNow:
			r.testing()

		case <-r.close:
			ticker.Stop()
			break loop
		}
	}

	log.Info("stop connectivity reporter")
}

// testing runs one full collection round: it fetches the node list from the
// coordinator, pings every node concurrently, and replaces the cached
// results. Errors talking to the coordinator are logged and the round is
// skipped.
func (r *Collector) testing() {
	log := logger.WithType[Collector]("")
	log.Debug("do testing")

	coorCli, err := stgglb.CoordinatorMQPool.Acquire()
	if err != nil {
		// Previously swallowed silently; log so a dead MQ shows up in logs.
		log.Warnf("acquiring coordinator client: %v", err)
		return
	}
	defer stgglb.CoordinatorMQPool.Release(coorCli)

	getNodeResp, err := coorCli.GetNodes(coormq.NewGetNodes(nil))
	if err != nil {
		log.Warnf("getting nodes: %v", err)
		return
	}

	// Ping all nodes concurrently; each goroutine writes its own slot.
	wg := sync.WaitGroup{}
	cons := make([]Connectivity, len(getNodeResp.Nodes))
	for i, node := range getNodeResp.Nodes {
		tmpIdx := i
		tmpNode := node

		wg.Add(1)
		go func() {
			defer wg.Done()
			cons[tmpIdx] = r.ping(tmpNode)
		}()
	}

	wg.Wait()

	r.lock.Lock()
	// Rebuild the map contents in place rather than assigning a fresh map:
	// value copies of this Collector (NewCollector returns one) share the
	// same map header, and reassigning the field would leave those copies
	// reading the stale, original map forever. Deleting all entries first
	// still avoids residual records when nodes disappear.
	for k := range r.connectivities {
		delete(r.connectivities, k)
	}
	for _, con := range cons {
		r.connectivities[con.ToNodeID] = con
	}
	r.lock.Unlock()

	if r.onCollected != nil {
		r.onCollected(r)
	}
}

// ping measures the one-way delay to node over its agent RPC endpoint.
// Nodes in the same location as us are contacted via their local address.
// A nil Delay in the returned Connectivity means the node was unreachable.
func (r *Collector) ping(node cdssdk.Node) Connectivity {
	log := logger.WithType[Collector]("").WithField("NodeID", node.NodeID)

	ip := node.ExternalIP
	port := node.ExternalGRPCPort
	if node.LocationID == stgglb.Local.LocationID {
		// Same location: prefer the local address.
		ip = node.LocalIP
		port = node.LocalGRPCPort
	}

	// unreachable builds the failure result; shared by every error path.
	unreachable := func() Connectivity {
		return Connectivity{
			ToNodeID: node.NodeID,
			Delay:    nil,
			TestTime: time.Now(),
		}
	}

	agtCli, err := stgglb.AgentRPCPool.Acquire(ip, port)
	if err != nil {
		// Fixed: %w is only meaningful in fmt.Errorf; use %v when logging.
		log.Warnf("new agent %v:%v rpc client: %v", ip, port, err)
		return unreachable()
	}
	defer stgglb.AgentRPCPool.Release(agtCli)

	// The first ping only warms up the connection, so that connection setup
	// does not pollute the measured delay.
	if err := agtCli.Ping(); err != nil {
		log.Warnf("pre ping: %v", err)
		return unreachable()
	}

	// Average the one-way delay over several pings.
	const pingCount = 3
	var totalDelay time.Duration
	for i := 0; i < pingCount; i++ {
		start := time.Now()
		if err := agtCli.Ping(); err != nil {
			log.Warnf("ping: %v", err)
			return unreachable()
		}

		// The elapsed time covers a full round trip, so halve it to get the
		// one-way delay.
		totalDelay += time.Since(start) / 2

		// Wait one second between pings — but not after the last one, which
		// previously added a pointless second to every collection round.
		if i < pingCount-1 {
			<-time.After(time.Second)
		}
	}
	delay := totalDelay / pingCount

	return Connectivity{
		ToNodeID: node.NodeID,
		Delay:    &delay,
		TestTime: time.Now(),
	}
}

+ 5
- 0
common/pkgs/connectivity/config.go View File

@@ -0,0 +1,5 @@
package connectivity

// Config configures the connectivity Collector.
type Config struct {
	TestInterval int `json:"testInterval"` // interval between test rounds, in seconds
}

+ 11
- 0
common/pkgs/db/cache.go View File

@@ -46,6 +46,9 @@ func (*CacheDB) Create(ctx SQLContext, fileHash string, nodeID cdssdk.NodeID, pr


// 批量创建缓存记录 // 批量创建缓存记录
func (*CacheDB) BatchCreate(ctx SQLContext, caches []model.Cache) error { func (*CacheDB) BatchCreate(ctx SQLContext, caches []model.Cache) error {
if len(caches) == 0 {
return nil
}
return BatchNamedExec( return BatchNamedExec(
ctx, ctx,
"insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+ "insert into Cache(FileHash,NodeID,CreateTime,Priority) values(:FileHash,:NodeID,:CreateTime,:Priority)"+
@@ -57,6 +60,10 @@ func (*CacheDB) BatchCreate(ctx SQLContext, caches []model.Cache) error {
} }


func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error { func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeID cdssdk.NodeID, priority int) error {
if len(fileHashes) == 0 {
return nil
}

var caches []model.Cache var caches []model.Cache
var nowTime = time.Now() var nowTime = time.Now()
for _, hash := range fileHashes { for _, hash := range fileHashes {
@@ -78,6 +85,10 @@ func (*CacheDB) BatchCreateOnSameNode(ctx SQLContext, fileHashes []string, nodeI
} }


func (*CacheDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error { func (*CacheDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
if len(fileHashes) == 0 {
return nil
}

// TODO in语句有长度限制 // TODO in语句有长度限制
query, args, err := sqlx.In("delete from Cache where NodeID = ? and FileHash in (?)", nodeID, fileHashes) query, args, err := sqlx.In("delete from Cache where NodeID = ? and FileHash in (?)", nodeID, fileHashes)
if err != nil { if err != nil {


+ 3
- 11
common/pkgs/db/model/model.go View File

@@ -20,12 +20,6 @@ type Storage struct {
State string `db:"State" json:"state"` State string `db:"State" json:"state"`
} }


type NodeDelay struct {
SourceNodeID int64 `db:"SourceNodeID"`
DestinationNodeID int64 `db:"DestinationNodeID"`
DelayInMs int `db:"DelayInMs"`
}

type User struct { type User struct {
UserID cdssdk.UserID `db:"UserID" json:"userID"` UserID cdssdk.UserID `db:"UserID" json:"userID"`
Password string `db:"PassWord" json:"password"` Password string `db:"PassWord" json:"password"`
@@ -46,16 +40,14 @@ type UserStorage struct {
StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"` StorageID cdssdk.StorageID `db:"StorageID" json:"storageID"`
} }


type Bucket struct {
BucketID cdssdk.BucketID `db:"BucketID" json:"bucketID"`
Name string `db:"Name" json:"name"`
CreatorID cdssdk.UserID `db:"CreatorID" json:"creatorID"`
}
type Bucket = cdssdk.Bucket


type Package = cdssdk.Package type Package = cdssdk.Package


type Object = cdssdk.Object type Object = cdssdk.Object


type NodeConnectivity = cdssdk.NodeConnectivity

// 由于Object的Redundancy字段是interface,所以不能直接将查询结果scan成Object,必须先scan成TempObject, // 由于Object的Redundancy字段是interface,所以不能直接将查询结果scan成Object,必须先scan成TempObject,
// 再.ToObject()转成Object // 再.ToObject()转成Object
type TempObject struct { type TempObject struct {


+ 40
- 0
common/pkgs/db/node_connectivity.go View File

@@ -0,0 +1,40 @@
package db

import (
"github.com/jmoiron/sqlx"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/db/model"
)

type NodeConnectivityDB struct {
*DB
}

func (db *DB) NodeConnectivity() *NodeConnectivityDB {
return &NodeConnectivityDB{DB: db}
}

func (db *NodeConnectivityDB) BatchGetByFromNode(ctx SQLContext, nodeIDs []cdssdk.NodeID) ([]model.NodeConnectivity, error) {
if len(nodeIDs) == 0 {
return nil, nil
}

var ret []model.NodeConnectivity

sql, args, err := sqlx.In("select * from NodeConnectivity where NodeID in (?)", nodeIDs)
if err != nil {
return nil, err
}

return ret, sqlx.Select(ctx, &ret, sql, args...)
}

func (db *NodeConnectivityDB) BatchUpdateOrCreate(ctx SQLContext, cons []model.NodeConnectivity) error {
if len(cons) == 0 {
return nil
}

return BatchNamedExec(ctx,
"insert into NodeConnectivity(FromNodeID, ToNodeID, Delay, TestTime) values(:FromNodeID, :ToNodeID, :Delay, :TestTime) as new"+
" on duplicate key update Delay = new.Delay, TestTime = new.TestTime", 4, cons, nil)
}

+ 37
- 60
common/pkgs/db/object.go View File

@@ -27,6 +27,10 @@ func (db *ObjectDB) GetByID(ctx SQLContext, objectID cdssdk.ObjectID) (model.Obj
} }


func (db *ObjectDB) BatchGetPackageObjectIDs(ctx SQLContext, pkgID cdssdk.PackageID, pathes []string) ([]cdssdk.ObjectID, error) { func (db *ObjectDB) BatchGetPackageObjectIDs(ctx SQLContext, pkgID cdssdk.PackageID, pathes []string) ([]cdssdk.ObjectID, error) {
if len(pathes) == 0 {
return nil, nil
}

// TODO In语句 // TODO In语句
stmt, args, err := sqlx.In("select ObjectID from Object force index(PackagePath) where PackageID=? and Path in (?)", pkgID, pathes) stmt, args, err := sqlx.In("select ObjectID from Object force index(PackagePath) where PackageID=? and Path in (?)", pkgID, pathes)
if err != nil { if err != nil {
@@ -43,10 +47,10 @@ func (db *ObjectDB) BatchGetPackageObjectIDs(ctx SQLContext, pkgID cdssdk.Packag
return objIDs, nil return objIDs, nil
} }


func (db *ObjectDB) Create(ctx SQLContext, packageID cdssdk.PackageID, path string, size int64, fileHash string, redundancy cdssdk.Redundancy) (int64, error) {
sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy) values(?,?,?,?,?)"
func (db *ObjectDB) Create(ctx SQLContext, obj cdssdk.Object) (cdssdk.ObjectID, error) {
sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy, CreateTime, UpdateTime) values(?,?,?,?,?,?,?)"


ret, err := ctx.Exec(sql, packageID, path, size, redundancy)
ret, err := ctx.Exec(sql, obj.PackageID, obj.Path, obj.Size, obj.FileHash, obj.Redundancy, obj.UpdateTime, obj.UpdateTime)
if err != nil { if err != nil {
return 0, fmt.Errorf("insert object failed, err: %w", err) return 0, fmt.Errorf("insert object failed, err: %w", err)
} }
@@ -56,64 +60,22 @@ func (db *ObjectDB) Create(ctx SQLContext, packageID cdssdk.PackageID, path stri
return 0, fmt.Errorf("get id of inserted object failed, err: %w", err) return 0, fmt.Errorf("get id of inserted object failed, err: %w", err)
} }


return objectID, nil
return cdssdk.ObjectID(objectID), nil
} }


// 创建或者更新记录,返回值true代表是创建,false代表是更新
func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID cdssdk.PackageID, path string, size int64, fileHash string) (cdssdk.ObjectID, bool, error) {
// 首次上传Object时,默认不启用冗余,即使是在更新一个已有的Object也是如此
defRed := cdssdk.NewNoneRedundancy()

sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy) values(?,?,?,?,?) on duplicate key update Size = ?, FileHash = ?, Redundancy = ?"

ret, err := ctx.Exec(sql, packageID, path, size, fileHash, defRed, size, fileHash, defRed)
if err != nil {
return 0, false, fmt.Errorf("insert object failed, err: %w", err)
}

affs, err := ret.RowsAffected()
if err != nil {
return 0, false, fmt.Errorf("getting affected rows: %w", err)
}

// 影响行数为1时是插入,为2时是更新
if affs == 1 {
objectID, err := ret.LastInsertId()
if err != nil {
return 0, false, fmt.Errorf("get id of inserted object failed, err: %w", err)
}
return cdssdk.ObjectID(objectID), true, nil
}

var objID cdssdk.ObjectID
if err = sqlx.Get(ctx, &objID, "select ObjectID from Object where PackageID = ? and Path = ?", packageID, path); err != nil {
return 0, false, fmt.Errorf("getting object id: %w", err)
}

return objID, false, nil
}

// 批量创建或者更新记录
// 可以用于批量创建或者更新记录
// 用于创建时,需要额外检查PackageID+Path的唯一性
// 用于更新时,需要额外检查现存的PackageID+Path对应的ObjectID是否与待更新的ObjectID相同。不会更新CreateTime。
func (db *ObjectDB) BatchCreateOrUpdate(ctx SQLContext, objs []cdssdk.Object) error { func (db *ObjectDB) BatchCreateOrUpdate(ctx SQLContext, objs []cdssdk.Object) error {
sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy)" +
" values(:PackageID,:Path,:Size,:FileHash,:Redundancy)" +
" on duplicate key update Size = values(Size), FileHash = values(FileHash), Redundancy = values(Redundancy)"

return BatchNamedExec(ctx, sql, 5, objs, nil)
}

func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID cdssdk.ObjectID, fileSize int64) (bool, error) {
ret, err := ctx.Exec("update Object set FileSize = ? where ObjectID = ?", fileSize, objectID)
if err != nil {
return false, err
if len(objs) == 0 {
return nil
} }


cnt, err := ret.RowsAffected()
if err != nil {
return false, fmt.Errorf("get affected rows failed, err: %w", err)
}
sql := "insert into Object(PackageID, Path, Size, FileHash, Redundancy, CreateTime ,UpdateTime)" +
" values(:PackageID,:Path,:Size,:FileHash,:Redundancy, :CreateTime, :UpdateTime) as new" +
" on duplicate key update Size = new.Size, FileHash = new.FileHash, Redundancy = new.Redundancy, UpdateTime = new.UpdateTime"


return cnt > 0, nil
return BatchNamedExec(ctx, sql, 7, objs, nil)
} }


func (*ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Object, error) { func (*ObjectDB) GetPackageObjects(ctx SQLContext, packageID cdssdk.PackageID) ([]model.Object, error) {
@@ -175,6 +137,10 @@ func (db *ObjectDB) GetPackageObjectDetails(ctx SQLContext, packageID cdssdk.Pac
} }


func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []coormq.AddObjectEntry) ([]cdssdk.ObjectID, error) { func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []coormq.AddObjectEntry) ([]cdssdk.ObjectID, error) {
if len(adds) == 0 {
return nil, nil
}

objs := make([]cdssdk.Object, 0, len(adds)) objs := make([]cdssdk.Object, 0, len(adds))
for _, add := range adds { for _, add := range adds {
objs = append(objs, cdssdk.Object{ objs = append(objs, cdssdk.Object{
@@ -183,6 +149,8 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
Size: add.Size, Size: add.Size,
FileHash: add.FileHash, FileHash: add.FileHash,
Redundancy: cdssdk.NewNoneRedundancy(), // 首次上传默认使用不分块的none模式 Redundancy: cdssdk.NewNoneRedundancy(), // 首次上传默认使用不分块的none模式
CreateTime: add.UploadTime,
UpdateTime: add.UploadTime,
}) })
} }


@@ -219,7 +187,6 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
FileHash: add.FileHash, FileHash: add.FileHash,
}) })
} }

err = db.ObjectBlock().BatchCreate(ctx, objBlocks) err = db.ObjectBlock().BatchCreate(ctx, objBlocks)
if err != nil { if err != nil {
return nil, fmt.Errorf("batch create object blocks: %w", err) return nil, fmt.Errorf("batch create object blocks: %w", err)
@@ -234,7 +201,6 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
Priority: 0, Priority: 0,
}) })
} }

err = db.Cache().BatchCreate(ctx, caches) err = db.Cache().BatchCreate(ctx, caches)
if err != nil { if err != nil {
return nil, fmt.Errorf("batch create caches: %w", err) return nil, fmt.Errorf("batch create caches: %w", err)
@@ -244,6 +210,11 @@ func (db *ObjectDB) BatchAdd(ctx SQLContext, packageID cdssdk.PackageID, adds []
} }


func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.ChangeObjectRedundancyEntry) error { func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.ChangeObjectRedundancyEntry) error {
if len(objs) == 0 {
return nil
}

nowTime := time.Now()
objIDs := make([]cdssdk.ObjectID, 0, len(objs)) objIDs := make([]cdssdk.ObjectID, 0, len(objs))
dummyObjs := make([]cdssdk.Object, 0, len(objs)) dummyObjs := make([]cdssdk.Object, 0, len(objs))
for _, obj := range objs { for _, obj := range objs {
@@ -251,14 +222,16 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.ChangeOb
dummyObjs = append(dummyObjs, cdssdk.Object{ dummyObjs = append(dummyObjs, cdssdk.Object{
ObjectID: obj.ObjectID, ObjectID: obj.ObjectID,
Redundancy: obj.Redundancy, Redundancy: obj.Redundancy,
CreateTime: nowTime,
UpdateTime: nowTime,
}) })
} }


// 目前只能使用这种方式来同时更新大量数据 // 目前只能使用这种方式来同时更新大量数据
err := BatchNamedExec(ctx, err := BatchNamedExec(ctx,
"insert into Object(ObjectID, PackageID, Path, Size, FileHash, Redundancy)"+
" values(:ObjectID, :PackageID, :Path, :Size, :FileHash, :Redundancy) as new"+
" on duplicate key update Redundancy=new.Redundancy", 6, dummyObjs, nil)
"insert into Object(ObjectID, PackageID, Path, Size, FileHash, Redundancy, CreateTime, UpdateTime)"+
" values(:ObjectID, :PackageID, :Path, :Size, :FileHash, :Redundancy, :CreateTime, :UpdateTime) as new"+
" on duplicate key update Redundancy=new.Redundancy", 8, dummyObjs, nil)
if err != nil { if err != nil {
return fmt.Errorf("batch update object redundancy: %w", err) return fmt.Errorf("batch update object redundancy: %w", err)
} }
@@ -319,6 +292,10 @@ func (db *ObjectDB) BatchUpdateRedundancy(ctx SQLContext, objs []coormq.ChangeOb
} }


func (*ObjectDB) BatchDelete(ctx SQLContext, ids []cdssdk.ObjectID) error { func (*ObjectDB) BatchDelete(ctx SQLContext, ids []cdssdk.ObjectID) error {
if len(ids) == 0 {
return nil
}

query, args, err := sqlx.In("delete from Object where ObjectID in (?)", ids) query, args, err := sqlx.In("delete from Object where ObjectID in (?)", ids)
if err != nil { if err != nil {
return err return err


+ 12
- 0
common/pkgs/db/object_block.go View File

@@ -30,6 +30,10 @@ func (db *ObjectBlockDB) Create(ctx SQLContext, objectID cdssdk.ObjectID, index
} }


func (db *ObjectBlockDB) BatchCreate(ctx SQLContext, blocks []stgmod.ObjectBlock) error { func (db *ObjectBlockDB) BatchCreate(ctx SQLContext, blocks []stgmod.ObjectBlock) error {
if len(blocks) == 0 {
return nil
}

return BatchNamedExec(ctx, return BatchNamedExec(ctx,
"insert ignore into ObjectBlock(ObjectID, `Index`, NodeID, FileHash) values(:ObjectID, :Index, :NodeID, :FileHash)", "insert ignore into ObjectBlock(ObjectID, `Index`, NodeID, FileHash) values(:ObjectID, :Index, :NodeID, :FileHash)",
4, 4,
@@ -44,6 +48,10 @@ func (db *ObjectBlockDB) DeleteByObjectID(ctx SQLContext, objectID cdssdk.Object
} }


func (db *ObjectBlockDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []cdssdk.ObjectID) error { func (db *ObjectBlockDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []cdssdk.ObjectID) error {
if len(objectIDs) == 0 {
return nil
}

// TODO in语句有长度限制 // TODO in语句有长度限制
query, args, err := sqlx.In("delete from ObjectBlock where ObjectID in (?)", objectIDs) query, args, err := sqlx.In("delete from ObjectBlock where ObjectID in (?)", objectIDs)
if err != nil { if err != nil {
@@ -59,6 +67,10 @@ func (db *ObjectBlockDB) DeleteInPackage(ctx SQLContext, packageID cdssdk.Packag
} }


func (db *ObjectBlockDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error { func (db *ObjectBlockDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, fileHashes []string) error {
if len(fileHashes) == 0 {
return nil
}

query, args, err := sqlx.In("delete from ObjectBlock where NodeID = ? and FileHash in (?)", nodeID, fileHashes) query, args, err := sqlx.In("delete from ObjectBlock where NodeID = ? and FileHash in (?)", nodeID, fileHashes)
if err != nil { if err != nil {
return err return err


+ 17
- 1
common/pkgs/db/pinned_object.go View File

@@ -40,6 +40,10 @@ func (*PinnedObjectDB) TryCreate(ctx SQLContext, nodeID cdssdk.NodeID, objectID
} }


func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObject) error { func (*PinnedObjectDB) BatchTryCreate(ctx SQLContext, pinneds []cdssdk.PinnedObject) error {
if len(pinneds) == 0 {
return nil
}

return BatchNamedExec(ctx, "insert ignore into PinnedObject values(:NodeID,:ObjectID,:CreateTime)", 3, pinneds, nil) return BatchNamedExec(ctx, "insert ignore into PinnedObject values(:NodeID,:ObjectID,:CreateTime)", 3, pinneds, nil)
} }


@@ -54,6 +58,10 @@ func (*PinnedObjectDB) CreateFromPackage(ctx SQLContext, packageID cdssdk.Packag
} }


func (db *PinnedObjectDB) ObjectBatchCreate(ctx SQLContext, objectID cdssdk.ObjectID, nodeIDs []cdssdk.NodeID) error { func (db *PinnedObjectDB) ObjectBatchCreate(ctx SQLContext, objectID cdssdk.ObjectID, nodeIDs []cdssdk.NodeID) error {
if len(nodeIDs) == 0 {
return nil
}

for _, id := range nodeIDs { for _, id := range nodeIDs {
err := db.TryCreate(ctx, id, objectID, time.Now()) err := db.TryCreate(ctx, id, objectID, time.Now())
if err != nil { if err != nil {
@@ -74,6 +82,10 @@ func (*PinnedObjectDB) DeleteByObjectID(ctx SQLContext, objectID cdssdk.ObjectID
} }


func (*PinnedObjectDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []cdssdk.ObjectID) error { func (*PinnedObjectDB) BatchDeleteByObjectID(ctx SQLContext, objectIDs []cdssdk.ObjectID) error {
if len(objectIDs) == 0 {
return nil
}

// TODO in语句有长度限制 // TODO in语句有长度限制
query, args, err := sqlx.In("delete from PinnedObject where ObjectID in (?)", objectIDs) query, args, err := sqlx.In("delete from PinnedObject where ObjectID in (?)", objectIDs)
if err != nil { if err != nil {
@@ -94,7 +106,11 @@ func (*PinnedObjectDB) DeleteInPackageAtNode(ctx SQLContext, packageID cdssdk.Pa
} }


func (*PinnedObjectDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, objectIDs []cdssdk.ObjectID) error { func (*PinnedObjectDB) NodeBatchDelete(ctx SQLContext, nodeID cdssdk.NodeID, objectIDs []cdssdk.ObjectID) error {
query, args, err := sqlx.In("delete from PinnedObject where NodeID = ? and ObjectID in (?)", objectIDs)
if len(objectIDs) == 0 {
return nil
}

query, args, err := sqlx.In("delete from PinnedObject where NodeID = ? and ObjectID in (?)", nodeID, objectIDs)
if err != nil { if err != nil {
return err return err
} }


+ 2
- 2
common/pkgs/db/utils.go View File

@@ -31,7 +31,7 @@ func BatchNamedExec[T any](ctx SQLContext, sql string, argCnt int, arr []T, call


ret, err := ctx.NamedExec(sql, arr[:curBatchSize]) ret, err := ctx.NamedExec(sql, arr[:curBatchSize])
if err != nil { if err != nil {
return nil
return err
} }
if callback != nil && !callback(ret) { if callback != nil && !callback(ret) {
return nil return nil
@@ -63,7 +63,7 @@ func BatchNamedQuery[T any](ctx SQLContext, sql string, argCnt int, arr []T, cal


ret, err := ctx.NamedQuery(sql, arr[:curBatchSize]) ret, err := ctx.NamedQuery(sql, arr[:curBatchSize])
if err != nil { if err != nil {
return nil
return err
} }
if callback != nil && !callback(ret) { if callback != nil && !callback(ret) {
return nil return nil


+ 136
- 28
common/pkgs/grpc/agent/agent.pb.go View File

@@ -386,6 +386,82 @@ func (x *FetchStreamReq) GetStreamID() string {
return "" return ""
} }


type PingReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *PingReq) Reset() {
*x = PingReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *PingReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*PingReq) ProtoMessage() {}

func (x *PingReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use PingReq.ProtoReflect.Descriptor instead.
func (*PingReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{6}
}

type PingResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *PingResp) Reset() {
*x = PingResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *PingResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*PingResp) ProtoMessage() {}

func (x *PingResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use PingResp.ProtoReflect.Descriptor instead.
func (*PingResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{7}
}

var File_pkgs_grpc_agent_agent_proto protoreflect.FileDescriptor var File_pkgs_grpc_agent_agent_proto protoreflect.FileDescriptor


var file_pkgs_grpc_agent_agent_proto_rawDesc = []byte{ var file_pkgs_grpc_agent_agent_proto_rawDesc = []byte{
@@ -415,26 +491,30 @@ var file_pkgs_grpc_agent_agent_proto_rawDesc = []byte{
0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e,
0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44,
0x12, 0x1a, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x2a, 0x37, 0x0a, 0x14,
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x4f, 0x46, 0x10, 0x00, 0x12, 0x08, 0x0a,
0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x64, 0x41,
0x72, 0x67, 0x73, 0x10, 0x02, 0x32, 0xe1, 0x01, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12,
0x36, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12,
0x0f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
0x1a, 0x11, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52,
0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x33, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x50,
0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53,
0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61,
0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x0a,
0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x11, 0x2e, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x0f, 0x2e,
0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00,
0x28, 0x01, 0x12, 0x35, 0x0a, 0x0b, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x12, 0x0f, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52,
0x65, 0x71, 0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50,
0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61,
0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x28, 0x09, 0x52, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x22, 0x09, 0x0a, 0x07,
0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x22, 0x0a, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x52,
0x65, 0x73, 0x70, 0x2a, 0x37, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74,
0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45,
0x4f, 0x46, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x01, 0x12, 0x0c,
0x0a, 0x08, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x72, 0x67, 0x73, 0x10, 0x02, 0x32, 0x80, 0x02, 0x0a,
0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50,
0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74,
0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x11, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50,
0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x33,
0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e,
0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f,
0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22,
0x00, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x12, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61,
0x63, 0x6b, 0x65, 0x74, 0x1a, 0x0f, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x35, 0x0a, 0x0b, 0x46, 0x65, 0x74,
0x63, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0f, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68,
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01,
0x12, 0x1d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x08, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52,
0x65, 0x71, 0x1a, 0x09, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42,
0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
} }


var ( var (
@@ -450,7 +530,7 @@ func file_pkgs_grpc_agent_agent_proto_rawDescGZIP() []byte {
} }


var file_pkgs_grpc_agent_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_pkgs_grpc_agent_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_pkgs_grpc_agent_agent_proto_goTypes = []interface{}{ var file_pkgs_grpc_agent_agent_proto_goTypes = []interface{}{
(StreamDataPacketType)(0), // 0: StreamDataPacketType (StreamDataPacketType)(0), // 0: StreamDataPacketType
(*FileDataPacket)(nil), // 1: FileDataPacket (*FileDataPacket)(nil), // 1: FileDataPacket
@@ -459,6 +539,8 @@ var file_pkgs_grpc_agent_agent_proto_goTypes = []interface{}{
(*StreamDataPacket)(nil), // 4: StreamDataPacket (*StreamDataPacket)(nil), // 4: StreamDataPacket
(*SendStreamResp)(nil), // 5: SendStreamResp (*SendStreamResp)(nil), // 5: SendStreamResp
(*FetchStreamReq)(nil), // 6: FetchStreamReq (*FetchStreamReq)(nil), // 6: FetchStreamReq
(*PingReq)(nil), // 7: PingReq
(*PingResp)(nil), // 8: PingResp
} }
var file_pkgs_grpc_agent_agent_proto_depIdxs = []int32{ var file_pkgs_grpc_agent_agent_proto_depIdxs = []int32{
0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType 0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType
@@ -467,12 +549,14 @@ var file_pkgs_grpc_agent_agent_proto_depIdxs = []int32{
3, // 3: Agent.GetIPFSFile:input_type -> GetIPFSFileReq 3, // 3: Agent.GetIPFSFile:input_type -> GetIPFSFileReq
4, // 4: Agent.SendStream:input_type -> StreamDataPacket 4, // 4: Agent.SendStream:input_type -> StreamDataPacket
6, // 5: Agent.FetchStream:input_type -> FetchStreamReq 6, // 5: Agent.FetchStream:input_type -> FetchStreamReq
2, // 6: Agent.SendIPFSFile:output_type -> SendIPFSFileResp
1, // 7: Agent.GetIPFSFile:output_type -> FileDataPacket
5, // 8: Agent.SendStream:output_type -> SendStreamResp
4, // 9: Agent.FetchStream:output_type -> StreamDataPacket
6, // [6:10] is the sub-list for method output_type
2, // [2:6] is the sub-list for method input_type
7, // 6: Agent.Ping:input_type -> PingReq
2, // 7: Agent.SendIPFSFile:output_type -> SendIPFSFileResp
1, // 8: Agent.GetIPFSFile:output_type -> FileDataPacket
5, // 9: Agent.SendStream:output_type -> SendStreamResp
4, // 10: Agent.FetchStream:output_type -> StreamDataPacket
8, // 11: Agent.Ping:output_type -> PingResp
7, // [7:12] is the sub-list for method output_type
2, // [2:7] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name 2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee 2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name 0, // [0:2] is the sub-list for field type_name
@@ -556,6 +640,30 @@ func file_pkgs_grpc_agent_agent_proto_init() {
return nil return nil
} }
} }
file_pkgs_grpc_agent_agent_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
} }
type x struct{} type x struct{}
out := protoimpl.TypeBuilder{ out := protoimpl.TypeBuilder{
@@ -563,7 +671,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkgs_grpc_agent_agent_proto_rawDesc, RawDescriptor: file_pkgs_grpc_agent_agent_proto_rawDesc,
NumEnums: 1, NumEnums: 1,
NumMessages: 6,
NumMessages: 8,
NumExtensions: 0, NumExtensions: 0,
NumServices: 1, NumServices: 1,
}, },


+ 5
- 0
common/pkgs/grpc/agent/agent.proto View File

@@ -40,11 +40,16 @@ message FetchStreamReq {
string StreamID = 2; string StreamID = 2;
} }


message PingReq {}
message PingResp {}

service Agent { service Agent {
rpc SendIPFSFile(stream FileDataPacket)returns(SendIPFSFileResp){} rpc SendIPFSFile(stream FileDataPacket)returns(SendIPFSFileResp){}
rpc GetIPFSFile(GetIPFSFileReq)returns(stream FileDataPacket){} rpc GetIPFSFile(GetIPFSFileReq)returns(stream FileDataPacket){}


rpc SendStream(stream StreamDataPacket)returns(SendStreamResp){} rpc SendStream(stream StreamDataPacket)returns(SendStreamResp){}
rpc FetchStream(FetchStreamReq)returns(stream StreamDataPacket){} rpc FetchStream(FetchStreamReq)returns(stream StreamDataPacket){}

rpc Ping(PingReq) returns(PingResp){}
} }



+ 39
- 1
common/pkgs/grpc/agent/agent_grpc.pb.go View File

@@ -25,6 +25,7 @@ const (
Agent_GetIPFSFile_FullMethodName = "/Agent/GetIPFSFile" Agent_GetIPFSFile_FullMethodName = "/Agent/GetIPFSFile"
Agent_SendStream_FullMethodName = "/Agent/SendStream" Agent_SendStream_FullMethodName = "/Agent/SendStream"
Agent_FetchStream_FullMethodName = "/Agent/FetchStream" Agent_FetchStream_FullMethodName = "/Agent/FetchStream"
Agent_Ping_FullMethodName = "/Agent/Ping"
) )


// AgentClient is the client API for Agent service. // AgentClient is the client API for Agent service.
@@ -35,6 +36,7 @@ type AgentClient interface {
GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error) GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error)
SendStream(ctx context.Context, opts ...grpc.CallOption) (Agent_SendStreamClient, error) SendStream(ctx context.Context, opts ...grpc.CallOption) (Agent_SendStreamClient, error)
FetchStream(ctx context.Context, in *FetchStreamReq, opts ...grpc.CallOption) (Agent_FetchStreamClient, error) FetchStream(ctx context.Context, in *FetchStreamReq, opts ...grpc.CallOption) (Agent_FetchStreamClient, error)
Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error)
} }


type agentClient struct { type agentClient struct {
@@ -177,6 +179,15 @@ func (x *agentFetchStreamClient) Recv() (*StreamDataPacket, error) {
return m, nil return m, nil
} }


func (c *agentClient) Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error) {
out := new(PingResp)
err := c.cc.Invoke(ctx, Agent_Ping_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

// AgentServer is the server API for Agent service. // AgentServer is the server API for Agent service.
// All implementations must embed UnimplementedAgentServer // All implementations must embed UnimplementedAgentServer
// for forward compatibility // for forward compatibility
@@ -185,6 +196,7 @@ type AgentServer interface {
GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error
SendStream(Agent_SendStreamServer) error SendStream(Agent_SendStreamServer) error
FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error
Ping(context.Context, *PingReq) (*PingResp, error)
mustEmbedUnimplementedAgentServer() mustEmbedUnimplementedAgentServer()
} }


@@ -204,6 +216,9 @@ func (UnimplementedAgentServer) SendStream(Agent_SendStreamServer) error {
func (UnimplementedAgentServer) FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error { func (UnimplementedAgentServer) FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error {
return status.Errorf(codes.Unimplemented, "method FetchStream not implemented") return status.Errorf(codes.Unimplemented, "method FetchStream not implemented")
} }
func (UnimplementedAgentServer) Ping(context.Context, *PingReq) (*PingResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
}
func (UnimplementedAgentServer) mustEmbedUnimplementedAgentServer() {} func (UnimplementedAgentServer) mustEmbedUnimplementedAgentServer() {}


// UnsafeAgentServer may be embedded to opt out of forward compatibility for this service. // UnsafeAgentServer may be embedded to opt out of forward compatibility for this service.
@@ -311,13 +326,36 @@ func (x *agentFetchStreamServer) Send(m *StreamDataPacket) error {
return x.ServerStream.SendMsg(m) return x.ServerStream.SendMsg(m)
} }


// _Agent_Ping_Handler is the server-side gRPC glue for the unary Ping method.
// It decodes the incoming request into a PingReq and then either invokes the
// service implementation directly or routes the call through the configured
// unary server interceptor. This follows the protoc-gen-go-grpc generated-code
// pattern; keep the logic identical to what regeneration would produce.
func _Agent_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PingReq)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor installed: call the implementation directly.
		return srv.(AgentServer).Ping(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Agent_Ping_FullMethodName,
	}
	// handler re-invokes the real implementation; the interceptor wraps it.
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AgentServer).Ping(ctx, req.(*PingReq))
	}
	return interceptor(ctx, in, info, handler)
}

// Agent_ServiceDesc is the grpc.ServiceDesc for Agent service. // Agent_ServiceDesc is the grpc.ServiceDesc for Agent service.
// It's only intended for direct use with grpc.RegisterService, // It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy) // and not to be introspected or modified (even as a copy)
var Agent_ServiceDesc = grpc.ServiceDesc{ var Agent_ServiceDesc = grpc.ServiceDesc{
ServiceName: "Agent", ServiceName: "Agent",
HandlerType: (*AgentServer)(nil), HandlerType: (*AgentServer)(nil),
Methods: []grpc.MethodDesc{},
Methods: []grpc.MethodDesc{
{
MethodName: "Ping",
Handler: _Agent_Ping_Handler,
},
},
Streams: []grpc.StreamDesc{ Streams: []grpc.StreamDesc{
{ {
StreamName: "SendIPFSFile", StreamName: "SendIPFSFile",


+ 5
- 0
common/pkgs/grpc/agent/client.go View File

@@ -209,6 +209,11 @@ func (c *Client) FetchStream(planID ioswitch.PlanID, streamID ioswitch.StreamID)
}, nil }, nil
} }


// Ping issues an empty Ping RPC to the agent, verifying that the gRPC
// connection is alive. It returns the RPC error, or nil on success.
func (c *Client) Ping() error {
	if _, err := c.cli.Ping(context.Background(), &PingReq{}); err != nil {
		return err
	}
	return nil
}

func (c *Client) Close() { func (c *Client) Close() {
c.con.Close() c.con.Close()
} }

+ 53
- 0
common/pkgs/mq/coordinator/node.go View File

@@ -9,6 +9,10 @@ type NodeService interface {
GetUserNodes(msg *GetUserNodes) (*GetUserNodesResp, *mq.CodeMessage) GetUserNodes(msg *GetUserNodes) (*GetUserNodesResp, *mq.CodeMessage)


GetNodes(msg *GetNodes) (*GetNodesResp, *mq.CodeMessage) GetNodes(msg *GetNodes) (*GetNodesResp, *mq.CodeMessage)

GetNodeConnectivities(msg *GetNodeConnectivities) (*GetNodeConnectivitiesResp, *mq.CodeMessage)

UpdateNodeConnectivities(msg *UpdateNodeConnectivities) (*UpdateNodeConnectivitiesResp, *mq.CodeMessage)
} }


// 查询用户可用的节点 // 查询用户可用的节点
@@ -71,3 +75,52 @@ func (r *GetNodesResp) GetNode(id cdssdk.NodeID) *cdssdk.Node {
func (client *Client) GetNodes(msg *GetNodes) (*GetNodesResp, error) { func (client *Client) GetNodes(msg *GetNodes) (*GetNodesResp, error) {
return mq.Request(Service.GetNodes, client.rabbitCli, msg) return mq.Request(Service.GetNodes, client.rabbitCli, msg)
} }

// GetNodeConnectivities queries the connectivity records measured from the
// given source nodes.
var _ = Register(Service.GetNodeConnectivities)

// GetNodeConnectivities is the request carrying the source node IDs to query.
type GetNodeConnectivities struct {
	mq.MessageBodyBase
	NodeIDs []cdssdk.NodeID `json:"nodeIDs"`
}

// GetNodeConnectivitiesResp carries the connectivity records for the
// requested nodes.
type GetNodeConnectivitiesResp struct {
	mq.MessageBodyBase
	// Tag fixed from "nodes" (a copy-paste leftover from GetNodesResp) to
	// "connectivities", matching the field name and the tag used by the
	// sibling UpdateNodeConnectivities message. Both ends of the MQ exchange
	// serialize with this same struct, so the change is self-consistent.
	Connectivities []cdssdk.NodeConnectivity `json:"connectivities"`
}

// ReqGetNodeConnectivities builds a GetNodeConnectivities request for the
// given source node IDs.
func ReqGetNodeConnectivities(nodeIDs []cdssdk.NodeID) *GetNodeConnectivities {
	return &GetNodeConnectivities{
		NodeIDs: nodeIDs,
	}
}

// RespGetNodeConnectivities builds the response carrying the queried records.
func RespGetNodeConnectivities(cons []cdssdk.NodeConnectivity) *GetNodeConnectivitiesResp {
	return &GetNodeConnectivitiesResp{
		Connectivities: cons,
	}
}

// GetNodeConnectivities sends the request over RabbitMQ and waits for the
// coordinator's reply.
func (client *Client) GetNodeConnectivities(msg *GetNodeConnectivities) (*GetNodeConnectivitiesResp, error) {
	return mq.Request(Service.GetNodeConnectivities, client.rabbitCli, msg)
}

// UpdateNodeConnectivities batch-updates node connectivity records.
var _ = Register(Service.UpdateNodeConnectivities)

// UpdateNodeConnectivities is the request carrying the measured connectivity
// records to persist.
type UpdateNodeConnectivities struct {
	mq.MessageBodyBase
	Connectivities []cdssdk.NodeConnectivity `json:"connectivities"`
}

// UpdateNodeConnectivitiesResp is the (empty) acknowledgement for an update.
type UpdateNodeConnectivitiesResp struct {
	mq.MessageBodyBase
}

// ReqUpdateNodeConnectivities builds an update request from the given records.
func ReqUpdateNodeConnectivities(cons []cdssdk.NodeConnectivity) *UpdateNodeConnectivities {
	return &UpdateNodeConnectivities{
		Connectivities: cons,
	}
}

// RespUpdateNodeConnectivities builds the empty acknowledgement response.
func RespUpdateNodeConnectivities() *UpdateNodeConnectivitiesResp {
	return &UpdateNodeConnectivitiesResp{}
}

// UpdateNodeConnectivities sends the request over RabbitMQ and waits for the
// coordinator's acknowledgement.
func (client *Client) UpdateNodeConnectivities(msg *UpdateNodeConnectivities) (*UpdateNodeConnectivitiesResp, error) {
	return mq.Request(Service.UpdateNodeConnectivities, client.rabbitCli, msg)
}

+ 13
- 9
common/pkgs/mq/coordinator/package.go View File

@@ -1,6 +1,8 @@
package coordinator package coordinator


import ( import (
"time"

"gitlink.org.cn/cloudream/common/pkgs/mq" "gitlink.org.cn/cloudream/common/pkgs/mq"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"


@@ -92,10 +94,11 @@ type UpdatePackageResp struct {
mq.MessageBodyBase mq.MessageBodyBase
} }
type AddObjectEntry struct { type AddObjectEntry struct {
Path string `json:"path"`
Size int64 `json:"size,string"`
FileHash string `json:"fileHash"`
NodeID cdssdk.NodeID `json:"nodeID"`
Path string `json:"path"`
Size int64 `json:"size,string"`
FileHash string `json:"fileHash"`
UploadTime time.Time `json:"uploadTime"` // 开始上传文件的时间
NodeID cdssdk.NodeID `json:"nodeID"`
} }


func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectEntry, deletes []cdssdk.ObjectID) *UpdatePackage { func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectEntry, deletes []cdssdk.ObjectID) *UpdatePackage {
@@ -108,12 +111,13 @@ func NewUpdatePackage(packageID cdssdk.PackageID, adds []AddObjectEntry, deletes
func NewUpdatePackageResp() *UpdatePackageResp { func NewUpdatePackageResp() *UpdatePackageResp {
return &UpdatePackageResp{} return &UpdatePackageResp{}
} }
func NewAddObjectEntry(path string, size int64, fileHash string, nodeIDs cdssdk.NodeID) AddObjectEntry {
func NewAddObjectEntry(path string, size int64, fileHash string, uploadTime time.Time, nodeID cdssdk.NodeID) AddObjectEntry {
return AddObjectEntry{ return AddObjectEntry{
Path: path,
Size: size,
FileHash: fileHash,
NodeID: nodeIDs,
Path: path,
Size: size,
FileHash: fileHash,
UploadTime: uploadTime,
NodeID: nodeID,
} }
} }
func (client *Client) UpdatePackage(msg *UpdatePackage) (*UpdatePackageResp, error) { func (client *Client) UpdatePackage(msg *UpdatePackage) (*UpdatePackageResp, error) {


+ 49
- 0
coordinator/internal/mq/node.go View File

@@ -1,6 +1,10 @@
package mq package mq


import ( import (
"database/sql"
"fmt"

"github.com/jmoiron/sqlx"
"gitlink.org.cn/cloudream/common/consts/errorcode" "gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger" "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq" "gitlink.org.cn/cloudream/common/pkgs/mq"
@@ -46,3 +50,48 @@ func (svc *Service) GetNodes(msg *coormq.GetNodes) (*coormq.GetNodesResp, *mq.Co


return mq.ReplyOK(coormq.NewGetNodesResp(nodes)) return mq.ReplyOK(coormq.NewGetNodesResp(nodes))
} }

// GetNodeConnectivities returns the connectivity records whose source is one
// of the requested nodes, loaded via NodeConnectivity().BatchGetByFromNode.
// On a database error it logs a warning and replies with OperationFailed.
func (svc *Service) GetNodeConnectivities(msg *coormq.GetNodeConnectivities) (*coormq.GetNodeConnectivitiesResp, *mq.CodeMessage) {
	cons, err := svc.db.NodeConnectivity().BatchGetByFromNode(svc.db.SQLCtx(), msg.NodeIDs)
	if err != nil {
		logger.Warnf("batch get node connectivities by from node: %s", err.Error())
		return nil, mq.Failed(errorcode.OperationFailed, "batch get node connectivities by from node failed")
	}

	return mq.ReplyOK(coormq.RespGetNodeConnectivities(cons))
}

// UpdateNodeConnectivities batch-persists the reported connectivity records
// inside a serializable transaction. Records referencing a source or
// destination node unknown to the database are silently dropped so that stale
// reports cannot create dangling rows. On failure it logs a warning and
// replies with OperationFailed carrying the error text.
func (svc *Service) UpdateNodeConnectivities(msg *coormq.UpdateNodeConnectivities) (*coormq.UpdateNodeConnectivitiesResp, *mq.CodeMessage) {
	err := svc.db.DoTx(sql.LevelSerializable, func(tx *sqlx.Tx) error {
		// A record may only be stored when both its endpoints still exist,
		// so load the full node list first.
		allNodes, err := svc.db.Node().GetAllNodes(tx)
		if err != nil {
			return fmt.Errorf("getting all nodes: %w", err)
		}

		allNodeID := make(map[cdssdk.NodeID]bool, len(allNodes))
		for _, node := range allNodes {
			allNodeID[node.NodeID] = true
		}

		// Keep only the records whose source and destination are both known.
		availCons := make([]cdssdk.NodeConnectivity, 0, len(msg.Connectivities))
		for _, con := range msg.Connectivities {
			if allNodeID[con.FromNodeID] && allNodeID[con.ToNodeID] {
				availCons = append(availCons, con)
			}
		}

		err = svc.db.NodeConnectivity().BatchUpdateOrCreate(tx, availCons)
		if err != nil {
			// Wrap with %w (was %s) so callers can unwrap the DB error with
			// errors.Is/As; the rendered message is unchanged.
			return fmt.Errorf("batch update or create node connectivities: %w", err)
		}

		return nil
	})
	if err != nil {
		logger.Warn(err.Error())
		return nil, mq.Failed(errorcode.OperationFailed, err.Error())
	}

	return mq.ReplyOK(coormq.RespUpdateNodeConnectivities())
}

+ 4
- 1
scanner/internal/event/agent_cache_gc.go View File

@@ -40,8 +40,11 @@ func (t *AgentCacheGC) TryMerge(other Event) bool {


func (t *AgentCacheGC) Execute(execCtx ExecuteContext) { func (t *AgentCacheGC) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentCacheGC]("Event") log := logger.WithType[AgentCacheGC]("Event")
startTime := time.Now()
log.Debugf("begin with %v", logger.FormatStruct(t.AgentCacheGC)) log.Debugf("begin with %v", logger.FormatStruct(t.AgentCacheGC))
defer log.Debugf("end")
defer func() {
log.Debugf("end, time: %v", time.Since(startTime))
}()


// TODO unavailable的节点需不需要发送任务? // TODO unavailable的节点需不需要发送任务?




+ 4
- 2
scanner/internal/event/agent_check_cache.go View File

@@ -40,9 +40,11 @@ func (t *AgentCheckCache) TryMerge(other Event) bool {


func (t *AgentCheckCache) Execute(execCtx ExecuteContext) { func (t *AgentCheckCache) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentCheckCache]("Event") log := logger.WithType[AgentCheckCache]("Event")
startTime := time.Now()
log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckCache)) log.Debugf("begin with %v", logger.FormatStruct(t.AgentCheckCache))
defer log.Debugf("end")

defer func() {
log.Debugf("end, time: %v", time.Since(startTime))
}()
// TODO unavailable的节点需不需要发送任务? // TODO unavailable的节点需不需要发送任务?


agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID) agtCli, err := stgglb.AgentMQPool.Acquire(t.NodeID)


+ 4
- 1
scanner/internal/event/agent_storage_gc.go View File

@@ -37,8 +37,11 @@ func (t *AgentStorageGC) TryMerge(other Event) bool {


func (t *AgentStorageGC) Execute(execCtx ExecuteContext) { func (t *AgentStorageGC) Execute(execCtx ExecuteContext) {
log := logger.WithType[AgentStorageGC]("Event") log := logger.WithType[AgentStorageGC]("Event")
startTime := time.Now()
log.Debugf("begin with %v", logger.FormatStruct(t.AgentStorageGC)) log.Debugf("begin with %v", logger.FormatStruct(t.AgentStorageGC))
defer log.Debugf("end")
defer func() {
log.Debugf("end, time: %v", time.Since(startTime))
}()


// TODO unavailable的节点需不需要发送任务? // TODO unavailable的节点需不需要发送任务?




+ 5
- 2
scanner/internal/event/check_package_redundancy.go View File

@@ -51,8 +51,11 @@ func (t *CheckPackageRedundancy) TryMerge(other Event) bool {


func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) { func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) {
log := logger.WithType[CheckPackageRedundancy]("Event") log := logger.WithType[CheckPackageRedundancy]("Event")
startTime := time.Now()
log.Debugf("begin with %v", logger.FormatStruct(t.CheckPackageRedundancy)) log.Debugf("begin with %v", logger.FormatStruct(t.CheckPackageRedundancy))
defer log.Debugf("end")
defer func() {
log.Debugf("end, time: %v", time.Since(startTime))
}()


coorCli, err := stgglb.CoordinatorMQPool.Acquire() coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil { if err != nil {
@@ -74,7 +77,7 @@ func (t *CheckPackageRedundancy) Execute(execCtx ExecuteContext) {
} }


// TODO UserID // TODO UserID
getNodes, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(0))
getNodes, err := coorCli.GetUserNodes(coormq.NewGetUserNodes(1))
if err != nil { if err != nil {
log.Warnf("getting all nodes: %s", err.Error()) log.Warnf("getting all nodes: %s", err.Error())
return return


+ 5
- 1
scanner/internal/event/clean_pinned.go View File

@@ -5,6 +5,7 @@ import (
"math" "math"
"math/rand" "math/rand"
"sync" "sync"
"time"


"github.com/samber/lo" "github.com/samber/lo"
"gitlink.org.cn/cloudream/common/pkgs/bitmap" "gitlink.org.cn/cloudream/common/pkgs/bitmap"
@@ -44,8 +45,11 @@ func (t *CleanPinned) TryMerge(other Event) bool {


func (t *CleanPinned) Execute(execCtx ExecuteContext) { func (t *CleanPinned) Execute(execCtx ExecuteContext) {
log := logger.WithType[CleanPinned]("Event") log := logger.WithType[CleanPinned]("Event")
startTime := time.Now()
log.Debugf("begin with %v", logger.FormatStruct(t.CleanPinned)) log.Debugf("begin with %v", logger.FormatStruct(t.CleanPinned))
defer log.Debugf("end")
defer func() {
log.Debugf("end, time: %v", time.Since(startTime))
}()


coorCli, err := stgglb.CoordinatorMQPool.Acquire() coorCli, err := stgglb.CoordinatorMQPool.Acquire()
if err != nil { if err != nil {


Loading…
Cancel
Save