Browse Source

重构ioswitch模块

gitlink
Sydonian 1 year ago
parent
commit
d040e99e3b
39 changed files with 1726 additions and 1802 deletions
  1. +109
    -19
      agent/internal/grpc/io.go
  2. +3
    -126
      agent/internal/grpc/service.go
  3. +0
    -65
      agent/internal/mq/io.go
  4. +3
    -3
      agent/internal/mq/service.go
  5. +0
    -45
      agent/internal/task/execute_io_plan.go
  6. +3
    -3
      agent/internal/task/task.go
  7. +1
    -1
      agent/main.go
  8. +12
    -0
      common/globals/utils.go
  9. +14
    -17
      common/pkgs/cmd/upload_objects.go
  10. +5
    -12
      common/pkgs/downloader/io.go
  11. +401
    -153
      common/pkgs/grpc/agent/agent.pb.go
  12. +31
    -16
      common/pkgs/grpc/agent/agent.proto
  13. +122
    -146
      common/pkgs/grpc/agent/agent_grpc.pb.go
  14. +67
    -91
      common/pkgs/grpc/agent/client.go
  15. +40
    -16
      common/pkgs/ioswitch/ioswitch.go
  16. +83
    -0
      common/pkgs/ioswitch/manager.go
  17. +13
    -17
      common/pkgs/ioswitch/ops/chunked_join.go
  18. +18
    -22
      common/pkgs/ioswitch/ops/chunked_split.go
  19. +41
    -20
      common/pkgs/ioswitch/ops/clone.go
  20. +37
    -34
      common/pkgs/ioswitch/ops/ec.go
  21. +13
    -13
      common/pkgs/ioswitch/ops/file.go
  22. +67
    -32
      common/pkgs/ioswitch/ops/grpc.go
  23. +16
    -23
      common/pkgs/ioswitch/ops/ipfs.go
  24. +13
    -17
      common/pkgs/ioswitch/ops/join.go
  25. +11
    -15
      common/pkgs/ioswitch/ops/length.go
  26. +30
    -0
      common/pkgs/ioswitch/ops/store.go
  27. +281
    -0
      common/pkgs/ioswitch/plans/agent.go
  28. +0
    -276
      common/pkgs/ioswitch/plans/agent_plan.go
  29. +92
    -140
      common/pkgs/ioswitch/plans/executor.go
  30. +48
    -63
      common/pkgs/ioswitch/plans/plan_builder.go
  31. +10
    -0
      common/pkgs/ioswitch/plans/utils.go
  32. +89
    -232
      common/pkgs/ioswitch/switch.go
  33. +21
    -0
      common/pkgs/ioswitch/utils.go
  34. +0
    -120
      common/pkgs/mq/agent/io.go
  35. +0
    -2
      common/pkgs/mq/agent/server.go
  36. +1
    -0
      go.mod
  37. +2
    -0
      go.sum
  38. +14
    -42
      scanner/internal/event/check_package_redundancy.go
  39. +15
    -21
      scanner/internal/event/clean_pinned.go

+ 109
- 19
agent/internal/grpc/io.go View File

@@ -1,15 +1,38 @@
package grpc

import (
"context"
"fmt"
"io"
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/common/utils/serder"
agentserver "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

func (s *Service) ExecuteIOPlan(ctx context.Context, req *agtrpc.ExecuteIOPlanReq) (*agtrpc.ExecuteIOPlanResp, error) {
plan, err := serder.JSONToObjectEx[ioswitch.Plan]([]byte(req.Plan))
if err != nil {
return nil, fmt.Errorf("deserializing plan: %w", err)
}

logger.WithField("PlanID", plan.ID).Infof("begin execute io plan")

sw := ioswitch.NewSwitch(plan)
s.swMgr.Add(sw)

err = sw.Run(ctx)
if err != nil {
return nil, fmt.Errorf("running io plan: %w", err)
}

return &agtrpc.ExecuteIOPlanResp{}, nil
}

func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error {
msg, err := server.Recv()
if err != nil {
@@ -21,12 +44,24 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error {

logger.
WithField("PlanID", msg.PlanID).
WithField("StreamID", msg.StreamID).
Debugf("receive stream from grpc")
WithField("VarID", msg.VarID).
Debugf("receive stream")

// 同一批Plan中每个节点的Plan的启动时间有先后,但最多不应该超过30秒
ctx, cancel := context.WithTimeout(server.Context(), time.Second*30)
defer cancel()

sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(msg.PlanID))
if sw == nil {
return fmt.Errorf("plan not found")
}

pr, pw := io.Pipe()

s.sw.StreamReady(ioswitch.PlanID(msg.PlanID), ioswitch.NewStream(ioswitch.StreamID(msg.StreamID), pr))
sw.PutVars(&ioswitch.StreamVar{
ID: ioswitch.VarID(msg.VarID),
Stream: pr,
})

// 然后读取文件数据
var recvSize int64
@@ -36,7 +71,7 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error {
// 读取客户端数据失败
// 即使err是io.EOF,只要没有收到客户端包含EOF数据包就被断开了连接,就认为接收失败
if err != nil {
// 关闭文件写入,不需要返回的hash和error
// 关闭文件写入
pw.CloseWithError(io.ErrClosedPipe)
logger.WithField("ReceiveSize", recvSize).
Warnf("recv message failed, err: %s", err.Error())
@@ -45,7 +80,7 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error {

err = io2.WriteAll(pw, msg.Data)
if err != nil {
// 关闭文件写入,不需要返回的hash和error
// 关闭文件写入
pw.CloseWithError(io.ErrClosedPipe)
logger.Warnf("write data to file failed, err: %s", err.Error())
return fmt.Errorf("write data to file failed, err: %w", err)
@@ -73,25 +108,33 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error {
}
}

func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserver.Agent_FetchStreamServer) error {
func (s *Service) GetStream(req *agentserver.GetStreamReq, server agentserver.Agent_GetStreamServer) error {
logger.
WithField("PlanID", req.PlanID).
WithField("StreamID", req.StreamID).
Debugf("send stream by grpc")
WithField("VarID", req.VarID).
Debugf("send stream")

// 同上
ctx, cancel := context.WithTimeout(server.Context(), time.Second*30)
defer cancel()

sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(req.PlanID))
if sw == nil {
return fmt.Errorf("plan not found")
}

strs, err := s.sw.WaitStreams(ioswitch.PlanID(req.PlanID), ioswitch.StreamID(req.StreamID))
strVar := &ioswitch.StreamVar{
ID: ioswitch.VarID(req.VarID),
}
err := sw.BindVars(server.Context(), strVar)
if err != nil {
logger.
WithField("PlanID", req.PlanID).
WithField("StreamID", req.StreamID).
Warnf("watting stream: %s", err.Error())
return fmt.Errorf("watting stream: %w", err)
return fmt.Errorf("binding vars: %w", err)
}

reader := strs[0].Stream
reader := strVar.Stream
defer reader.Close()

buf := make([]byte, 4096)
buf := make([]byte, 1024*64)
readAllCnt := 0
for {
readCnt, err := reader.Read(buf)
@@ -105,7 +148,7 @@ func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserve
if err != nil {
logger.
WithField("PlanID", req.PlanID).
WithField("StreamID", req.StreamID).
WithField("VarID", req.VarID).
Warnf("send stream data failed, err: %s", err.Error())
return fmt.Errorf("send stream data failed, err: %w", err)
}
@@ -115,7 +158,7 @@ func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserve
if err == io.EOF {
logger.
WithField("PlanID", req.PlanID).
WithField("StreamID", req.StreamID).
WithField("VarID", req.VarID).
Debugf("send data size %d", readAllCnt)
// 发送EOF消息
server.Send(&agentserver.StreamDataPacket{
@@ -128,9 +171,56 @@ func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserve
if err != nil && err != io.ErrUnexpectedEOF {
logger.
WithField("PlanID", req.PlanID).
WithField("StreamID", req.StreamID).
WithField("VarID", req.VarID).
Warnf("reading stream data: %s", err.Error())
return fmt.Errorf("reading stream data: %w", err)
}
}
}

func (s *Service) SendVar(ctx context.Context, req *agtrpc.SendVarReq) (*agtrpc.SendVarResp, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()

sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(req.PlanID))
if sw == nil {
return nil, fmt.Errorf("plan not found")
}

v, err := serder.JSONToObjectEx[ioswitch.Var]([]byte(req.Var))
if err != nil {
return nil, fmt.Errorf("deserializing var: %w", err)
}

sw.PutVars(v)
return &agtrpc.SendVarResp{}, nil
}

func (s *Service) GetVar(ctx context.Context, req *agtrpc.GetVarReq) (*agtrpc.GetVarResp, error) {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
defer cancel()

sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(req.PlanID))
if sw == nil {
return nil, fmt.Errorf("plan not found")
}

v, err := serder.JSONToObjectEx[ioswitch.Var]([]byte(req.Var))
if err != nil {
return nil, fmt.Errorf("deserializing var: %w", err)
}

err = sw.BindVars(ctx, v)
if err != nil {
return nil, fmt.Errorf("binding vars: %w", err)
}

vd, err := serder.ObjectToJSON(v)
if err != nil {
return nil, fmt.Errorf("serializing var: %w", err)
}

return &agtrpc.GetVarResp{
Var: string(vd),
}, nil
}

+ 3
- 126
agent/internal/grpc/service.go View File

@@ -1,140 +1,17 @@
package grpc

import (
"fmt"
"io"

log "gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/io2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
agentserver "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

type Service struct {
agentserver.AgentServer
sw *ioswitch.Switch
swMgr *ioswitch.Manager
}

func NewService(sw *ioswitch.Switch) *Service {
func NewService(swMgr *ioswitch.Manager) *Service {
return &Service{
sw: sw,
}
}

func (s *Service) SendIPFSFile(server agentserver.Agent_SendIPFSFileServer) error {
log.Debugf("client upload file")

ipfsCli, err := stgglb.IPFSPool.Acquire()
if err != nil {
log.Warnf("new ipfs client: %s", err.Error())
return fmt.Errorf("new ipfs client: %w", err)
}
defer ipfsCli.Close()

writer, err := ipfsCli.CreateFileStream()
if err != nil {
log.Warnf("create file failed, err: %s", err.Error())
return fmt.Errorf("create file failed, err: %w", err)
}

// 然后读取文件数据
var recvSize int64
for {
msg, err := server.Recv()

// 读取客户端数据失败
// 即使err是io.EOF,只要没有收到客户端包含EOF数据包就被断开了连接,就认为接收失败
if err != nil {
// 关闭文件写入,不需要返回的hash和error
writer.Abort(io.ErrClosedPipe)
log.WithField("ReceiveSize", recvSize).
Warnf("recv message failed, err: %s", err.Error())
return fmt.Errorf("recv message failed, err: %w", err)
}

err = io2.WriteAll(writer, msg.Data)
if err != nil {
// 关闭文件写入,不需要返回的hash和error
writer.Abort(io.ErrClosedPipe)
log.Warnf("write data to file failed, err: %s", err.Error())
return fmt.Errorf("write data to file failed, err: %w", err)
}

recvSize += int64(len(msg.Data))

if msg.Type == agentserver.StreamDataPacketType_EOF {
// 客户端明确说明文件传输已经结束,那么结束写入,获得文件Hash
hash, err := writer.Finish()
if err != nil {
log.Warnf("finish writing failed, err: %s", err.Error())
return fmt.Errorf("finish writing failed, err: %w", err)
}

// 并将结果返回到客户端
err = server.SendAndClose(&agentserver.SendIPFSFileResp{
FileHash: hash,
})
if err != nil {
log.Warnf("send response failed, err: %s", err.Error())
return fmt.Errorf("send response failed, err: %w", err)
}

log.Debugf("%d bytes received ", recvSize)
return nil
}
}
}

func (s *Service) GetIPFSFile(req *agentserver.GetIPFSFileReq, server agentserver.Agent_GetIPFSFileServer) error {
log.WithField("FileHash", req.FileHash).Debugf("client download file")

ipfsCli, err := stgglb.IPFSPool.Acquire()
if err != nil {
log.Warnf("new ipfs client: %s", err.Error())
return fmt.Errorf("new ipfs client: %w", err)
}
defer ipfsCli.Close()

reader, err := ipfsCli.OpenRead(req.FileHash)
if err != nil {
log.Warnf("open file %s to read failed, err: %s", req.FileHash, err.Error())
return fmt.Errorf("open file to read failed, err: %w", err)
}
defer reader.Close()

buf := make([]byte, 1024)
readAllCnt := 0
for {
readCnt, err := reader.Read(buf)

if readCnt > 0 {
readAllCnt += readCnt
err = server.Send(&agentserver.FileDataPacket{
Type: agentserver.StreamDataPacketType_Data,
Data: buf[:readCnt],
})
if err != nil {
log.WithField("FileHash", req.FileHash).
Warnf("send file data failed, err: %s", err.Error())
return fmt.Errorf("send file data failed, err: %w", err)
}
}

// 文件读取完毕
if err == io.EOF {
log.WithField("FileHash", req.FileHash).Debugf("send data size %d", readAllCnt)
// 发送EOF消息
server.Send(&agentserver.FileDataPacket{
Type: agentserver.StreamDataPacketType_EOF,
})
return nil
}

// io.ErrUnexpectedEOF没有读满整个buf就遇到了EOF,此时正常发送剩余数据即可。除了这两个错误之外,其他错误都中断操作
if err != nil && err != io.ErrUnexpectedEOF {
log.Warnf("read file %s data failed, err: %s", req.FileHash, err.Error())
return fmt.Errorf("read file data failed, err: %w", err)
}
swMgr: swMgr,
}
}

+ 0
- 65
agent/internal/mq/io.go View File

@@ -1,65 +0,0 @@
package mq

import (
"time"

"gitlink.org.cn/cloudream/common/consts/errorcode"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/mq"
mytask "gitlink.org.cn/cloudream/storage/agent/internal/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
)

func (svc *Service) SetupIOPlan(msg *agtmq.SetupIOPlan) (*agtmq.SetupIOPlanResp, *mq.CodeMessage) {
err := svc.sw.SetupPlan(msg.Plan)
if err != nil {
logger.WithField("PlanID", msg.Plan.ID).Warnf("adding plan: %s", err.Error())
return nil, mq.Failed(errorcode.OperationFailed, "adding plan failed")
}

return mq.ReplyOK(agtmq.NewSetupIOPlanResp())
}

func (svc *Service) StartIOPlan(msg *agtmq.StartIOPlan) (*agtmq.StartIOPlanResp, *mq.CodeMessage) {
tsk := svc.taskManager.StartNew(mytask.NewExecuteIOPlan(msg.PlanID))
return mq.ReplyOK(agtmq.NewStartIOPlanResp(tsk.ID()))
}

func (svc *Service) WaitIOPlan(msg *agtmq.WaitIOPlan) (*agtmq.WaitIOPlanResp, *mq.CodeMessage) {
tsk := svc.taskManager.FindByID(msg.TaskID)
if tsk == nil {
return nil, mq.Failed(errorcode.TaskNotFound, "task not found")
}

if msg.WaitTimeoutMs == 0 {
tsk.Wait()

errMsg := ""
if tsk.Error() != nil {
errMsg = tsk.Error().Error()
}

planTsk := tsk.Body().(*mytask.ExecuteIOPlan)
return mq.ReplyOK(agtmq.NewWaitIOPlanResp(true, errMsg, planTsk.Result))

} else {
if tsk.WaitTimeout(time.Duration(msg.WaitTimeoutMs) * time.Millisecond) {

errMsg := ""
if tsk.Error() != nil {
errMsg = tsk.Error().Error()
}

planTsk := tsk.Body().(*mytask.ExecuteIOPlan)
return mq.ReplyOK(agtmq.NewWaitIOPlanResp(true, errMsg, planTsk.Result))
}

return mq.ReplyOK(agtmq.NewWaitIOPlanResp(false, "", ioswitch.PlanResult{}))
}
}

func (svc *Service) CancelIOPlan(msg *agtmq.CancelIOPlan) (*agtmq.CancelIOPlanResp, *mq.CodeMessage) {
svc.sw.CancelPlan(msg.PlanID)
return mq.ReplyOK(agtmq.NewCancelIOPlanResp())
}

+ 3
- 3
agent/internal/mq/service.go View File

@@ -7,12 +7,12 @@ import (

type Service struct {
taskManager *task.Manager
sw *ioswitch.Switch
swMgr *ioswitch.Manager
}

func NewService(taskMgr *task.Manager, sw *ioswitch.Switch) *Service {
func NewService(taskMgr *task.Manager, swMgr *ioswitch.Manager) *Service {
return &Service{
taskManager: taskMgr,
sw: sw,
swMgr: swMgr,
}
}

+ 0
- 45
agent/internal/task/execute_io_plan.go View File

@@ -1,45 +0,0 @@
package task

import (
"fmt"
"time"

"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/pkgs/task"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

// TODO 临时使用Task来等待Plan执行进度
type ExecuteIOPlan struct {
PlanID ioswitch.PlanID
Result ioswitch.PlanResult
}

func NewExecuteIOPlan(planID ioswitch.PlanID) *ExecuteIOPlan {
return &ExecuteIOPlan{
PlanID: planID,
}
}

func (t *ExecuteIOPlan) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) {
log := logger.WithType[ExecuteIOPlan]("Task")
log.Debugf("begin with %v", logger.FormatStruct(t))
defer log.Debugf("end")

ret, err := ctx.sw.ExecutePlan(t.PlanID)
if err != nil {
err := fmt.Errorf("executing io plan: %w", err)
log.WithField("PlanID", t.PlanID).Warn(err.Error())

complete(err, CompleteOption{
RemovingDelay: time.Minute,
})
return
}

t.Result = ret

complete(nil, CompleteOption{
RemovingDelay: time.Minute,
})
}

+ 3
- 3
agent/internal/task/task.go View File

@@ -10,7 +10,7 @@ import (

type TaskContext struct {
distlock *distlock.Service
sw *ioswitch.Switch
swMgr *ioswitch.Manager
connectivity *connectivity.Collector
downloader *downloader.Downloader
}
@@ -27,10 +27,10 @@ type Task = task.Task[TaskContext]

type CompleteOption = task.CompleteOption

func NewManager(distlock *distlock.Service, sw *ioswitch.Switch, connectivity *connectivity.Collector, downloader *downloader.Downloader) Manager {
func NewManager(distlock *distlock.Service, swMgr *ioswitch.Manager, connectivity *connectivity.Collector, downloader *downloader.Downloader) Manager {
return task.NewManager(TaskContext{
distlock: distlock,
sw: sw,
swMgr: swMgr,
connectivity: connectivity,
downloader: downloader,
})


+ 1
- 1
agent/main.go View File

@@ -92,7 +92,7 @@ func main() {
log.Fatalf("new ipfs failed, err: %s", err.Error())
}

sw := ioswitch.NewSwitch()
sw := ioswitch.NewManager()

dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol)



+ 12
- 0
common/globals/utils.go View File

@@ -0,0 +1,12 @@
package stgglb

import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"

// 根据当前节点与目标地址的距离关系,选择合适的地址
func SelectGRPCAddress(node *cdssdk.Node) (string, int) {
if Local != nil && Local.LocationID == node.LocationID {
return node.LocalIP, node.LocalGRPCPort
}

return node.ExternalIP, node.ExternalGRPCPort
}

+ 14
- 17
common/pkgs/cmd/upload_objects.go View File

@@ -1,6 +1,7 @@
package cmd

import (
"context"
"fmt"
"io"
"math"
@@ -17,6 +18,7 @@ import (
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/connectivity"
"gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/plans"
"gitlink.org.cn/cloudream/storage/common/pkgs/iterator"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator"
@@ -227,32 +229,27 @@ func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) {
}

// 否则发送到agent上传
// 如果客户端与节点在同一个地域,则使用内网地址连接节点
nodeIP := uploadNode.Node.ExternalIP
grpcPort := uploadNode.Node.ExternalGRPCPort
if uploadNode.IsSameLocation {
nodeIP = uploadNode.Node.LocalIP
grpcPort = uploadNode.Node.LocalGRPCPort

logger.Debugf("client and node %d are at the same location, use local ip", uploadNode.Node.NodeID)
}

fileHash, err := uploadToNode(file, nodeIP, grpcPort)
fileHash, err := uploadToNode(file, uploadNode.Node)
if err != nil {
return "", fmt.Errorf("upload to node %s failed, err: %w", nodeIP, err)
return "", fmt.Errorf("uploading to node %v: %w", uploadNode.Node.NodeID, err)
}

return fileHash, nil
}

func uploadToNode(file io.Reader, nodeIP string, grpcPort int) (string, error) {
rpcCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort)
func uploadToNode(file io.Reader, node cdssdk.Node) (string, error) {
plan := plans.NewPlanBuilder()
str, v := plan.AtExecutor().WillWrite()
v.To(node).IPFSWrite().ToExecutor().Store("fileHash")

exec := plan.Execute()
exec.BeginWrite(io.NopCloser(file), str)
ret, err := exec.Wait(context.TODO())
if err != nil {
return "", fmt.Errorf("new agent rpc client: %w", err)
return "", err
}
defer rpcCli.Close()

return rpcCli.SendIPFSFile(file)
return ret["fileHash"].(string), nil
}

func uploadToLocalIPFS(file io.Reader, nodeID cdssdk.NodeID, shouldPin bool) (string, error) {


+ 5
- 12
common/pkgs/downloader/io.go View File

@@ -1,6 +1,7 @@
package downloader

import (
"context"
"fmt"
"io"

@@ -109,22 +110,14 @@ func (r *IPFSReader) fromNode() (io.ReadCloser, error) {
fileStr := planBld.AtAgent(r.node).IPFSRead(r.fileHash, ipfs.ReadOption{
Offset: r.offset,
Length: -1,
}).ToExecutor()
}).ToExecutor().WillRead()

plan, err := planBld.Build()
if err != nil {
return nil, fmt.Errorf("building plan: %w", err)
}

waiter, err := plans.Execute(*plan)
if err != nil {
return nil, fmt.Errorf("execute plan: %w", err)
}
exec := planBld.Execute()
go func() {
waiter.Wait()
exec.Wait(context.Background())
}()

return waiter.ReadStream(fileStr)
return exec.BeginRead(fileStr)
}

func (r *IPFSReader) fromLocalIPFS() (io.ReadCloser, error) {


+ 401
- 153
common/pkgs/grpc/agent/agent.pb.go View File

@@ -71,18 +71,16 @@ func (StreamDataPacketType) EnumDescriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{0}
}

// 文件数据。注意:只在Type为Data的时候,Data字段才能有数据
type FileDataPacket struct {
type ExecuteIOPlanReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"`
Plan string `protobuf:"bytes,1,opt,name=Plan,proto3" json:"Plan,omitempty"`
}

func (x *FileDataPacket) Reset() {
*x = FileDataPacket{}
func (x *ExecuteIOPlanReq) Reset() {
*x = ExecuteIOPlanReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -90,13 +88,13 @@ func (x *FileDataPacket) Reset() {
}
}

func (x *FileDataPacket) String() string {
func (x *ExecuteIOPlanReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*FileDataPacket) ProtoMessage() {}
func (*ExecuteIOPlanReq) ProtoMessage() {}

func (x *FileDataPacket) ProtoReflect() protoreflect.Message {
func (x *ExecuteIOPlanReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -108,35 +106,26 @@ func (x *FileDataPacket) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}

// Deprecated: Use FileDataPacket.ProtoReflect.Descriptor instead.
func (*FileDataPacket) Descriptor() ([]byte, []int) {
// Deprecated: Use ExecuteIOPlanReq.ProtoReflect.Descriptor instead.
func (*ExecuteIOPlanReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{0}
}

func (x *FileDataPacket) GetType() StreamDataPacketType {
func (x *ExecuteIOPlanReq) GetPlan() string {
if x != nil {
return x.Type
return x.Plan
}
return StreamDataPacketType_EOF
}

func (x *FileDataPacket) GetData() []byte {
if x != nil {
return x.Data
}
return nil
return ""
}

type SendIPFSFileResp struct {
type ExecuteIOPlanResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

FileHash string `protobuf:"bytes,1,opt,name=FileHash,proto3" json:"FileHash,omitempty"`
}

func (x *SendIPFSFileResp) Reset() {
*x = SendIPFSFileResp{}
func (x *ExecuteIOPlanResp) Reset() {
*x = ExecuteIOPlanResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -144,13 +133,13 @@ func (x *SendIPFSFileResp) Reset() {
}
}

func (x *SendIPFSFileResp) String() string {
func (x *ExecuteIOPlanResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendIPFSFileResp) ProtoMessage() {}
func (*ExecuteIOPlanResp) ProtoMessage() {}

func (x *SendIPFSFileResp) ProtoReflect() protoreflect.Message {
func (x *ExecuteIOPlanResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -162,28 +151,23 @@ func (x *SendIPFSFileResp) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}

// Deprecated: Use SendIPFSFileResp.ProtoReflect.Descriptor instead.
func (*SendIPFSFileResp) Descriptor() ([]byte, []int) {
// Deprecated: Use ExecuteIOPlanResp.ProtoReflect.Descriptor instead.
func (*ExecuteIOPlanResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{1}
}

func (x *SendIPFSFileResp) GetFileHash() string {
if x != nil {
return x.FileHash
}
return ""
}

type GetIPFSFileReq struct {
// 文件数据。注意:只在Type为Data的时候,Data字段才能有数据
type FileDataPacket struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

FileHash string `protobuf:"bytes,1,opt,name=FileHash,proto3" json:"FileHash,omitempty"`
Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"`
}

func (x *GetIPFSFileReq) Reset() {
*x = GetIPFSFileReq{}
func (x *FileDataPacket) Reset() {
*x = FileDataPacket{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -191,13 +175,13 @@ func (x *GetIPFSFileReq) Reset() {
}
}

func (x *GetIPFSFileReq) String() string {
func (x *FileDataPacket) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetIPFSFileReq) ProtoMessage() {}
func (*FileDataPacket) ProtoMessage() {}

func (x *GetIPFSFileReq) ProtoReflect() protoreflect.Message {
func (x *FileDataPacket) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -209,16 +193,23 @@ func (x *GetIPFSFileReq) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}

// Deprecated: Use GetIPFSFileReq.ProtoReflect.Descriptor instead.
func (*GetIPFSFileReq) Descriptor() ([]byte, []int) {
// Deprecated: Use FileDataPacket.ProtoReflect.Descriptor instead.
func (*FileDataPacket) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{2}
}

func (x *GetIPFSFileReq) GetFileHash() string {
func (x *FileDataPacket) GetType() StreamDataPacketType {
if x != nil {
return x.FileHash
return x.Type
}
return ""
return StreamDataPacketType_EOF
}

func (x *FileDataPacket) GetData() []byte {
if x != nil {
return x.Data
}
return nil
}

// 注:EOF时data也可能有数据
@@ -227,10 +218,10 @@ type StreamDataPacket struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"`
PlanID string `protobuf:"bytes,2,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
StreamID string `protobuf:"bytes,3,opt,name=StreamID,proto3" json:"StreamID,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"`
Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"`
PlanID string `protobuf:"bytes,2,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
VarID int32 `protobuf:"varint,3,opt,name=VarID,proto3" json:"VarID,omitempty"`
Data []byte `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"`
}

func (x *StreamDataPacket) Reset() {
@@ -279,11 +270,11 @@ func (x *StreamDataPacket) GetPlanID() string {
return ""
}

func (x *StreamDataPacket) GetStreamID() string {
func (x *StreamDataPacket) GetVarID() int32 {
if x != nil {
return x.StreamID
return x.VarID
}
return ""
return 0
}

func (x *StreamDataPacket) GetData() []byte {
@@ -331,17 +322,17 @@ func (*SendStreamResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{4}
}

type FetchStreamReq struct {
type GetStreamReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
StreamID string `protobuf:"bytes,2,opt,name=StreamID,proto3" json:"StreamID,omitempty"`
PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
VarID int32 `protobuf:"varint,2,opt,name=VarID,proto3" json:"VarID,omitempty"`
}

func (x *FetchStreamReq) Reset() {
*x = FetchStreamReq{}
func (x *GetStreamReq) Reset() {
*x = GetStreamReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -349,13 +340,13 @@ func (x *FetchStreamReq) Reset() {
}
}

func (x *FetchStreamReq) String() string {
func (x *GetStreamReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*FetchStreamReq) ProtoMessage() {}
func (*GetStreamReq) ProtoMessage() {}

func (x *FetchStreamReq) ProtoReflect() protoreflect.Message {
func (x *GetStreamReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[5]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -367,21 +358,216 @@ func (x *FetchStreamReq) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}

// Deprecated: Use FetchStreamReq.ProtoReflect.Descriptor instead.
func (*FetchStreamReq) Descriptor() ([]byte, []int) {
// Deprecated: Use GetStreamReq.ProtoReflect.Descriptor instead.
func (*GetStreamReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{5}
}

func (x *FetchStreamReq) GetPlanID() string {
func (x *GetStreamReq) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *GetStreamReq) GetVarID() int32 {
if x != nil {
return x.VarID
}
return 0
}

type SendVarReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
Var string `protobuf:"bytes,2,opt,name=Var,proto3" json:"Var,omitempty"`
}

func (x *SendVarReq) Reset() {
*x = SendVarReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SendVarReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendVarReq) ProtoMessage() {}

func (x *SendVarReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SendVarReq.ProtoReflect.Descriptor instead.
func (*SendVarReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{6}
}

func (x *SendVarReq) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *SendVarReq) GetVar() string {
if x != nil {
return x.Var
}
return ""
}

type SendVarResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}

func (x *SendVarResp) Reset() {
*x = SendVarResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *SendVarResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*SendVarResp) ProtoMessage() {}

func (x *SendVarResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use SendVarResp.ProtoReflect.Descriptor instead.
func (*SendVarResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{7}
}

type GetVarReq struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"`
Var string `protobuf:"bytes,2,opt,name=Var,proto3" json:"Var,omitempty"`
}

func (x *GetVarReq) Reset() {
*x = GetVarReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *GetVarReq) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetVarReq) ProtoMessage() {}

func (x *GetVarReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use GetVarReq.ProtoReflect.Descriptor instead.
func (*GetVarReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{8}
}

func (x *GetVarReq) GetPlanID() string {
if x != nil {
return x.PlanID
}
return ""
}

func (x *FetchStreamReq) GetStreamID() string {
func (x *GetVarReq) GetVar() string {
if x != nil {
return x.Var
}
return ""
}

type GetVarResp struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Var string `protobuf:"bytes,1,opt,name=Var,proto3" json:"Var,omitempty"`
}

func (x *GetVarResp) Reset() {
*x = GetVarResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *GetVarResp) String() string {
return protoimpl.X.MessageStringOf(x)
}

func (*GetVarResp) ProtoMessage() {}

func (x *GetVarResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}

// Deprecated: Use GetVarResp.ProtoReflect.Descriptor instead.
func (*GetVarResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{9}
}

func (x *GetVarResp) GetVar() string {
if x != nil {
return x.StreamID
return x.Var
}
return ""
}
@@ -395,7 +581,7 @@ type PingReq struct {
func (x *PingReq) Reset() {
*x = PingReq{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6]
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -408,7 +594,7 @@ func (x *PingReq) String() string {
func (*PingReq) ProtoMessage() {}

func (x *PingReq) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6]
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -421,7 +607,7 @@ func (x *PingReq) ProtoReflect() protoreflect.Message {

// Deprecated: Use PingReq.ProtoReflect.Descriptor instead.
func (*PingReq) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{6}
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{10}
}

type PingResp struct {
@@ -433,7 +619,7 @@ type PingResp struct {
func (x *PingResp) Reset() {
*x = PingResp{}
if protoimpl.UnsafeEnabled {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7]
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -446,7 +632,7 @@ func (x *PingResp) String() string {
func (*PingResp) ProtoMessage() {}

func (x *PingResp) ProtoReflect() protoreflect.Message {
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7]
mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -459,62 +645,70 @@ func (x *PingResp) ProtoReflect() protoreflect.Message {

// Deprecated: Use PingResp.ProtoReflect.Descriptor instead.
func (*PingResp) Descriptor() ([]byte, []int) {
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{7}
return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{11}
}

var File_pkgs_grpc_agent_agent_proto protoreflect.FileDescriptor

var file_pkgs_grpc_agent_agent_proto_rawDesc = []byte{
0x0a, 0x1b, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x61, 0x67, 0x65, 0x6e,
0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, 0x0a,
0x0e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12,
0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a,
0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65,
0x71, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6c, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x50, 0x6c, 0x61, 0x6e, 0x22, 0x13, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x4f, 0x0a, 0x0e, 0x46, 0x69,
0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x04,
0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x53, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70,
0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x7f, 0x0a, 0x10, 0x53,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12,
0x29, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e,
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74,
0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61,
0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x2e,
0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65,
0x73, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0x2c,
0x0a, 0x0e, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71,
0x12, 0x1a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0x85, 0x01, 0x0a,
0x10, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65,
0x74, 0x12, 0x29, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
0x15, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b,
0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c,
0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44,
0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04,
0x44, 0x61, 0x74, 0x61, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x44, 0x0a, 0x0e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e,
0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44,
0x12, 0x1a, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x22, 0x09, 0x0a, 0x07,
0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x22, 0x0a, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x52,
0x65, 0x73, 0x70, 0x2a, 0x37, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74,
0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45,
0x4f, 0x46, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x01, 0x12, 0x0c,
0x0a, 0x08, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x72, 0x67, 0x73, 0x10, 0x02, 0x32, 0x80, 0x02, 0x0a,
0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50,
0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74,
0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x11, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50,
0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x33,
0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e,
0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f,
0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22,
0x00, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x12, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61,
0x63, 0x6b, 0x65, 0x74, 0x1a, 0x0f, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x35, 0x0a, 0x0b, 0x46, 0x65, 0x74,
0x63, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0f, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68,
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65,
0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01,
0x12, 0x1d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x08, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52,
0x65, 0x71, 0x1a, 0x09, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42,
0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c,
0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e,
0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28,
0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x10, 0x0a, 0x0e,
0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x3c,
0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16,
0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18,
0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x22, 0x36, 0x0a, 0x0a,
0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c,
0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e,
0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x03, 0x56, 0x61, 0x72, 0x22, 0x0d, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52,
0x65, 0x73, 0x70, 0x22, 0x35, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71,
0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x56, 0x61, 0x72, 0x22, 0x1e, 0x0a, 0x0a, 0x47, 0x65,
0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x56, 0x61, 0x72, 0x22, 0x09, 0x0a, 0x07, 0x50, 0x69,
0x6e, 0x67, 0x52, 0x65, 0x71, 0x22, 0x0a, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73,
0x70, 0x2a, 0x37, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50,
0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x4f, 0x46,
0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08,
0x53, 0x65, 0x6e, 0x64, 0x41, 0x72, 0x67, 0x73, 0x10, 0x02, 0x32, 0x96, 0x02, 0x0a, 0x05, 0x41,
0x67, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49,
0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x11, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49,
0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x12, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75,
0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x34,
0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x11, 0x2e, 0x53,
0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a,
0x0f, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70,
0x22, 0x00, 0x28, 0x01, 0x12, 0x31, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61,
0x6d, 0x12, 0x0d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71,
0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63,
0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x26, 0x0a, 0x07, 0x53, 0x65, 0x6e, 0x64, 0x56,
0x61, 0x72, 0x12, 0x0b, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, 0x1a,
0x0c, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12,
0x23, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x12, 0x0a, 0x2e, 0x47, 0x65, 0x74, 0x56,
0x61, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65,
0x73, 0x70, 0x22, 0x00, 0x12, 0x1d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x08, 0x2e, 0x50,
0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x1a, 0x09, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73,
0x70, 0x22, 0x00, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
@@ -530,36 +724,42 @@ func file_pkgs_grpc_agent_agent_proto_rawDescGZIP() []byte {
}

var file_pkgs_grpc_agent_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
var file_pkgs_grpc_agent_agent_proto_goTypes = []interface{}{
(StreamDataPacketType)(0), // 0: StreamDataPacketType
(*FileDataPacket)(nil), // 1: FileDataPacket
(*SendIPFSFileResp)(nil), // 2: SendIPFSFileResp
(*GetIPFSFileReq)(nil), // 3: GetIPFSFileReq
(*ExecuteIOPlanReq)(nil), // 1: ExecuteIOPlanReq
(*ExecuteIOPlanResp)(nil), // 2: ExecuteIOPlanResp
(*FileDataPacket)(nil), // 3: FileDataPacket
(*StreamDataPacket)(nil), // 4: StreamDataPacket
(*SendStreamResp)(nil), // 5: SendStreamResp
(*FetchStreamReq)(nil), // 6: FetchStreamReq
(*PingReq)(nil), // 7: PingReq
(*PingResp)(nil), // 8: PingResp
(*GetStreamReq)(nil), // 6: GetStreamReq
(*SendVarReq)(nil), // 7: SendVarReq
(*SendVarResp)(nil), // 8: SendVarResp
(*GetVarReq)(nil), // 9: GetVarReq
(*GetVarResp)(nil), // 10: GetVarResp
(*PingReq)(nil), // 11: PingReq
(*PingResp)(nil), // 12: PingResp
}
var file_pkgs_grpc_agent_agent_proto_depIdxs = []int32{
0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType
0, // 1: StreamDataPacket.Type:type_name -> StreamDataPacketType
1, // 2: Agent.SendIPFSFile:input_type -> FileDataPacket
3, // 3: Agent.GetIPFSFile:input_type -> GetIPFSFileReq
4, // 4: Agent.SendStream:input_type -> StreamDataPacket
6, // 5: Agent.FetchStream:input_type -> FetchStreamReq
7, // 6: Agent.Ping:input_type -> PingReq
2, // 7: Agent.SendIPFSFile:output_type -> SendIPFSFileResp
1, // 8: Agent.GetIPFSFile:output_type -> FileDataPacket
5, // 9: Agent.SendStream:output_type -> SendStreamResp
4, // 10: Agent.FetchStream:output_type -> StreamDataPacket
8, // 11: Agent.Ping:output_type -> PingResp
7, // [7:12] is the sub-list for method output_type
2, // [2:7] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType
0, // 1: StreamDataPacket.Type:type_name -> StreamDataPacketType
1, // 2: Agent.ExecuteIOPlan:input_type -> ExecuteIOPlanReq
4, // 3: Agent.SendStream:input_type -> StreamDataPacket
6, // 4: Agent.GetStream:input_type -> GetStreamReq
7, // 5: Agent.SendVar:input_type -> SendVarReq
9, // 6: Agent.GetVar:input_type -> GetVarReq
11, // 7: Agent.Ping:input_type -> PingReq
2, // 8: Agent.ExecuteIOPlan:output_type -> ExecuteIOPlanResp
5, // 9: Agent.SendStream:output_type -> SendStreamResp
4, // 10: Agent.GetStream:output_type -> StreamDataPacket
8, // 11: Agent.SendVar:output_type -> SendVarResp
10, // 12: Agent.GetVar:output_type -> GetVarResp
12, // 13: Agent.Ping:output_type -> PingResp
8, // [8:14] is the sub-list for method output_type
2, // [2:8] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}

func init() { file_pkgs_grpc_agent_agent_proto_init() }
@@ -569,7 +769,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
}
if !protoimpl.UnsafeEnabled {
file_pkgs_grpc_agent_agent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FileDataPacket); i {
switch v := v.(*ExecuteIOPlanReq); i {
case 0:
return &v.state
case 1:
@@ -581,7 +781,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SendIPFSFileResp); i {
switch v := v.(*ExecuteIOPlanResp); i {
case 0:
return &v.state
case 1:
@@ -593,7 +793,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetIPFSFileReq); i {
switch v := v.(*FileDataPacket); i {
case 0:
return &v.state
case 1:
@@ -629,7 +829,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*FetchStreamReq); i {
switch v := v.(*GetStreamReq); i {
case 0:
return &v.state
case 1:
@@ -641,7 +841,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingReq); i {
switch v := v.(*SendVarReq); i {
case 0:
return &v.state
case 1:
@@ -653,6 +853,54 @@ func file_pkgs_grpc_agent_agent_proto_init() {
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*SendVarResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetVarReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetVarResp); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingReq); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_pkgs_grpc_agent_agent_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PingResp); i {
case 0:
return &v.state
@@ -671,7 +919,7 @@ func file_pkgs_grpc_agent_agent_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_pkgs_grpc_agent_agent_proto_rawDesc,
NumEnums: 1,
NumMessages: 8,
NumMessages: 12,
NumExtensions: 0,
NumServices: 1,
},


+ 31
- 16
common/pkgs/grpc/agent/agent.proto View File

@@ -5,50 +5,65 @@ syntax = "proto3";
option go_package = ".;agent";//grpc这里生效了



message ExecuteIOPlanReq {
string Plan = 1;
}

message ExecuteIOPlanResp {
}

enum StreamDataPacketType {
EOF = 0;
Data = 1;
SendArgs = 2;
}
// 文件数据。注意:只在Type为Data的时候,Data字段才能有数据
// 文件数据。注意:只在Type为Data或EOF的时候,Data字段才能有数据
message FileDataPacket {
StreamDataPacketType Type = 1;
bytes Data = 2;
}

message SendIPFSFileResp {
string FileHash = 1;
}

message GetIPFSFileReq {
string FileHash = 1;
}

// 注:EOF时data也可能有数据
message StreamDataPacket {
StreamDataPacketType Type = 1;
string PlanID = 2;
string StreamID = 3;
int32 VarID = 3;
bytes Data = 4;
}

message SendStreamResp {
message SendStreamResp {}

message GetStreamReq {
string PlanID = 1;
int32 VarID = 2;
}

message SendVarReq {
string PlanID = 1;
string Var = 2;
}
message SendVarResp {}

message FetchStreamReq {
message GetVarReq {
string PlanID = 1;
string StreamID = 2;
string Var = 2;
}
message GetVarResp {
string Var = 1; // 此处不使用VarID的原因是,Switch的BindVars函数还需要知道Var的类型
}

message PingReq {}
message PingResp {}

service Agent {
rpc SendIPFSFile(stream FileDataPacket)returns(SendIPFSFileResp){}
rpc GetIPFSFile(GetIPFSFileReq)returns(stream FileDataPacket){}
rpc ExecuteIOPlan(ExecuteIOPlanReq) returns(ExecuteIOPlanResp){}

rpc SendStream(stream StreamDataPacket)returns(SendStreamResp){}
rpc FetchStream(FetchStreamReq)returns(stream StreamDataPacket){}
rpc GetStream(GetStreamReq)returns(stream StreamDataPacket){}

rpc SendVar(SendVarReq)returns(SendVarResp){}
rpc GetVar(GetVarReq)returns(GetVarResp){}

rpc Ping(PingReq) returns(PingResp){}
}


+ 122
- 146
common/pkgs/grpc/agent/agent_grpc.pb.go View File

@@ -21,21 +21,23 @@ import (
const _ = grpc.SupportPackageIsVersion7

const (
Agent_SendIPFSFile_FullMethodName = "/Agent/SendIPFSFile"
Agent_GetIPFSFile_FullMethodName = "/Agent/GetIPFSFile"
Agent_SendStream_FullMethodName = "/Agent/SendStream"
Agent_FetchStream_FullMethodName = "/Agent/FetchStream"
Agent_Ping_FullMethodName = "/Agent/Ping"
Agent_ExecuteIOPlan_FullMethodName = "/Agent/ExecuteIOPlan"
Agent_SendStream_FullMethodName = "/Agent/SendStream"
Agent_GetStream_FullMethodName = "/Agent/GetStream"
Agent_SendVar_FullMethodName = "/Agent/SendVar"
Agent_GetVar_FullMethodName = "/Agent/GetVar"
Agent_Ping_FullMethodName = "/Agent/Ping"
)

// AgentClient is the client API for Agent service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type AgentClient interface {
SendIPFSFile(ctx context.Context, opts ...grpc.CallOption) (Agent_SendIPFSFileClient, error)
GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error)
ExecuteIOPlan(ctx context.Context, in *ExecuteIOPlanReq, opts ...grpc.CallOption) (*ExecuteIOPlanResp, error)
SendStream(ctx context.Context, opts ...grpc.CallOption) (Agent_SendStreamClient, error)
FetchStream(ctx context.Context, in *FetchStreamReq, opts ...grpc.CallOption) (Agent_FetchStreamClient, error)
GetStream(ctx context.Context, in *GetStreamReq, opts ...grpc.CallOption) (Agent_GetStreamClient, error)
SendVar(ctx context.Context, in *SendVarReq, opts ...grpc.CallOption) (*SendVarResp, error)
GetVar(ctx context.Context, in *GetVarReq, opts ...grpc.CallOption) (*GetVarResp, error)
Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error)
}

@@ -47,74 +49,17 @@ func NewAgentClient(cc grpc.ClientConnInterface) AgentClient {
return &agentClient{cc}
}

func (c *agentClient) SendIPFSFile(ctx context.Context, opts ...grpc.CallOption) (Agent_SendIPFSFileClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[0], Agent_SendIPFSFile_FullMethodName, opts...)
func (c *agentClient) ExecuteIOPlan(ctx context.Context, in *ExecuteIOPlanReq, opts ...grpc.CallOption) (*ExecuteIOPlanResp, error) {
out := new(ExecuteIOPlanResp)
err := c.cc.Invoke(ctx, Agent_ExecuteIOPlan_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
x := &agentSendIPFSFileClient{stream}
return x, nil
}

type Agent_SendIPFSFileClient interface {
Send(*FileDataPacket) error
CloseAndRecv() (*SendIPFSFileResp, error)
grpc.ClientStream
}

type agentSendIPFSFileClient struct {
grpc.ClientStream
}

func (x *agentSendIPFSFileClient) Send(m *FileDataPacket) error {
return x.ClientStream.SendMsg(m)
}

func (x *agentSendIPFSFileClient) CloseAndRecv() (*SendIPFSFileResp, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(SendIPFSFileResp)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}

func (c *agentClient) GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[1], Agent_GetIPFSFile_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &agentGetIPFSFileClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}

type Agent_GetIPFSFileClient interface {
Recv() (*FileDataPacket, error)
grpc.ClientStream
}

type agentGetIPFSFileClient struct {
grpc.ClientStream
}

func (x *agentGetIPFSFileClient) Recv() (*FileDataPacket, error) {
m := new(FileDataPacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
return out, nil
}

func (c *agentClient) SendStream(ctx context.Context, opts ...grpc.CallOption) (Agent_SendStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[2], Agent_SendStream_FullMethodName, opts...)
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[0], Agent_SendStream_FullMethodName, opts...)
if err != nil {
return nil, err
}
@@ -147,12 +92,12 @@ func (x *agentSendStreamClient) CloseAndRecv() (*SendStreamResp, error) {
return m, nil
}

func (c *agentClient) FetchStream(ctx context.Context, in *FetchStreamReq, opts ...grpc.CallOption) (Agent_FetchStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[3], Agent_FetchStream_FullMethodName, opts...)
func (c *agentClient) GetStream(ctx context.Context, in *GetStreamReq, opts ...grpc.CallOption) (Agent_GetStreamClient, error) {
stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[1], Agent_GetStream_FullMethodName, opts...)
if err != nil {
return nil, err
}
x := &agentFetchStreamClient{stream}
x := &agentGetStreamClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -162,16 +107,16 @@ func (c *agentClient) FetchStream(ctx context.Context, in *FetchStreamReq, opts
return x, nil
}

type Agent_FetchStreamClient interface {
type Agent_GetStreamClient interface {
Recv() (*StreamDataPacket, error)
grpc.ClientStream
}

type agentFetchStreamClient struct {
type agentGetStreamClient struct {
grpc.ClientStream
}

func (x *agentFetchStreamClient) Recv() (*StreamDataPacket, error) {
func (x *agentGetStreamClient) Recv() (*StreamDataPacket, error) {
m := new(StreamDataPacket)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
@@ -179,6 +124,24 @@ func (x *agentFetchStreamClient) Recv() (*StreamDataPacket, error) {
return m, nil
}

func (c *agentClient) SendVar(ctx context.Context, in *SendVarReq, opts ...grpc.CallOption) (*SendVarResp, error) {
out := new(SendVarResp)
err := c.cc.Invoke(ctx, Agent_SendVar_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *agentClient) GetVar(ctx context.Context, in *GetVarReq, opts ...grpc.CallOption) (*GetVarResp, error) {
out := new(GetVarResp)
err := c.cc.Invoke(ctx, Agent_GetVar_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *agentClient) Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error) {
out := new(PingResp)
err := c.cc.Invoke(ctx, Agent_Ping_FullMethodName, in, out, opts...)
@@ -192,10 +155,11 @@ func (c *agentClient) Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOp
// All implementations must embed UnimplementedAgentServer
// for forward compatibility
type AgentServer interface {
SendIPFSFile(Agent_SendIPFSFileServer) error
GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error
ExecuteIOPlan(context.Context, *ExecuteIOPlanReq) (*ExecuteIOPlanResp, error)
SendStream(Agent_SendStreamServer) error
FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error
GetStream(*GetStreamReq, Agent_GetStreamServer) error
SendVar(context.Context, *SendVarReq) (*SendVarResp, error)
GetVar(context.Context, *GetVarReq) (*GetVarResp, error)
Ping(context.Context, *PingReq) (*PingResp, error)
mustEmbedUnimplementedAgentServer()
}
@@ -204,17 +168,20 @@ type AgentServer interface {
type UnimplementedAgentServer struct {
}

func (UnimplementedAgentServer) SendIPFSFile(Agent_SendIPFSFileServer) error {
return status.Errorf(codes.Unimplemented, "method SendIPFSFile not implemented")
}
func (UnimplementedAgentServer) GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error {
return status.Errorf(codes.Unimplemented, "method GetIPFSFile not implemented")
func (UnimplementedAgentServer) ExecuteIOPlan(context.Context, *ExecuteIOPlanReq) (*ExecuteIOPlanResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method ExecuteIOPlan not implemented")
}
func (UnimplementedAgentServer) SendStream(Agent_SendStreamServer) error {
return status.Errorf(codes.Unimplemented, "method SendStream not implemented")
}
func (UnimplementedAgentServer) FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error {
return status.Errorf(codes.Unimplemented, "method FetchStream not implemented")
func (UnimplementedAgentServer) GetStream(*GetStreamReq, Agent_GetStreamServer) error {
return status.Errorf(codes.Unimplemented, "method GetStream not implemented")
}
func (UnimplementedAgentServer) SendVar(context.Context, *SendVarReq) (*SendVarResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendVar not implemented")
}
func (UnimplementedAgentServer) GetVar(context.Context, *GetVarReq) (*GetVarResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetVar not implemented")
}
func (UnimplementedAgentServer) Ping(context.Context, *PingReq) (*PingResp, error) {
return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
@@ -232,51 +199,22 @@ func RegisterAgentServer(s grpc.ServiceRegistrar, srv AgentServer) {
s.RegisterService(&Agent_ServiceDesc, srv)
}

func _Agent_SendIPFSFile_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(AgentServer).SendIPFSFile(&agentSendIPFSFileServer{stream})
}

type Agent_SendIPFSFileServer interface {
SendAndClose(*SendIPFSFileResp) error
Recv() (*FileDataPacket, error)
grpc.ServerStream
}

type agentSendIPFSFileServer struct {
grpc.ServerStream
}

func (x *agentSendIPFSFileServer) SendAndClose(m *SendIPFSFileResp) error {
return x.ServerStream.SendMsg(m)
}

func (x *agentSendIPFSFileServer) Recv() (*FileDataPacket, error) {
m := new(FileDataPacket)
if err := x.ServerStream.RecvMsg(m); err != nil {
func _Agent_ExecuteIOPlan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ExecuteIOPlanReq)
if err := dec(in); err != nil {
return nil, err
}
return m, nil
}

func _Agent_GetIPFSFile_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetIPFSFileReq)
if err := stream.RecvMsg(m); err != nil {
return err
if interceptor == nil {
return srv.(AgentServer).ExecuteIOPlan(ctx, in)
}
return srv.(AgentServer).GetIPFSFile(m, &agentGetIPFSFileServer{stream})
}

type Agent_GetIPFSFileServer interface {
Send(*FileDataPacket) error
grpc.ServerStream
}

type agentGetIPFSFileServer struct {
grpc.ServerStream
}

func (x *agentGetIPFSFileServer) Send(m *FileDataPacket) error {
return x.ServerStream.SendMsg(m)
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Agent_ExecuteIOPlan_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AgentServer).ExecuteIOPlan(ctx, req.(*ExecuteIOPlanReq))
}
return interceptor(ctx, in, info, handler)
}

func _Agent_SendStream_Handler(srv interface{}, stream grpc.ServerStream) error {
@@ -305,27 +243,63 @@ func (x *agentSendStreamServer) Recv() (*StreamDataPacket, error) {
return m, nil
}

func _Agent_FetchStream_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(FetchStreamReq)
func _Agent_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetStreamReq)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(AgentServer).FetchStream(m, &agentFetchStreamServer{stream})
return srv.(AgentServer).GetStream(m, &agentGetStreamServer{stream})
}

type Agent_FetchStreamServer interface {
type Agent_GetStreamServer interface {
Send(*StreamDataPacket) error
grpc.ServerStream
}

type agentFetchStreamServer struct {
type agentGetStreamServer struct {
grpc.ServerStream
}

func (x *agentFetchStreamServer) Send(m *StreamDataPacket) error {
func (x *agentGetStreamServer) Send(m *StreamDataPacket) error {
return x.ServerStream.SendMsg(m)
}

func _Agent_SendVar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendVarReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AgentServer).SendVar(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Agent_SendVar_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AgentServer).SendVar(ctx, req.(*SendVarReq))
}
return interceptor(ctx, in, info, handler)
}

func _Agent_GetVar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetVarReq)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AgentServer).GetVar(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Agent_GetVar_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AgentServer).GetVar(ctx, req.(*GetVarReq))
}
return interceptor(ctx, in, info, handler)
}

func _Agent_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PingReq)
if err := dec(in); err != nil {
@@ -352,29 +326,31 @@ var Agent_ServiceDesc = grpc.ServiceDesc{
HandlerType: (*AgentServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Ping",
Handler: _Agent_Ping_Handler,
MethodName: "ExecuteIOPlan",
Handler: _Agent_ExecuteIOPlan_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "SendIPFSFile",
Handler: _Agent_SendIPFSFile_Handler,
ClientStreams: true,
MethodName: "SendVar",
Handler: _Agent_SendVar_Handler,
},
{
StreamName: "GetIPFSFile",
Handler: _Agent_GetIPFSFile_Handler,
ServerStreams: true,
MethodName: "GetVar",
Handler: _Agent_GetVar_Handler,
},
{
MethodName: "Ping",
Handler: _Agent_Ping_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "SendStream",
Handler: _Agent_SendStream_Handler,
ClientStreams: true,
},
{
StreamName: "FetchStream",
Handler: _Agent_FetchStream_Handler,
StreamName: "GetStream",
Handler: _Agent_GetStream_Handler,
ServerStreams: true,
},
},


+ 67
- 91
common/pkgs/grpc/agent/client.go View File

@@ -5,6 +5,7 @@ import (
"fmt"
"io"

"gitlink.org.cn/cloudream/common/utils/serder"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
@@ -27,59 +28,29 @@ func NewClient(addr string) (*Client, error) {
}, nil
}

func (c *Client) SendIPFSFile(file io.Reader) (string, error) {
sendCli, err := c.cli.SendIPFSFile(context.Background())
func (c *Client) ExecuteIOPlan(ctx context.Context, plan ioswitch.Plan) error {
data, err := serder.ObjectToJSONEx(plan)
if err != nil {
return "", err
return err
}

buf := make([]byte, 4096)
for {
rd, err := file.Read(buf)
if err == io.EOF {
err := sendCli.Send(&FileDataPacket{
Type: StreamDataPacketType_EOF,
Data: buf[:rd],
})
if err != nil {
return "", fmt.Errorf("sending EOF packet: %w", err)
}

resp, err := sendCli.CloseAndRecv()
if err != nil {
return "", fmt.Errorf("receiving response: %w", err)
}

return resp.FileHash, nil
}

if err != nil {
return "", fmt.Errorf("reading file data: %w", err)
}

err = sendCli.Send(&FileDataPacket{
Type: StreamDataPacketType_Data,
Data: buf[:rd],
})
if err != nil {
return "", fmt.Errorf("sending data packet: %w", err)
}
}
_, err = c.cli.ExecuteIOPlan(ctx, &ExecuteIOPlanReq{
Plan: string(data),
})
return err
}

type fileReadCloser struct {
type grpcStreamReadCloser struct {
io.ReadCloser
// stream Agent_GetIPFSFileClient
// TODO 临时使用
recvFn func() (*StreamDataPacket, error)
stream Agent_GetStreamClient
cancelFn context.CancelFunc
readingData []byte
recvEOF bool
}

func (s *fileReadCloser) Read(p []byte) (int, error) {
func (s *grpcStreamReadCloser) Read(p []byte) (int, error) {
if len(s.readingData) == 0 && !s.recvEOF {
resp, err := s.recvFn()
resp, err := s.stream.Recv()
if err != nil {
return 0, err
}
@@ -106,63 +77,34 @@ func (s *fileReadCloser) Read(p []byte) (int, error) {
return cnt, nil
}

func (s *fileReadCloser) Close() error {
func (s *grpcStreamReadCloser) Close() error {
s.cancelFn()

return nil
}

func (c *Client) GetIPFSFile(fileHash string) (io.ReadCloser, error) {
ctx, cancel := context.WithCancel(context.Background())

stream, err := c.cli.GetIPFSFile(ctx, &GetIPFSFileReq{
FileHash: fileHash,
})
if err != nil {
cancel()
return nil, fmt.Errorf("request grpc failed, err: %w", err)
}

return &fileReadCloser{
// TODO 临时处理方案
recvFn: func() (*StreamDataPacket, error) {
pkt, err := stream.Recv()
if err != nil {
return nil, err
}

return &StreamDataPacket{
Type: pkt.Type,
Data: pkt.Data,
}, nil
},
cancelFn: cancel,
}, nil
}

func (c *Client) SendStream(planID ioswitch.PlanID, streamID ioswitch.StreamID, file io.Reader) error {
sendCli, err := c.cli.SendStream(context.Background())
func (c *Client) SendStream(ctx context.Context, planID ioswitch.PlanID, varID ioswitch.VarID, str io.Reader) error {
sendCli, err := c.cli.SendStream(ctx)
if err != nil {
return err
}

err = sendCli.Send(&StreamDataPacket{
Type: StreamDataPacketType_SendArgs,
PlanID: string(planID),
StreamID: string(streamID),
Type: StreamDataPacketType_SendArgs,
PlanID: string(planID),
VarID: int32(varID),
})
if err != nil {
return fmt.Errorf("sending stream id packet: %w", err)
return fmt.Errorf("sending first stream packet: %w", err)
}

buf := make([]byte, 4096)
buf := make([]byte, 1024*64)
for {
rd, err := file.Read(buf)
rd, err := str.Read(buf)
if err == io.EOF {
err := sendCli.Send(&StreamDataPacket{
Type: StreamDataPacketType_EOF,
StreamID: string(streamID),
Data: buf[:rd],
Type: StreamDataPacketType_EOF,
Data: buf[:rd],
})
if err != nil {
return fmt.Errorf("sending EOF packet: %w", err)
@@ -177,13 +119,12 @@ func (c *Client) SendStream(planID ioswitch.PlanID, streamID ioswitch.StreamID,
}

if err != nil {
return fmt.Errorf("reading file data: %w", err)
return fmt.Errorf("reading stream data: %w", err)
}

err = sendCli.Send(&StreamDataPacket{
Type: StreamDataPacketType_Data,
StreamID: string(streamID),
Data: buf[:rd],
Type: StreamDataPacketType_Data,
Data: buf[:rd],
})
if err != nil {
return fmt.Errorf("sending data packet: %w", err)
@@ -191,24 +132,59 @@ func (c *Client) SendStream(planID ioswitch.PlanID, streamID ioswitch.StreamID,
}
}

func (c *Client) FetchStream(planID ioswitch.PlanID, streamID ioswitch.StreamID) (io.ReadCloser, error) {
func (c *Client) GetStream(planID ioswitch.PlanID, varID ioswitch.VarID) (io.ReadCloser, error) {
ctx, cancel := context.WithCancel(context.Background())

stream, err := c.cli.FetchStream(ctx, &FetchStreamReq{
PlanID: string(planID),
StreamID: string(streamID),
stream, err := c.cli.GetStream(ctx, &GetStreamReq{
PlanID: string(planID),
VarID: int32(varID),
})
if err != nil {
cancel()
return nil, fmt.Errorf("request grpc failed, err: %w", err)
}

return &fileReadCloser{
recvFn: stream.Recv,
return &grpcStreamReadCloser{
stream: stream,
cancelFn: cancel,
}, nil
}

// SendVar serializes v and delivers it to the remote agent as a variable
// belonging to the plan identified by planID.
func (c *Client) SendVar(ctx context.Context, planID ioswitch.PlanID, v ioswitch.Var) error {
	varJSON, err := serder.ObjectToJSONEx(v)
	if err != nil {
		return err
	}

	req := &SendVarReq{
		PlanID: string(planID),
		Var:    string(varJSON),
	}
	if _, err := c.cli.SendVar(ctx, req); err != nil {
		return err
	}
	return nil
}

// GetVar asks the remote agent for the value of variable v in plan planID.
// v is serialized so the agent can identify which variable is requested;
// the variable returned by the agent is deserialized and handed back.
func (c *Client) GetVar(ctx context.Context, planID ioswitch.PlanID, v ioswitch.Var) (ioswitch.Var, error) {
	varJSON, err := serder.ObjectToJSONEx(v)
	if err != nil {
		return nil, err
	}

	req := &GetVarReq{
		PlanID: string(planID),
		Var:    string(varJSON),
	}
	resp, err := c.cli.GetVar(ctx, req)
	if err != nil {
		return nil, err
	}

	return serder.JSONToObjectEx[ioswitch.Var]([]byte(resp.Var))
}

func (c *Client) Ping() error {
_, err := c.cli.Ping(context.Background(), &PingReq{})
return err


+ 40
- 16
common/pkgs/ioswitch/ioswitch.go View File

@@ -1,35 +1,59 @@
package ioswitch

import (
"context"
"io"

"gitlink.org.cn/cloudream/common/pkgs/types"
"gitlink.org.cn/cloudream/common/utils/serder"
)

type PlanID string

type StreamID string
type VarID int

type Plan struct {
ID PlanID
Ops []Op
ID PlanID `json:"id"`
Ops []Op `json:"ops"`
}

type Stream struct {
ID StreamID
Stream io.ReadCloser
type Var interface {
GetID() VarID
}

func NewStream(id StreamID, stream io.ReadCloser) Stream {
return Stream{
ID: id,
Stream: stream,
}
var VarUnion = types.NewTypeUnion[Var](
(*IntVar)(nil),
(*StringVar)(nil),
)
var _ = serder.UseTypeUnionExternallyTagged(&VarUnion)

type StreamVar struct {
ID VarID `json:"id"`
Stream io.ReadCloser `json:"-"`
}

type Op interface {
Execute(sw *Switch, planID PlanID) error
func (v *StreamVar) GetID() VarID {
return v.ID
}

type ResultKV struct {
Key string
Value any
type IntVar struct {
ID VarID `json:"id"`
Value string `json:"value"`
}

func (v *IntVar) GetID() VarID {
return v.ID
}

type StringVar struct {
ID VarID `json:"id"`
Value string `json:"value"`
}

func (v *StringVar) GetID() VarID {
return v.ID
}

type Op interface {
Execute(ctx context.Context, sw *Switch) error
}

+ 83
- 0
common/pkgs/ioswitch/manager.go View File

@@ -0,0 +1,83 @@
package ioswitch

import (
"context"
"sync"

"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/utils/lo2"
)

// finding is a pending wait for a Switch that has not been registered yet;
// its Callback is completed by Add when a Switch with a matching PlanID
// arrives (see FindByIDContexted).
type finding struct {
	PlanID PlanID
	Callback *future.SetValueFuture[*Switch]
}

// Manager tracks the active Switches by plan ID and lets callers block
// until a Switch that will be registered later becomes available.
type Manager struct {
	lock sync.Mutex
	switchs map[PlanID]*Switch
	findings []*finding
}

// NewManager returns a ready-to-use Manager with an empty switch table.
// NOTE(review): the returned value embeds a sync.Mutex, so it must not be
// copied after first use; callers should keep a single instance.
func NewManager() Manager {
	return Manager{
		switchs: make(map[PlanID]*Switch),
	}
}

// Add registers sw under its plan ID and completes every pending finding
// waiting for that ID (see FindByIDContexted).
func (s *Manager) Add(sw *Switch) {
	s.lock.Lock()
	defer s.lock.Unlock()

	id := sw.Plan().ID
	s.switchs[id] = sw

	// Filter in place. The previous version removed elements from
	// s.findings while indexing it inside a range loop bounded by the
	// original length: after a removal the slice shrinks, so the loop
	// skipped the element shifted into the removed slot and panicked
	// with an out-of-range index once it walked past the new length.
	kept := s.findings[:0]
	for _, f := range s.findings {
		if f.PlanID == id {
			f.Callback.SetValue(sw)
		} else {
			kept = append(kept, f)
		}
	}
	s.findings = kept
}

// Remove unregisters sw from the manager.
func (s *Manager) Remove(sw *Switch) {
	id := sw.Plan().ID

	s.lock.Lock()
	delete(s.switchs, id)
	s.lock.Unlock()
}

// FindByID returns the Switch registered under id, or nil when absent.
func (s *Manager) FindByID(id PlanID) *Switch {
	s.lock.Lock()
	sw := s.switchs[id]
	s.lock.Unlock()

	return sw
}

// FindByIDContexted returns the Switch registered under id, blocking until
// it is Add-ed if it is not present yet. It returns nil when ctx is
// cancelled before the Switch shows up.
func (s *Manager) FindByIDContexted(ctx context.Context, id PlanID) *Switch {
	s.lock.Lock()

	// Fast path: the Switch is already registered.
	sw := s.switchs[id]
	if sw != nil {
		s.lock.Unlock()
		return sw
	}

	// Slow path: enqueue a finding that Add will complete.
	cb := future.NewSetValue[*Switch]()
	f := &finding{
		PlanID: id,
		Callback: cb,
	}
	s.findings = append(s.findings, f)

	// The lock must be released before waiting, otherwise Add could never
	// acquire it to deliver the Switch.
	s.lock.Unlock()

	// On cancellation WaitValue returns an error and sw stays nil.
	sw, _ = cb.WaitValue(ctx)

	s.lock.Lock()
	defer s.lock.Unlock()

	// Drop our finding in case we were cancelled before Add completed it.
	// NOTE(review): assumes lo2.Remove is a no-op when Add already removed
	// this finding — confirm lo2.Remove semantics.
	s.findings = lo2.Remove(s.findings, f)

	return sw
}

+ 13
- 17
common/pkgs/ioswitch/ops/chunked_join.go View File

@@ -10,38 +10,34 @@ import (
)

type ChunkedJoin struct {
InputIDs []ioswitch.StreamID `json:"inputIDs"`
OutputID ioswitch.StreamID `json:"outputID"`
ChunkSize int `json:"chunkSize"`
Inputs []*ioswitch.StreamVar `json:"inputs"`
Output *ioswitch.StreamVar `json:"output"`
ChunkSize int `json:"chunkSize"`
}

func (o *ChunkedJoin) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
strs, err := sw.WaitStreams(planID, o.InputIDs...)
func (o *ChunkedJoin) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := ioswitch.BindArrayVars(sw, ctx, o.Inputs)
if err != nil {
return err
}

var strReaders []io.Reader
for _, s := range strs {
for _, s := range o.Inputs {
strReaders = append(strReaders, s.Stream)
}
defer func() {
for _, str := range strs {
for _, str := range o.Inputs {
str.Stream.Close()
}
}()

fut := future.NewSetVoid()
sw.StreamReady(planID,
ioswitch.NewStream(o.OutputID,
io2.AfterReadClosedOnce(io2.ChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) {
fut.SetVoid()
}),
),
)

fut.Wait(context.TODO())
return nil
o.Output.Stream = io2.AfterReadClosedOnce(io2.ChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) {
fut.SetVoid()
})
sw.PutVars(o.Output)

return fut.Wait(ctx)
}

func init() {


+ 18
- 22
common/pkgs/ioswitch/ops/chunked_split.go View File

@@ -1,47 +1,43 @@
package ops

import (
"context"
"io"
"sync"

"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
"golang.org/x/sync/semaphore"
)

type ChunkedSplit struct {
InputID ioswitch.StreamID `json:"inputID"`
OutputIDs []ioswitch.StreamID `json:"outputIDs"`
ChunkSize int `json:"chunkSize"`
StreamCount int `json:"streamCount"`
PaddingZeros bool `json:"paddingZeros"`
Input *ioswitch.StreamVar `json:"input"`
Outputs []*ioswitch.StreamVar `json:"outputs"`
ChunkSize int `json:"chunkSize"`
PaddingZeros bool `json:"paddingZeros"`
}

func (o *ChunkedSplit) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
str, err := sw.WaitStreams(planID, o.InputID)
func (o *ChunkedSplit) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Input)
if err != nil {
return err
}
defer str[0].Stream.Close()
defer o.Input.Stream.Close()

wg := sync.WaitGroup{}
outputs := io2.ChunkedSplit(str[0].Stream, o.ChunkSize, o.StreamCount, io2.ChunkedSplitOption{
outputs := io2.ChunkedSplit(o.Input.Stream, o.ChunkSize, len(o.Outputs), io2.ChunkedSplitOption{
PaddingZeros: o.PaddingZeros,
})

sem := semaphore.NewWeighted(int64(len(outputs)))
for i := range outputs {
wg.Add(1)

sw.StreamReady(planID, ioswitch.NewStream(
o.OutputIDs[i],
io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
wg.Done()
}),
))
}
sem.Acquire(ctx, 1)

wg.Wait()
o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
sem.Release(1)
})
}
ioswitch.PutArrayVars(sw, o.Outputs)

return nil
return sem.Acquire(ctx, int64(len(outputs)))
}

func init() {


+ 41
- 20
common/pkgs/ioswitch/ops/clone.go View File

@@ -1,43 +1,64 @@
package ops

import (
"context"
"fmt"
"io"
"sync"

"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
"golang.org/x/sync/semaphore"
)

type Clone struct {
InputID ioswitch.StreamID `json:"inputID"`
OutputIDs []ioswitch.StreamID `json:"outputIDs"`
type CloneStream struct {
Input *ioswitch.StreamVar `json:"input"`
Outputs []*ioswitch.StreamVar `json:"outputs"`
}

func (o *Clone) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
strs, err := sw.WaitStreams(planID, o.InputID)
func (o *CloneStream) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Input)
if err != nil {
return err
}
defer strs[0].Stream.Close()
defer o.Input.Stream.Close()

wg := sync.WaitGroup{}
cloned := io2.Clone(strs[0].Stream, len(o.OutputIDs))
cloned := io2.Clone(o.Input.Stream, len(o.Outputs))

sem := semaphore.NewWeighted(int64(len(o.Outputs)))
for i, s := range cloned {
wg.Add(1)

sw.StreamReady(planID,
ioswitch.NewStream(o.OutputIDs[i],
io2.AfterReadClosedOnce(s, func(closer io.ReadCloser) {
wg.Done()
}),
),
)
sem.Acquire(ctx, 1)

o.Outputs[i].Stream = io2.AfterReadClosedOnce(s, func(closer io.ReadCloser) {
sem.Release(1)
})
}
ioswitch.PutArrayVars(sw, o.Outputs)

return sem.Acquire(ctx, int64(len(o.Outputs)))
}

type CloneVar struct {
Raw ioswitch.Var `json:"raw"`
Cloneds []ioswitch.Var `json:"cloneds"`
}

func (o *CloneVar) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Raw)
if err != nil {
return err
}

for _, v := range o.Cloneds {
if err := ioswitch.AssignVar(o.Raw, v); err != nil {
return fmt.Errorf("clone var: %w", err)
}
}
sw.PutVars(o.Cloneds...)

wg.Wait()
return nil
}

func init() {
OpUnion.AddT((*Clone)(nil))
OpUnion.AddT((*CloneStream)(nil))
OpUnion.AddT((*CloneVar)(nil))
}

+ 37
- 34
common/pkgs/ioswitch/ops/ec.go View File

@@ -1,99 +1,102 @@
package ops

import (
"context"
"fmt"
"io"
"sync"

cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
"gitlink.org.cn/cloudream/storage/common/pkgs/ec"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
"golang.org/x/sync/semaphore"
)

type ECReconstructAny struct {
EC cdssdk.ECRedundancy `json:"ec"`
InputIDs []ioswitch.StreamID `json:"inputIDs"`
OutputIDs []ioswitch.StreamID `json:"outputIDs"`
InputBlockIndexes []int `json:"inputBlockIndexes"`
OutputBlockIndexes []int `json:"outputBlockIndexes"`
EC cdssdk.ECRedundancy `json:"ec"`
Inputs []*ioswitch.StreamVar `json:"inputs"`
Outputs []*ioswitch.StreamVar `json:"outputs"`
InputBlockIndexes []int `json:"inputBlockIndexes"`
OutputBlockIndexes []int `json:"outputBlockIndexes"`
}

func (o *ECReconstructAny) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
func (o *ECReconstructAny) Execute(ctx context.Context, sw *ioswitch.Switch) error {
rs, err := ec.NewStreamRs(o.EC.K, o.EC.N, o.EC.ChunkSize)
if err != nil {
return fmt.Errorf("new ec: %w", err)
}

strs, err := sw.WaitStreams(planID, o.InputIDs...)
err = ioswitch.BindArrayVars(sw, ctx, o.Inputs)
if err != nil {
return err
}
defer func() {
for _, s := range strs {
for _, s := range o.Inputs {
s.Stream.Close()
}
}()

var inputs []io.Reader
for _, s := range strs {
for _, s := range o.Inputs {
inputs = append(inputs, s.Stream)
}

outputs := rs.ReconstructAny(inputs, o.InputBlockIndexes, o.OutputBlockIndexes)

wg := sync.WaitGroup{}
for i, id := range o.OutputIDs {
wg.Add(1)
sw.StreamReady(planID, ioswitch.NewStream(id, io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
wg.Done()
})))
sem := semaphore.NewWeighted(int64(len(o.Outputs)))
for i := range o.Outputs {
sem.Acquire(ctx, 1)

o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
sem.Release(1)
})
}
wg.Wait()
ioswitch.PutArrayVars(sw, o.Outputs)

return nil
return sem.Acquire(ctx, int64(len(o.Outputs)))
}

type ECReconstruct struct {
EC cdssdk.ECRedundancy `json:"ec"`
InputIDs []ioswitch.StreamID `json:"inputIDs"`
OutputIDs []ioswitch.StreamID `json:"outputIDs"`
InputBlockIndexes []int `json:"inputBlockIndexes"`
EC cdssdk.ECRedundancy `json:"ec"`
Inputs []*ioswitch.StreamVar `json:"inputs"`
Outputs []*ioswitch.StreamVar `json:"outputs"`
InputBlockIndexes []int `json:"inputBlockIndexes"`
}

func (o *ECReconstruct) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
func (o *ECReconstruct) Execute(ctx context.Context, sw *ioswitch.Switch) error {
rs, err := ec.NewStreamRs(o.EC.K, o.EC.N, o.EC.ChunkSize)
if err != nil {
return fmt.Errorf("new ec: %w", err)
}

strs, err := sw.WaitStreams(planID, o.InputIDs...)
err = ioswitch.BindArrayVars(sw, ctx, o.Inputs)
if err != nil {
return err
}
defer func() {
for _, s := range strs {
for _, s := range o.Inputs {
s.Stream.Close()
}
}()

var inputs []io.Reader
for _, s := range strs {
for _, s := range o.Inputs {
inputs = append(inputs, s.Stream)
}

outputs := rs.ReconstructData(inputs, o.InputBlockIndexes)

wg := sync.WaitGroup{}
for i, id := range o.OutputIDs {
wg.Add(1)
sw.StreamReady(planID, ioswitch.NewStream(id, io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
wg.Done()
})))
sem := semaphore.NewWeighted(int64(len(o.Outputs)))
for i := range o.Outputs {
sem.Acquire(ctx, 1)

o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) {
sem.Release(1)
})
}
wg.Wait()
ioswitch.PutArrayVars(sw, o.Outputs)

return nil
return sem.Acquire(ctx, int64(len(o.Outputs)))
}

func init() {


+ 13
- 13
common/pkgs/ioswitch/ops/file.go View File

@@ -13,16 +13,16 @@ import (
)

type FileWrite struct {
InputID ioswitch.StreamID `json:"inputID"`
FilePath string `json:"filePath"`
Input *ioswitch.StreamVar `json:"input"`
FilePath string `json:"filePath"`
}

func (o *FileWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
str, err := sw.WaitStreams(planID, o.InputID)
func (o *FileWrite) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Input)
if err != nil {
return err
}
defer str[0].Stream.Close()
defer o.Input.Stream.Close()

dir := path.Dir(o.FilePath)
err = os.MkdirAll(dir, 0777)
@@ -36,7 +36,7 @@ func (o *FileWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
}
defer file.Close()

_, err = io.Copy(file, str[0].Stream)
_, err = io.Copy(file, o.Input.Stream)
if err != nil {
return fmt.Errorf("copying data to file: %w", err)
}
@@ -45,22 +45,22 @@ func (o *FileWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
}

type FileRead struct {
OutputID ioswitch.StreamID `json:"outputID"`
FilePath string `json:"filePath"`
Output *ioswitch.StreamVar `json:"output"`
FilePath string `json:"filePath"`
}

func (o *FileRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
func (o *FileRead) Execute(ctx context.Context, sw *ioswitch.Switch) error {
file, err := os.Open(o.FilePath)
if err != nil {
return fmt.Errorf("opening file: %w", err)
}

fut := future.NewSetVoid()
sw.StreamReady(planID, ioswitch.NewStream(o.OutputID, io2.AfterReadClosed(file, func(closer io.ReadCloser) {
o.Output.Stream = io2.AfterReadClosed(file, func(closer io.ReadCloser) {
fut.SetVoid()
})))
fut.Wait(context.TODO())
})
sw.PutVars(o.Output)
fut.Wait(ctx)

return nil
}


+ 67
- 32
common/pkgs/ioswitch/ops/grpc.go View File

@@ -6,39 +6,31 @@ import (
"io"

"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/pkgs/logger"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/common/utils/io2"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

type GRPCSend struct {
LocalID ioswitch.StreamID `json:"localID"`
RemoteID ioswitch.StreamID `json:"remoteID"`
Node cdssdk.Node `json:"node"`
type SendStream struct {
Stream *ioswitch.StreamVar `json:"stream"`
Node cdssdk.Node `json:"node"`
}

func (o *GRPCSend) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
logger.
WithField("LocalID", o.LocalID).
WithField("RemoteID", o.RemoteID).
Debugf("grpc send")

strs, err := sw.WaitStreams(planID, o.LocalID)
func (o *SendStream) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Stream)
if err != nil {
return err
}
defer strs[0].Stream.Close()
defer o.Stream.Stream.Close()

// TODO 根据客户端地址选择IP和端口
agtCli, err := stgglb.AgentRPCPool.Acquire(o.Node.ExternalIP, o.Node.ExternalGRPCPort)
agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node))
if err != nil {
return fmt.Errorf("new agent rpc client: %w", err)
}
defer stgglb.AgentRPCPool.Release(agtCli)

err = agtCli.SendStream(planID, o.RemoteID, strs[0].Stream)
err = agtCli.SendStream(ctx, sw.Plan().ID, o.Stream.ID, o.Stream.Stream)
if err != nil {
return fmt.Errorf("sending stream: %w", err)
}
@@ -46,39 +38,82 @@ func (o *GRPCSend) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
return nil
}

type GRPCFetch struct {
RemoteID ioswitch.StreamID `json:"remoteID"`
LocalID ioswitch.StreamID `json:"localID"`
Node cdssdk.Node `json:"node"`
type GetStream struct {
Stream *ioswitch.StreamVar `json:"stream"`
Node cdssdk.Node `json:"node"`
}

func (o *GRPCFetch) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
// TODO 根据客户端地址选择IP和端口
agtCli, err := stgglb.AgentRPCPool.Acquire(o.Node.ExternalIP, o.Node.ExternalGRPCPort)
func (o *GetStream) Execute(ctx context.Context, sw *ioswitch.Switch) error {
agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node))
if err != nil {
return fmt.Errorf("new agent rpc client: %w", err)
}
defer stgglb.AgentRPCPool.Release(agtCli)

str, err := agtCli.FetchStream(planID, o.RemoteID)
str, err := agtCli.GetStream(sw.Plan().ID, o.Stream.ID)
if err != nil {
return fmt.Errorf("fetching stream: %w", err)
return fmt.Errorf("getting stream: %w", err)
}

fut := future.NewSetVoid()
str = io2.AfterReadClosedOnce(str, func(closer io.ReadCloser) {
o.Stream.Stream = io2.AfterReadClosedOnce(str, func(closer io.ReadCloser) {
fut.SetVoid()
})
sw.PutVars(o.Stream)

return fut.Wait(ctx)
}

type SendVar struct {
Var ioswitch.Var `json:"var"`
Node cdssdk.Node `json:"node"`
}

sw.StreamReady(planID, ioswitch.NewStream(o.LocalID, str))
func (o *SendVar) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Var)
if err != nil {
return err
}

// TODO
fut.Wait(context.TODO())
agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node))
if err != nil {
return fmt.Errorf("new agent rpc client: %w", err)
}
defer stgglb.AgentRPCPool.Release(agtCli)

err = agtCli.SendVar(ctx, sw.Plan().ID, o.Var)
if err != nil {
return fmt.Errorf("sending var: %w", err)
}

return err
return nil
}

// GetVar pulls the value of a variable from a remote agent (Node) and
// publishes it on the local switch for downstream ops.
type GetVar struct {
	Var  ioswitch.Var `json:"var"`
	Node cdssdk.Node  `json:"node"`
}

func (o *GetVar) Execute(ctx context.Context, sw *ioswitch.Switch) error {
	cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node))
	if err != nil {
		return fmt.Errorf("new agent rpc client: %w", err)
	}
	defer stgglb.AgentRPCPool.Release(cli)

	fetched, err := cli.GetVar(ctx, sw.Plan().ID, o.Var)
	if err != nil {
		return fmt.Errorf("getting var: %w", err)
	}

	// Replace the placeholder with the remotely-filled variable and make
	// it available on the switch.
	o.Var = fetched
	sw.PutVars(o.Var)

	return nil
}

func init() {
OpUnion.AddT((*GRPCSend)(nil))
OpUnion.AddT((*GRPCFetch)(nil))
OpUnion.AddT((*SendStream)(nil))
OpUnion.AddT((*GetStream)(nil))
OpUnion.AddT((*SendVar)(nil))
OpUnion.AddT((*GetVar)(nil))
}

+ 16
- 23
common/pkgs/ioswitch/ops/ipfs.go View File

@@ -14,12 +14,12 @@ import (
)

type IPFSRead struct {
Output ioswitch.StreamID `json:"output"`
FileHash string `json:"fileHash"`
Option ipfs.ReadOption `json:"option"`
Output *ioswitch.StreamVar `json:"output"`
FileHash string `json:"fileHash"`
Option ipfs.ReadOption `json:"option"`
}

func (o *IPFSRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
func (o *IPFSRead) Execute(ctx context.Context, sw *ioswitch.Switch) error {
logger.
WithField("FileHash", o.FileHash).
WithField("Output", o.Output).
@@ -36,27 +36,25 @@ func (o *IPFSRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
if err != nil {
return fmt.Errorf("reading ipfs: %w", err)
}
defer file.Close()

fut := future.NewSetVoid()
file = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
o.Output.Stream = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) {
fut.SetVoid()
})
sw.PutVars(o.Output)

sw.StreamReady(planID, ioswitch.NewStream(o.Output, file))

// TODO context
fut.Wait(context.TODO())
return nil
return fut.Wait(ctx)
}

type IPFSWrite struct {
Input ioswitch.StreamID `json:"input"`
ResultKey string `json:"resultKey"`
Input *ioswitch.StreamVar `json:"input"`
FileHash *ioswitch.StringVar `json:"fileHash"`
}

func (o *IPFSWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
func (o *IPFSWrite) Execute(ctx context.Context, sw *ioswitch.Switch) error {
logger.
WithField("ResultKey", o.ResultKey).
WithField("ResultKey", o.FileHash).
WithField("Input", o.Input).
Debugf("ipfs write op")

@@ -66,23 +64,18 @@ func (o *IPFSWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
}
defer stgglb.IPFSPool.Release(ipfsCli)

strs, err := sw.WaitStreams(planID, o.Input)
err = sw.BindVars(ctx, o.Input)
if err != nil {
return err
}
defer strs[0].Stream.Close()
defer o.Input.Stream.Close()

fileHash, err := ipfsCli.CreateFile(strs[0].Stream)
o.FileHash.Value, err = ipfsCli.CreateFile(o.Input.Stream)
if err != nil {
return fmt.Errorf("creating ipfs file: %w", err)
}

if o.ResultKey != "" {
sw.AddResultValue(planID, ioswitch.ResultKV{
Key: o.ResultKey,
Value: fileHash,
})
}
sw.PutVars(o.FileHash)

return nil
}


+ 13
- 17
common/pkgs/ioswitch/ops/join.go View File

@@ -10,38 +10,34 @@ import (
)

type Join struct {
InputIDs []ioswitch.StreamID `json:"inputIDs"`
OutputID ioswitch.StreamID `json:"outputID"`
Length int64 `json:"length"`
Inputs []*ioswitch.StreamVar `json:"inputs"`
Output *ioswitch.StreamVar `json:"output"`
Length int64 `json:"length"`
}

func (o *Join) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
strs, err := sw.WaitStreams(planID, o.InputIDs...)
func (o *Join) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := ioswitch.BindArrayVars(sw, ctx, o.Inputs)
if err != nil {
return err
}

var strReaders []io.Reader
for _, s := range strs {
for _, s := range o.Inputs {
strReaders = append(strReaders, s.Stream)
}
defer func() {
for _, str := range strs {
for _, str := range o.Inputs {
str.Stream.Close()
}
}()

fut := future.NewSetVoid()
sw.StreamReady(planID,
ioswitch.NewStream(o.OutputID,
io2.AfterReadClosedOnce(io2.Length(io2.Join(strReaders), o.Length), func(closer io.ReadCloser) {
fut.SetVoid()
}),
),
)

fut.Wait(context.TODO())
return nil
o.Output.Stream = io2.AfterReadClosedOnce(io2.Length(io2.Join(strReaders), o.Length), func(closer io.ReadCloser) {
fut.SetVoid()
})
sw.PutVars(o.Output)

return fut.Wait(ctx)
}

func init() {


+ 11
- 15
common/pkgs/ioswitch/ops/length.go View File

@@ -10,29 +10,25 @@ import (
)

type Length struct {
InputID ioswitch.StreamID `json:"inputID"`
OutputID ioswitch.StreamID `json:"outputID"`
Length int64 `json:"length"`
Input *ioswitch.StreamVar `json:"input"`
Output *ioswitch.StreamVar `json:"output"`
Length int64 `json:"length"`
}

func (o *Length) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error {
strs, err := sw.WaitStreams(planID, o.InputID)
func (o *Length) Execute(ctx context.Context, sw *ioswitch.Switch) error {
err := sw.BindVars(ctx, o.Input)
if err != nil {
return err
}
defer strs[0].Stream.Close()
defer o.Input.Stream.Close()

fut := future.NewSetVoid()
sw.StreamReady(planID,
ioswitch.NewStream(o.OutputID,
io2.AfterReadClosedOnce(io2.Length(strs[0].Stream, o.Length), func(closer io.ReadCloser) {
fut.SetVoid()
}),
),
)
o.Output.Stream = io2.AfterReadClosedOnce(io2.Length(o.Input.Stream, o.Length), func(closer io.ReadCloser) {
fut.SetVoid()
})
sw.PutVars(o.Output)

fut.Wait(context.TODO())
return nil
return fut.Wait(ctx)
}

func init() {


+ 30
- 0
common/pkgs/ioswitch/ops/store.go View File

@@ -0,0 +1,30 @@
package ops

import (
"context"
"sync"

"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

// Store waits for Var to be produced on the switch and records its
// primitive value into the shared sync.Map under Key, so results can be
// collected after the plan finishes.
// NOTE(review): Store carries a *sync.Map and has no json tags, so unlike
// the other ops it does not look serializable — presumably it is only used
// locally by the plan executor; confirm before sending it across the wire.
type Store struct {
	Var ioswitch.Var
	Key string
	Store *sync.Map
}

func (o *Store) Execute(ctx context.Context, sw *ioswitch.Switch) error {
	// Block until an upstream op has assigned the variable.
	err := sw.BindVars(ctx, o.Var)
	if err != nil {
		return err
	}

	// Only value-carrying vars are recorded; any other kind (e.g. a
	// stream var) is silently ignored.
	switch v := o.Var.(type) {
	case *ioswitch.IntVar:
		o.Store.Store(o.Key, v.Value)
	case *ioswitch.StringVar:
		o.Store.Store(o.Key, v.Value)
	}

	return nil
}

+ 281
- 0
common/pkgs/ioswitch/plans/agent.go View File

@@ -0,0 +1,281 @@
package plans

import (
"gitlink.org.cn/cloudream/common/pkgs/ipfs"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops"
)

// AgentPlanBuilder accumulates the ops that one agent (node) will execute
// as part of the IO plan being assembled by the parent PlanBuilder.
type AgentPlanBuilder struct {
	blder *PlanBuilder
	node cdssdk.Node
	ops []ioswitch.Op
}

// AgentStreamVar is a builder-side handle to a stream variable produced on
// this agent; methods on it append ops that consume the stream.
type AgentStreamVar struct {
	owner *AgentPlanBuilder
	v *ioswitch.StreamVar
}

// AgentIntVar is a builder-side handle to an int variable on this agent.
type AgentIntVar struct {
	owner *AgentPlanBuilder
	v *ioswitch.IntVar
}

// AgentStringVar is a builder-side handle to a string variable on this agent.
type AgentStringVar struct {
	owner *AgentPlanBuilder
	v *ioswitch.StringVar
}

// IPFSRead appends an op that reads the IPFS file identified by fileHash
// on this agent and returns a handle to the resulting stream. At most one
// ReadOption may be supplied; by default the whole file is read.
func (b *AgentPlanBuilder) IPFSRead(fileHash string, opts ...ipfs.ReadOption) *AgentStreamVar {
	// Default option: read from offset 0 to the end of the file.
	opt := ipfs.ReadOption{Offset: 0, Length: -1}
	if len(opts) > 0 {
		opt = opts[0]
	}

	out := b.blder.newStreamVar()
	b.ops = append(b.ops, &ops.IPFSRead{
		Output:   out,
		FileHash: fileHash,
		Option:   opt,
	})

	return &AgentStreamVar{owner: b, v: out}
}

// IPFSWrite adds an op that writes this stream into IPFS on the owning agent.
// It returns a string variable that will carry the resulting file hash once
// the op has executed.
func (s *AgentStreamVar) IPFSWrite() *AgentStringVar {
	hashVar := s.owner.blder.newStringVar()

	s.owner.ops = append(s.owner.ops, &ops.IPFSWrite{
		Input:    s.v,
		FileHash: hashVar,
	})

	return &AgentStringVar{owner: s.owner, v: hashVar}
}

// FileRead adds an op that reads the local file at filePath on this agent and
// exposes its content as a new stream variable.
func (b *AgentPlanBuilder) FileRead(filePath string) *AgentStreamVar {
	out := b.blder.newStreamVar()

	b.ops = append(b.ops, &ops.FileRead{
		Output:   out,
		FilePath: filePath,
	})

	return &AgentStreamVar{owner: b, v: out}
}

// FileWrite adds an op that writes this stream's content to the file at
// filePath on the owning agent.
//
// Fix: receiver renamed from b to s — every other *AgentStreamVar method
// (IPFSWrite, Length, To, ToExecutor, Clone) uses s, and Go convention
// requires a consistent receiver name across all methods of a type.
func (s *AgentStreamVar) FileWrite(filePath string) {
	s.owner.ops = append(s.owner.ops, &ops.FileWrite{
		Input:    s.v,
		FilePath: filePath,
	})
}

// ECReconstructAny adds an op that performs erasure-coded reconstruction on
// this agent. streams carry the available blocks, whose block indexes are
// listed (in the same order) in inBlockIndexes; one output stream variable is
// produced for every index in outBlockIndexes.
//
// The returned slice holds the output stream variables in the same order as
// outBlockIndexes.
//
// Improvement: all three slices are pre-sized since their lengths are known
// up front, avoiding repeated append growth.
func (b *AgentPlanBuilder) ECReconstructAny(ec cdssdk.ECRedundancy, inBlockIndexes []int, outBlockIndexes []int, streams []*AgentStreamVar) []*AgentStreamVar {
	inputStrVars := make([]*ioswitch.StreamVar, 0, len(streams))
	for _, str := range streams {
		inputStrVars = append(inputStrVars, str.v)
	}

	strs := make([]*AgentStreamVar, 0, len(outBlockIndexes))
	outputStrVars := make([]*ioswitch.StreamVar, 0, len(outBlockIndexes))
	for i := 0; i < len(outBlockIndexes); i++ {
		v := b.blder.newStreamVar()
		strs = append(strs, &AgentStreamVar{
			owner: b,
			v:     v,
		})
		outputStrVars = append(outputStrVars, v)
	}

	b.ops = append(b.ops, &ops.ECReconstructAny{
		EC:                 ec,
		Inputs:             inputStrVars,
		Outputs:            outputStrVars,
		InputBlockIndexes:  inBlockIndexes,
		OutputBlockIndexes: outBlockIndexes,
	})

	return strs
}

// ECReconstruct adds an op that reconstructs the ec.K data blocks of an
// erasure-coded object on this agent. streams carry the available blocks,
// whose indexes are listed (in the same order) in inBlockIndexes.
//
// The returned slice holds exactly ec.K output stream variables, one per
// data block, in block-index order.
//
// Improvement: all three slices are pre-sized since their lengths are known
// up front, avoiding repeated append growth.
func (b *AgentPlanBuilder) ECReconstruct(ec cdssdk.ECRedundancy, inBlockIndexes []int, streams []*AgentStreamVar) []*AgentStreamVar {
	inputStrVars := make([]*ioswitch.StreamVar, 0, len(streams))
	for _, str := range streams {
		inputStrVars = append(inputStrVars, str.v)
	}

	strs := make([]*AgentStreamVar, 0, ec.K)
	outputStrVars := make([]*ioswitch.StreamVar, 0, ec.K)
	for i := 0; i < ec.K; i++ {
		v := b.blder.newStreamVar()
		strs = append(strs, &AgentStreamVar{
			owner: b,
			v:     v,
		})
		outputStrVars = append(outputStrVars, v)
	}

	b.ops = append(b.ops, &ops.ECReconstruct{
		EC:                ec,
		Inputs:            inputStrVars,
		Outputs:           outputStrVars,
		InputBlockIndexes: inBlockIndexes,
	})

	return strs
}

// ChunkedSplit adds an op that splits this stream into streamCount output
// stream variables, chunkSize bytes at a time (the exact chunk distribution
// is defined by ops.ChunkedSplit). paddingZeros is forwarded to the op;
// presumably it zero-pads the tail so the outputs are equal length — confirm
// against the ops.ChunkedSplit implementation.
//
// Fixes: receiver renamed from b to s for consistency with the other
// *AgentStreamVar methods; output slices are pre-sized to streamCount.
func (s *AgentStreamVar) ChunkedSplit(chunkSize int, streamCount int, paddingZeros bool) []*AgentStreamVar {
	strs := make([]*AgentStreamVar, 0, streamCount)
	outputStrVars := make([]*ioswitch.StreamVar, 0, streamCount)
	for i := 0; i < streamCount; i++ {
		v := s.owner.blder.newStreamVar()
		strs = append(strs, &AgentStreamVar{
			owner: s.owner,
			v:     v,
		})
		outputStrVars = append(outputStrVars, v)
	}

	s.owner.ops = append(s.owner.ops, &ops.ChunkedSplit{
		Input:        s.v,
		Outputs:      outputStrVars,
		ChunkSize:    chunkSize,
		PaddingZeros: paddingZeros,
	})

	return strs
}

// Length adds an op that passes this stream through with its length pinned to
// exactly length bytes, yielding a new stream variable. The precise
// truncation/validation behavior is defined by ops.Length.
func (s *AgentStreamVar) Length(length int64) *AgentStreamVar {
	out := s.owner.blder.newStreamVar()

	s.owner.ops = append(s.owner.ops, &ops.Length{
		Input:  s.v,
		Output: out,
		Length: length,
	})

	return &AgentStreamVar{owner: s.owner, v: out}
}

// To sends this stream to another agent node and continues the plan there:
// a SendStream op is appended at the current owner, and the variable's owner
// becomes the plan builder of node. The same *AgentStreamVar is returned for
// chaining.
func (s *AgentStreamVar) To(node cdssdk.Node) *AgentStreamVar {
	from := s.owner
	from.ops = append(from.ops, &ops.SendStream{Stream: s.v, Node: node})
	s.owner = from.blder.AtAgent(node)

	return s
}

// ToExecutor moves this stream from its current agent to the executor side of
// the plan by appending a GetStream op to the executor's op list.
func (s *AgentStreamVar) ToExecutor() *ExecutorStreamVar {
	blder := s.owner.blder
	blder.executorPlan.ops = append(blder.executorPlan.ops, &ops.GetStream{
		Stream: s.v,
		Node:   s.owner.node,
	})

	return &ExecutorStreamVar{blder: blder, v: s.v}
}

// Join adds an op that concatenates the given streams on this agent into a
// single output stream variable. length is the total byte length of the
// joined output and is forwarded to ops.Join.
//
// Improvement: the input slice is pre-sized since its length is known.
func (b *AgentPlanBuilder) Join(length int64, streams []*AgentStreamVar) *AgentStreamVar {
	inputStrVars := make([]*ioswitch.StreamVar, 0, len(streams))
	for _, str := range streams {
		inputStrVars = append(inputStrVars, str.v)
	}

	out := b.blder.newStreamVar()
	b.ops = append(b.ops, &ops.Join{
		Inputs: inputStrVars,
		Output: out,
		Length: length,
	})

	return &AgentStreamVar{owner: b, v: out}
}

// ChunkedJoin adds an op that interleaves the given streams on this agent,
// chunkSize bytes at a time, into a single output stream variable (the exact
// interleaving is defined by ops.ChunkedJoin; it is the inverse of
// ChunkedSplit).
//
// Improvement: the input slice is pre-sized since its length is known.
func (b *AgentPlanBuilder) ChunkedJoin(chunkSize int, streams []*AgentStreamVar) *AgentStreamVar {
	inputStrVars := make([]*ioswitch.StreamVar, 0, len(streams))
	for _, str := range streams {
		inputStrVars = append(inputStrVars, str.v)
	}

	out := b.blder.newStreamVar()
	b.ops = append(b.ops, &ops.ChunkedJoin{
		Inputs:    inputStrVars,
		Output:    out,
		ChunkSize: chunkSize,
	})

	return &AgentStreamVar{owner: b, v: out}
}

// Clone adds an op that duplicates this stream into cnt independent output
// stream variables on the owning agent.
//
// Improvement: both output slices are pre-sized to cnt, avoiding repeated
// append growth.
func (s *AgentStreamVar) Clone(cnt int) []*AgentStreamVar {
	strs := make([]*AgentStreamVar, 0, cnt)
	outputStrVars := make([]*ioswitch.StreamVar, 0, cnt)
	for i := 0; i < cnt; i++ {
		v := s.owner.blder.newStreamVar()
		strs = append(strs, &AgentStreamVar{
			owner: s.owner,
			v:     v,
		})
		outputStrVars = append(outputStrVars, v)
	}

	s.owner.ops = append(s.owner.ops, &ops.CloneStream{
		Input:   s.v,
		Outputs: outputStrVars,
	})

	return strs
}

// To transfers this string variable to another agent node: a SendVar op is
// appended at the current owner, and ownership moves to the plan builder of
// node. The same *AgentStringVar is returned for chaining.
func (v *AgentStringVar) To(node cdssdk.Node) *AgentStringVar {
	from := v.owner
	from.ops = append(from.ops, &ops.SendVar{Var: v.v, Node: node})
	v.owner = from.blder.AtAgent(node)

	return v
}

// ToExecutor moves this string variable from its current agent to the
// executor side of the plan by appending a GetVar op to the executor's op
// list.
func (v *AgentStringVar) ToExecutor() *ExecutorStringVar {
	blder := v.owner.blder
	blder.executorPlan.ops = append(blder.executorPlan.ops, &ops.GetVar{
		Var:  v.v,
		Node: v.owner.node,
	})

	return &ExecutorStringVar{blder: blder, v: v.v}
}

+ 0
- 276
common/pkgs/ioswitch/plans/agent_plan.go View File

@@ -1,276 +0,0 @@
package plans

import (
"gitlink.org.cn/cloudream/common/pkgs/ipfs"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops"
)

type AgentPlanBuilder struct {
owner *PlanBuilder
node cdssdk.Node
ops []ioswitch.Op
}

type AgentStream struct {
owner *AgentPlanBuilder
info *StreamInfo
}

func (b *AgentPlanBuilder) Build(planID ioswitch.PlanID) (AgentPlan, error) {
plan := ioswitch.Plan{
ID: planID,
Ops: b.ops,
}

return AgentPlan{
Plan: plan,
Node: b.node,
}, nil
}

func (b *AgentPlanBuilder) GRCPFetch(node cdssdk.Node, str *AgentStream) *AgentStream {
agtStr := &AgentStream{
owner: b,
info: b.owner.newStream(),
}

b.ops = append(b.ops, &ops.GRPCFetch{
RemoteID: str.info.ID,
LocalID: agtStr.info.ID,
Node: node,
})

return agtStr
}

func (s *AgentStream) GRPCSend(node cdssdk.Node) *AgentStream {
agtStr := &AgentStream{
owner: s.owner.owner.AtAgent(node),
info: s.owner.owner.newStream(),
}

s.owner.ops = append(s.owner.ops, &ops.GRPCSend{
LocalID: s.info.ID,
RemoteID: agtStr.info.ID,
Node: node,
})

return agtStr
}

func (b *AgentPlanBuilder) IPFSRead(fileHash string, opts ...ipfs.ReadOption) *AgentStream {
opt := ipfs.ReadOption{
Offset: 0,
Length: -1,
}
if len(opts) > 0 {
opt = opts[0]
}

agtStr := &AgentStream{
owner: b,
info: b.owner.newStream(),
}

b.ops = append(b.ops, &ops.IPFSRead{
Output: agtStr.info.ID,
FileHash: fileHash,
Option: opt,
})

return agtStr
}

func (s *AgentStream) IPFSWrite(resultKey string) {
s.owner.ops = append(s.owner.ops, &ops.IPFSWrite{
Input: s.info.ID,
ResultKey: resultKey,
})
}

func (b *AgentPlanBuilder) FileRead(filePath string) *AgentStream {
agtStr := &AgentStream{
owner: b,
info: b.owner.newStream(),
}

b.ops = append(b.ops, &ops.FileRead{
OutputID: agtStr.info.ID,
FilePath: filePath,
})

return agtStr
}

func (b *AgentStream) FileWrite(filePath string) {
b.owner.ops = append(b.owner.ops, &ops.FileWrite{
InputID: b.info.ID,
FilePath: filePath,
})
}

func (b *AgentPlanBuilder) ECReconstructAny(ec cdssdk.ECRedundancy, inBlockIndexes []int, outBlockIndexes []int, streams ...*AgentStream) *MultiStream {
mstr := &MultiStream{}

var inputStrIDs []ioswitch.StreamID
for _, str := range streams {
inputStrIDs = append(inputStrIDs, str.info.ID)
}

var outputStrIDs []ioswitch.StreamID
for i := 0; i < len(outBlockIndexes); i++ {
info := b.owner.newStream()
mstr.Streams = append(mstr.Streams, &AgentStream{
owner: b,
info: info,
})
outputStrIDs = append(outputStrIDs, info.ID)
}

b.ops = append(b.ops, &ops.ECReconstructAny{
EC: ec,
InputIDs: inputStrIDs,
OutputIDs: outputStrIDs,
InputBlockIndexes: inBlockIndexes,
OutputBlockIndexes: outBlockIndexes,
})

return mstr
}

func (b *AgentPlanBuilder) ECReconstruct(ec cdssdk.ECRedundancy, inBlockIndexes []int, streams ...*AgentStream) *MultiStream {
mstr := &MultiStream{}

var inputStrIDs []ioswitch.StreamID
for _, str := range streams {
inputStrIDs = append(inputStrIDs, str.info.ID)
}

var outputStrIDs []ioswitch.StreamID
for i := 0; i < ec.K; i++ {
info := b.owner.newStream()
mstr.Streams = append(mstr.Streams, &AgentStream{
owner: b,
info: info,
})
outputStrIDs = append(outputStrIDs, info.ID)
}

b.ops = append(b.ops, &ops.ECReconstruct{
EC: ec,
InputIDs: inputStrIDs,
OutputIDs: outputStrIDs,
InputBlockIndexes: inBlockIndexes,
})

return mstr
}

func (b *AgentStream) ChunkedSplit(chunkSize int, streamCount int, paddingZeros bool) *MultiStream {
mstr := &MultiStream{}

var outputStrIDs []ioswitch.StreamID
for i := 0; i < streamCount; i++ {
info := b.owner.owner.newStream()
mstr.Streams = append(mstr.Streams, &AgentStream{
owner: b.owner,
info: info,
})
outputStrIDs = append(outputStrIDs, info.ID)
}

b.owner.ops = append(b.owner.ops, &ops.ChunkedSplit{
InputID: b.info.ID,
OutputIDs: outputStrIDs,
ChunkSize: chunkSize,
StreamCount: streamCount,
PaddingZeros: paddingZeros,
})

return mstr
}

func (s *AgentStream) Length(length int64) *AgentStream {
agtStr := &AgentStream{
owner: s.owner,
info: s.owner.owner.newStream(),
}

s.owner.ops = append(s.owner.ops, &ops.Length{
InputID: s.info.ID,
OutputID: agtStr.info.ID,
Length: length,
})

return agtStr
}

func (s *AgentStream) ToExecutor() *ToExecutorStream {
return &ToExecutorStream{
info: s.info,
fromNode: &s.owner.node,
}
}

func (b *AgentPlanBuilder) Join(length int64, streams ...*AgentStream) *AgentStream {
agtStr := &AgentStream{
owner: b,
info: b.owner.newStream(),
}

var inputStrIDs []ioswitch.StreamID
for _, str := range streams {
inputStrIDs = append(inputStrIDs, str.info.ID)
}

b.ops = append(b.ops, &ops.Join{
InputIDs: inputStrIDs,
OutputID: agtStr.info.ID,
Length: length,
})

return agtStr
}

func (b *AgentPlanBuilder) ChunkedJoin(chunkSize int, streams ...*AgentStream) *AgentStream {
agtStr := &AgentStream{
owner: b,
info: b.owner.newStream(),
}

var inputStrIDs []ioswitch.StreamID
for _, str := range streams {
inputStrIDs = append(inputStrIDs, str.info.ID)
}

b.ops = append(b.ops, &ops.ChunkedJoin{
InputIDs: inputStrIDs,
OutputID: agtStr.info.ID,
ChunkSize: chunkSize,
})

return agtStr
}

func (s *AgentStream) Clone(cnt int) *MultiStream {
mstr := &MultiStream{}

var outputStrIDs []ioswitch.StreamID
for i := 0; i < cnt; i++ {
info := s.owner.owner.newStream()
mstr.Streams = append(mstr.Streams, &AgentStream{
owner: s.owner,
info: info,
})
outputStrIDs = append(outputStrIDs, info.ID)
}

s.owner.ops = append(s.owner.ops, &ops.Clone{
InputID: s.info.ID,
OutputIDs: outputStrIDs,
})

return mstr
}

+ 92
- 140
common/pkgs/ioswitch/plans/executor.go View File

@@ -2,190 +2,142 @@ package plans

import (
"context"
"errors"
"fmt"
"io"
"sync"
"sync/atomic"

"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/utils/io2"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
stgglb "gitlink.org.cn/cloudream/storage/common/globals"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops"
)

type ExecutorResult struct {
ResultValues map[string]any
type Executor struct {
planID ioswitch.PlanID
plan *PlanBuilder
callback *future.SetVoidFuture
ctx context.Context
cancel context.CancelFunc
executorSw *ioswitch.Switch
}

type Executor struct {
plan ComposedPlan
callback *future.SetValueFuture[ExecutorResult]
mqClis []*agtmq.Client
planTaskIDs []string
func (e *Executor) BeginWrite(str io.ReadCloser, target ExecutorWriteStream) {
target.stream.Stream = str
e.executorSw.PutVars(target.stream)
}

func Execute(plan ComposedPlan) (*Executor, error) {
executor := Executor{
plan: plan,
callback: future.NewSetValue[ExecutorResult](),
func (e *Executor) BeginRead(target ExecutorReadStream) (io.ReadCloser, error) {
err := e.executorSw.BindVars(e.ctx, target.stream)
if err != nil {
return nil, fmt.Errorf("bind vars: %w", err)
}

var err error
for _, a := range plan.AgentPlans {
var cli *agtmq.Client
cli, err = stgglb.AgentMQPool.Acquire(a.Node.NodeID)
if err != nil {
executor.Close()
return nil, fmt.Errorf("new mq client for %d: %w", a.Node.NodeID, err)
}
return target.stream.Stream, nil
}

executor.mqClis = append(executor.mqClis, cli)
func (e *Executor) Wait(ctx context.Context) (map[string]any, error) {
err := e.callback.Wait(ctx)
if err != nil {
return nil, err
}

for i, a := range plan.AgentPlans {
cli := executor.mqClis[i]
ret := make(map[string]any)
e.plan.storeMap.Range(func(k, v any) bool {
ret[k.(string)] = v
return true
})

_, err := cli.SetupIOPlan(agtmq.NewSetupIOPlan(a.Plan))
if err != nil {
for i -= 1; i >= 0; i-- {
executor.mqClis[i].CancelIOPlan(agtmq.NewCancelIOPlan(plan.ID))
}
executor.Close()
return nil, fmt.Errorf("setup plan at %d: %w", a.Node.NodeID, err)
}
}
return ret, nil
}

for i, a := range plan.AgentPlans {
cli := executor.mqClis[i]
func (e *Executor) execute() {
wg := sync.WaitGroup{}

resp, err := cli.StartIOPlan(agtmq.NewStartIOPlan(a.Plan.ID))
if err != nil {
executor.cancelAll()
executor.Close()
return nil, fmt.Errorf("setup plan at %d: %w", a.Node.NodeID, err)
}
for _, p := range e.plan.agentPlans {
wg.Add(1)

executor.planTaskIDs = append(executor.planTaskIDs, resp.TaskID)
}
go func(p *AgentPlanBuilder) {
defer wg.Done()

go executor.pollResult()
plan := ioswitch.Plan{
ID: e.planID,
Ops: p.ops,
}

return &executor, nil
}
cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&p.node))
if err != nil {
e.stopWith(fmt.Errorf("new agent rpc client of node %v: %w", p.node.NodeID, err))
return
}
defer stgglb.AgentRPCPool.Release(cli)

func (e *Executor) SendStream(info *FromExecutorStream, stream io.Reader) error {
// TODO 考虑不使用stgglb的Local
nodeIP := info.toNode.ExternalIP
grpcPort := info.toNode.ExternalGRPCPort
if info.toNode.LocationID == stgglb.Local.LocationID {
nodeIP = info.toNode.LocalIP
grpcPort = info.toNode.LocalGRPCPort
err = cli.ExecuteIOPlan(e.ctx, plan)
if err != nil {
e.stopWith(fmt.Errorf("execute plan at %v: %w", p.node.NodeID, err))
return
}
}(p)
}

agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort)
err := e.executorSw.Run(e.ctx)
if err != nil {
return fmt.Errorf("new agent rpc client: %w", err)
e.stopWith(fmt.Errorf("run executor switch: %w", err))
return
}
defer stgglb.AgentRPCPool.Release(agtCli)

return agtCli.SendStream(e.plan.ID, info.info.ID, stream)
wg.Wait()
}

func (e *Executor) ReadStream(info *ToExecutorStream) (io.ReadCloser, error) {
// TODO 考虑不使用stgglb的Local
nodeIP := info.fromNode.ExternalIP
grpcPort := info.fromNode.ExternalGRPCPort
if info.fromNode.LocationID == stgglb.Local.LocationID {
nodeIP = info.fromNode.LocalIP
grpcPort = info.fromNode.LocalGRPCPort
}

agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort)
if err != nil {
return nil, fmt.Errorf("new agent rpc client: %w", err)
}

str, err := agtCli.FetchStream(e.plan.ID, info.info.ID)
if err != nil {
return nil, err
}

return io2.AfterReadClosed(str, func(closer io.ReadCloser) {
stgglb.AgentRPCPool.Release(agtCli)
}), nil
func (e *Executor) stopWith(err error) {
e.callback.SetError(err)
e.cancel()
}

func (e *Executor) Wait() (ExecutorResult, error) {
return e.callback.WaitValue(context.TODO())
type ExecutorPlanBuilder struct {
blder *PlanBuilder
ops []ioswitch.Op
}

func (e *Executor) cancelAll() {
for _, cli := range e.mqClis {
cli.CancelIOPlan(agtmq.NewCancelIOPlan(e.plan.ID))
}
type ExecutorStreamVar struct {
blder *PlanBuilder
v *ioswitch.StreamVar
}

func (e *Executor) Close() {
for _, c := range e.mqClis {
stgglb.AgentMQPool.Release(c)
}
type ExecutorStringVar struct {
blder *PlanBuilder
v *ioswitch.StringVar
}

func (e *Executor) pollResult() {
wg := sync.WaitGroup{}
var anyErr error
var done atomic.Bool
rets := make([]*ioswitch.PlanResult, len(e.plan.AgentPlans))

for i, id := range e.planTaskIDs {
idx := i
taskID := id
type ExecutorWriteStream struct {
stream *ioswitch.StreamVar
}

wg.Add(1)
go func() {
defer wg.Done()
func (b *ExecutorPlanBuilder) WillWrite() (ExecutorWriteStream, *ExecutorStreamVar) {
stream := b.blder.newStreamVar()
return ExecutorWriteStream{stream}, &ExecutorStreamVar{blder: b.blder, v: stream}
}

for {
resp, err := e.mqClis[idx].WaitIOPlan(agtmq.NewWaitIOPlan(taskID, 5000))
if err != nil {
anyErr = err
break
}

if resp.IsComplete {
if resp.Error != "" {
anyErr = errors.New(resp.Error)
done.Store(true)
} else {
rets[idx] = &resp.Result
}
break
}

if done.Load() {
break
}
}
}()
}
type ExecutorReadStream struct {
stream *ioswitch.StreamVar
}

wg.Wait()
func (v *ExecutorStreamVar) WillRead() ExecutorReadStream {
return ExecutorReadStream{v.v}
}

if anyErr != nil {
e.callback.SetError(anyErr)
return
}
func (s *ExecutorStringVar) Store(key string) {
s.blder.executorPlan.ops = append(s.blder.executorPlan.ops, &ops.Store{
Var: s.v,
Key: key,
Store: s.blder.storeMap,
})
}

reducedRet := ExecutorResult{
ResultValues: make(map[string]any),
func (s *ExecutorStreamVar) To(node cdssdk.Node) *AgentStreamVar {
s.blder.executorPlan.ops = append(s.blder.executorPlan.ops, &ops.SendStream{Stream: s.v, Node: node})
return &AgentStreamVar{
owner: s.blder.AtAgent(node),
v: s.v,
}
for _, ret := range rets {
for k, v := range ret.Values {
reducedRet.ResultValues[k] = v
}
}

e.callback.SetValue(reducedRet)
}

+ 48
- 63
common/pkgs/ioswitch/plans/plan_builder.go View File

@@ -1,49 +1,19 @@
package plans

import (
"fmt"
"context"
"sync"

"github.com/google/uuid"
"gitlink.org.cn/cloudream/common/pkgs/future"
cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

type StreamInfo struct {
ID ioswitch.StreamID
}

type PlanBuilder struct {
streams []*StreamInfo
agentPlans map[cdssdk.NodeID]*AgentPlanBuilder
}

func (b *PlanBuilder) Build() (*ComposedPlan, error) {
planID := uuid.NewString()

var agentPlans []AgentPlan
for _, b := range b.agentPlans {
plan, err := b.Build(ioswitch.PlanID(planID))
if err != nil {
return nil, err
}

agentPlans = append(agentPlans, plan)
}

return &ComposedPlan{
ID: ioswitch.PlanID(planID),
AgentPlans: agentPlans,
}, nil
}

func (b *PlanBuilder) newStream() *StreamInfo {
str := &StreamInfo{
ID: ioswitch.StreamID(fmt.Sprintf("%d", len(b.streams)+1)),
}

b.streams = append(b.streams, str)

return str
vars []ioswitch.Var
agentPlans map[cdssdk.NodeID]*AgentPlanBuilder
executorPlan ExecutorPlanBuilder
storeMap *sync.Map
}

func NewPlanBuilder() PlanBuilder {
@@ -52,18 +22,15 @@ func NewPlanBuilder() PlanBuilder {
}
}

func (b *PlanBuilder) FromExecutor() *FromExecutorStream {
return &FromExecutorStream{
owner: b,
info: b.newStream(),
}
func (b *PlanBuilder) AtExecutor() *ExecutorPlanBuilder {
return &b.executorPlan
}

func (b *PlanBuilder) AtAgent(node cdssdk.Node) *AgentPlanBuilder {
agtPlan, ok := b.agentPlans[node.NodeID]
if !ok {
agtPlan = &AgentPlanBuilder{
owner: b,
blder: b,
node: node,
}
b.agentPlans[node.NodeID] = agtPlan
@@ -72,33 +39,51 @@ func (b *PlanBuilder) AtAgent(node cdssdk.Node) *AgentPlanBuilder {
return agtPlan
}

type FromExecutorStream struct {
owner *PlanBuilder
info *StreamInfo
toNode *cdssdk.Node
}
func (b *PlanBuilder) Execute() *Executor {
ctx, cancel := context.WithCancel(context.Background())
planID := genRandomPlanID()

func (s *FromExecutorStream) ToNode(node cdssdk.Node) *AgentStream {
s.toNode = &node
return &AgentStream{
owner: s.owner.AtAgent(node),
info: s.info,
execPlan := ioswitch.Plan{
ID: planID,
Ops: b.executorPlan.ops,
}
}

type ToExecutorStream struct {
info *StreamInfo
fromNode *cdssdk.Node
exec := Executor{
planID: planID,
plan: b,
callback: future.NewSetVoid(),
ctx: ctx,
cancel: cancel,
executorSw: ioswitch.NewSwitch(execPlan),
}
go exec.execute()

return &exec
}

type MultiStream struct {
Streams []*AgentStream
func (b *PlanBuilder) newStreamVar() *ioswitch.StreamVar {
v := &ioswitch.StreamVar{
ID: ioswitch.VarID(len(b.vars)),
}
b.vars = append(b.vars, v)

return v
}

func (m *MultiStream) Count() int {
return len(m.Streams)
func (b *PlanBuilder) newIntVar() *ioswitch.IntVar {
v := &ioswitch.IntVar{
ID: ioswitch.VarID(len(b.vars)),
}
b.vars = append(b.vars, v)

return v
}

func (m *MultiStream) Stream(index int) *AgentStream {
return m.Streams[index]
func (b *PlanBuilder) newStringVar() *ioswitch.StringVar {
v := &ioswitch.StringVar{
ID: ioswitch.VarID(len(b.vars)),
}
b.vars = append(b.vars, v)

return v
}

+ 10
- 0
common/pkgs/ioswitch/plans/utils.go View File

@@ -0,0 +1,10 @@
package plans

import (
"github.com/google/uuid"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

// genRandomPlanID returns a new, globally-unique PlanID backed by a freshly
// generated random UUID string.
func genRandomPlanID() ioswitch.PlanID {
	return ioswitch.PlanID(uuid.NewString())
}

+ 89
- 232
common/pkgs/ioswitch/switch.go View File

@@ -2,293 +2,150 @@ package ioswitch

import (
"context"
"errors"
"fmt"
"sync"

"gitlink.org.cn/cloudream/common/pkgs/future"
"gitlink.org.cn/cloudream/common/pkgs/logger"
"gitlink.org.cn/cloudream/common/utils/lo2"
"gitlink.org.cn/cloudream/common/utils/sync2"
)

var ErrPlanFinished = errors.New("plan is finished")

var ErrPlanNotFound = errors.New("plan not found")

type OpState string

const (
OpPending OpState = "Pending"
OpFinished OpState = "Finished"
)

type Oping struct {
State OpState
}

type PlanResult struct {
Values map[string]any `json:"values"`
}

type Planning struct {
plan Plan
opings []Oping
resultValues map[string]any
callback *future.SetValueFuture[PlanResult]

readys map[StreamID]Stream
waittings []*Watting
type bindingVars struct {
Waittings []Var
Bindeds []Var
Callback *future.SetVoidFuture
}

func NewPlanning(plan Plan) Planning {
planning := Planning{
plan: plan,
resultValues: make(map[string]any),
callback: future.NewSetValue[PlanResult](),
readys: make(map[StreamID]Stream),
}

for range plan.Ops {
oping := Oping{
State: OpPending,
}
planning.opings = append(planning.opings, oping)
}

return planning
type Switch struct {
plan Plan
vars map[VarID]Var
bindings []*bindingVars
lock sync.Mutex
}

func (p *Planning) IsCompleted() bool {
for _, oping := range p.opings {
if oping.State != OpFinished {
return false
}
func NewSwitch(plan Plan) *Switch {
planning := Switch{
plan: plan,
vars: make(map[VarID]Var),
}

return true
return &planning
}

func (p *Planning) MakeResult() PlanResult {
return PlanResult{
Values: p.resultValues,
}
func (s *Switch) Plan() *Plan {
return &s.plan
}

type Watting struct {
WaitIDs []StreamID
Readys []Stream
Callback *future.SetValueFuture[[]Stream]
}
func (s *Switch) Run(ctx context.Context) error {
ctx2, cancel := context.WithCancel(ctx)
defer cancel()

func (w *Watting) TryReady(str Stream) bool {
for i, id := range w.WaitIDs {
if id == str.ID {
w.Readys[i] = str
return true
}
}
return sync2.ParallelDo(s.plan.Ops, func(o Op, idx int) error {
err := o.Execute(ctx2, s)

return false
}
s.lock.Lock()
defer s.lock.Unlock()

func (c *Watting) IsAllReady() bool {
for _, s := range c.Readys {
if s.Stream == nil {
return false
if err != nil {
cancel()
return err
}
}

return true
}

func (w *Watting) Complete() {
w.Callback.SetValue(w.Readys)
}

func (w *Watting) Cancel(err error) {
w.Callback.SetError(err)
}

type Switch struct {
lock sync.Mutex
plannings map[PlanID]*Planning
}

func NewSwitch() Switch {
return Switch{
plannings: make(map[PlanID]*Planning),
}
return nil
})
}

func (s *Switch) SetupPlan(plan Plan) error {
func (s *Switch) BindVars(ctx context.Context, vs ...Var) error {
s.lock.Lock()
defer s.lock.Unlock()

if _, ok := s.plannings[plan.ID]; ok {
return fmt.Errorf("plan id exists")
callback := future.NewSetVoid()
binding := &bindingVars{
Callback: callback,
}

planning := NewPlanning(plan)
s.plannings[plan.ID] = &planning
return nil
}
for _, v := range vs {
v2 := s.vars[v.GetID()]
if v2 == nil {
binding.Waittings = append(binding.Waittings, v)
continue
}

func (s *Switch) ExecutePlan(id PlanID) (PlanResult, error) {
s.lock.Lock()
if err := AssignVar(v2, v); err != nil {
s.lock.Unlock()
return fmt.Errorf("assign var %v to %v: %w", v2.GetID(), v.GetID(), err)
}

planning, ok := s.plannings[id]
if !ok {
s.lock.Unlock()
return PlanResult{}, fmt.Errorf("plan not found")
binding.Bindeds = append(binding.Bindeds, v)
}

for i, op := range planning.plan.Ops {
idx := i
o := op
go func() {
err := o.Execute(s, id)

s.lock.Lock()
defer s.lock.Unlock()

if err != nil {
logger.Std.Warnf("exeucting op: %s", err.Error())
s.cancelPlan(id)
return
}

planning.opings[idx].State = OpFinished
if planning.IsCompleted() {
s.completePlan(id)
}
}()
if len(binding.Waittings) == 0 {
s.lock.Unlock()
return nil
}

s.bindings = append(s.bindings, binding)
s.lock.Unlock()

return planning.callback.WaitValue(context.TODO())
}
err := callback.Wait(ctx)

func (s *Switch) CancelPlan(id PlanID) {
s.lock.Lock()
defer s.lock.Unlock()

s.cancelPlan(id)
}

func (s *Switch) cancelPlan(id PlanID) {
plan, ok := s.plannings[id]
if !ok {
return
}

delete(s.plannings, id)

for _, s := range plan.readys {
s.Stream.Close()
}

for _, c := range plan.waittings {
c.Callback.SetError(ErrPlanFinished)
}
s.bindings = lo2.Remove(s.bindings, binding)

plan.callback.SetError(fmt.Errorf("plan cancelled"))
return err
}

func (s *Switch) completePlan(id PlanID) {
plan, ok := s.plannings[id]
if !ok {
return
}

delete(s.plannings, id)

for _, s := range plan.readys {
s.Stream.Close()
}

for _, c := range plan.waittings {
c.Callback.SetError(ErrPlanFinished)
}

plan.callback.SetValue(plan.MakeResult())
}

func (s *Switch) StreamReady(planID PlanID, stream Stream) {
func (s *Switch) PutVars(vs ...Var) {
s.lock.Lock()
defer s.lock.Unlock()

plan, ok := s.plannings[planID]
if !ok {
//TODO 处理错误
return
}

for i, wa := range plan.waittings {
if !wa.TryReady(stream) {
continue
}
loop:
for _, v := range vs {
for _, b := range s.bindings {
for i, w := range b.Waittings {
if w.GetID() != v.GetID() {
continue
}

if err := AssignVar(w, v); err != nil {
b.Callback.SetError(fmt.Errorf("assign var %v to %v: %w", v.GetID(), w.GetID(), err))
// 绑定类型不对,说明生成的执行计划有问题,怎么处理都可以,因为最终会执行失败
continue loop
}

b.Bindeds = append(b.Bindeds, w)
b.Waittings = lo2.RemoveAt(b.Waittings, i)
if len(b.Waittings) == 0 {
b.Callback.SetVoid()
s.bindings = lo2.RemoveAt(s.bindings, i)
}

// 绑定成功,继续最外层循环
continue loop
}

if !wa.IsAllReady() {
return
}

plan.waittings = lo2.RemoveAt(plan.waittings, i)
wa.Complete()
return
// 如果没有绑定,则直接放入变量表中
s.vars[v.GetID()] = v
}

plan.readys[stream.ID] = stream
}

func (s *Switch) WaitStreams(planID PlanID, streamIDs ...StreamID) ([]Stream, error) {
s.lock.Lock()

plan, ok := s.plannings[planID]
if !ok {
s.lock.Unlock()
return nil, ErrPlanNotFound
}

allReady := true
readys := make([]Stream, len(streamIDs))
for i, id := range streamIDs {
str, ok := plan.readys[id]
if !ok {
allReady = false
continue
}

readys[i] = str
delete(plan.readys, id)
func BindArrayVars[T Var](sw *Switch, ctx context.Context, vs []T) error {
var vs2 []Var
for _, v := range vs {
vs2 = append(vs2, v)
}

if allReady {
s.lock.Unlock()
return readys, nil
}

callback := future.NewSetValue[[]Stream]()

plan.waittings = append(plan.waittings, &Watting{
WaitIDs: streamIDs,
Readys: readys,
Callback: callback,
})
s.lock.Unlock()

return callback.WaitValue(context.TODO())
return sw.BindVars(ctx, vs2...)
}

func (s *Switch) AddResultValue(planID PlanID, rets ...ResultKV) {
s.lock.Lock()
defer s.lock.Unlock()

plan, ok := s.plannings[planID]
if !ok {
return
func PutArrayVars[T Var](sw *Switch, vs []T) {
var vs2 []Var
for _, v := range vs {
vs2 = append(vs2, v)
}

for _, ret := range rets {
plan.resultValues[ret.Key] = ret.Value
}
sw.PutVars(vs2...)
}

+ 21
- 0
common/pkgs/ioswitch/utils.go View File

@@ -0,0 +1,21 @@
package ioswitch

import (
"fmt"
"reflect"
)

// AssignVar copies the payload of from into to. Both arguments must have the
// same concrete type; otherwise an error is returned.
//
// Fix: the original type switch only covered *IntVar and *StringVar, so
// assigning a *StreamVar "succeeded" without transferring its Stream field —
// the receiving variable's Stream stayed nil even though ops read it right
// after binding (e.g. deferred Close calls and the executor's BeginRead).
// NOTE(review): the StreamVar case assumes its payload field is Stream, as
// used elsewhere in this module — confirm against the StreamVar declaration.
func AssignVar(from Var, to Var) error {
	if reflect.TypeOf(from) != reflect.TypeOf(to) {
		return fmt.Errorf("cannot assign %T to %T", from, to)
	}

	switch from := from.(type) {
	case *IntVar:
		to.(*IntVar).Value = from.Value
	case *StringVar:
		to.(*StringVar).Value = from.Value
	case *StreamVar:
		// Without this case, a bound stream variable never received the
		// actual stream payload.
		to.(*StreamVar).Stream = from.Stream
	}

	return nil
}

+ 0
- 120
common/pkgs/mq/agent/io.go View File

@@ -1,120 +0,0 @@
package agent

import (
"gitlink.org.cn/cloudream/common/pkgs/mq"
"gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
)

type IOService interface {
SetupIOPlan(msg *SetupIOPlan) (*SetupIOPlanResp, *mq.CodeMessage)

StartIOPlan(msg *StartIOPlan) (*StartIOPlanResp, *mq.CodeMessage)

WaitIOPlan(msg *WaitIOPlan) (*WaitIOPlanResp, *mq.CodeMessage)

CancelIOPlan(msg *CancelIOPlan) (*CancelIOPlanResp, *mq.CodeMessage)
}

// 设置io计划
var _ = Register(Service.SetupIOPlan)

type SetupIOPlan struct {
mq.MessageBodyBase
Plan ioswitch.Plan `json:"plan"`
}
type SetupIOPlanResp struct {
mq.MessageBodyBase
}

func NewSetupIOPlan(plan ioswitch.Plan) *SetupIOPlan {
return &SetupIOPlan{
Plan: plan,
}
}
func NewSetupIOPlanResp() *SetupIOPlanResp {
return &SetupIOPlanResp{}
}
func (client *Client) SetupIOPlan(msg *SetupIOPlan, opts ...mq.RequestOption) (*SetupIOPlanResp, error) {
return mq.Request(Service.SetupIOPlan, client.rabbitCli, msg, opts...)
}

// 启动io计划
var _ = Register(Service.StartIOPlan)

type StartIOPlan struct {
mq.MessageBodyBase
PlanID ioswitch.PlanID `json:"planID"`
}
type StartIOPlanResp struct {
mq.MessageBodyBase
TaskID string `json:"taskID"`
}

func NewStartIOPlan(planID ioswitch.PlanID) *StartIOPlan {
return &StartIOPlan{
PlanID: planID,
}
}
func NewStartIOPlanResp(taskID string) *StartIOPlanResp {
return &StartIOPlanResp{
TaskID: taskID,
}
}
func (client *Client) StartIOPlan(msg *StartIOPlan, opts ...mq.RequestOption) (*StartIOPlanResp, error) {
return mq.Request(Service.StartIOPlan, client.rabbitCli, msg, opts...)
}

// 启动io计划
var _ = Register(Service.WaitIOPlan)

type WaitIOPlan struct {
mq.MessageBodyBase
TaskID string `json:"taskID"`
WaitTimeoutMs int64 `json:"waitTimeout"`
}
type WaitIOPlanResp struct {
mq.MessageBodyBase
IsComplete bool `json:"isComplete"`
Error string `json:"error"`
Result ioswitch.PlanResult `json:"result"`
}

func NewWaitIOPlan(taskID string, waitTimeoutMs int64) *WaitIOPlan {
return &WaitIOPlan{
TaskID: taskID,
WaitTimeoutMs: waitTimeoutMs,
}
}
func NewWaitIOPlanResp(isComplete bool, err string, result ioswitch.PlanResult) *WaitIOPlanResp {
return &WaitIOPlanResp{
IsComplete: isComplete,
Error: err,
Result: result,
}
}
func (client *Client) WaitIOPlan(msg *WaitIOPlan, opts ...mq.RequestOption) (*WaitIOPlanResp, error) {
return mq.Request(Service.WaitIOPlan, client.rabbitCli, msg, opts...)
}

// 取消io计划
var _ = Register(Service.CancelIOPlan)

type CancelIOPlan struct {
mq.MessageBodyBase
PlanID ioswitch.PlanID `json:"planID"`
}
type CancelIOPlanResp struct {
mq.MessageBodyBase
}

func NewCancelIOPlan(planID ioswitch.PlanID) *CancelIOPlan {
return &CancelIOPlan{
PlanID: planID,
}
}
func NewCancelIOPlanResp() *CancelIOPlanResp {
return &CancelIOPlanResp{}
}
func (client *Client) CancelIOPlan(msg *CancelIOPlan, opts ...mq.RequestOption) (*CancelIOPlanResp, error) {
return mq.Request(Service.CancelIOPlan, client.rabbitCli, msg, opts...)
}

+ 0
- 2
common/pkgs/mq/agent/server.go View File

@@ -6,8 +6,6 @@ import (
)

type Service interface {
IOService

ObjectService

StorageService


+ 1
- 0
go.mod View File

@@ -92,6 +92,7 @@ require (
golang.org/x/crypto v0.9.0 // indirect
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sync v0.1.0
golang.org/x/sys v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect


+ 2
- 0
go.sum View File

@@ -217,6 +217,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=


+ 14
- 42
scanner/internal/event/check_package_redundancy.go View File

@@ -1,6 +1,7 @@
package event

import (
"context"
"fmt"
"strconv"
"time"
@@ -414,21 +415,12 @@ func (t *CheckPackageRedundancy) noneToEC(obj stgmod.ObjectDetail, red *cdssdk.E

planBlder := plans.NewPlanBuilder()
inputStrs := planBlder.AtAgent(getNodes.Nodes[0]).IPFSRead(obj.Object.FileHash).ChunkedSplit(red.ChunkSize, red.K, true)
outputStrs := planBlder.AtAgent(getNodes.Nodes[0]).ECReconstructAny(*red, lo.Range(red.K), lo.Range(red.N), inputStrs.Streams...)
outputStrs := planBlder.AtAgent(getNodes.Nodes[0]).ECReconstructAny(*red, lo.Range(red.K), lo.Range(red.N), inputStrs)
for i := 0; i < red.N; i++ {
outputStrs.Stream(i).GRPCSend(uploadNodes[i].Node).IPFSWrite(fmt.Sprintf("%d", i))
}
plan, err := planBlder.Build()
if err != nil {
return nil, fmt.Errorf("building io plan: %w", err)
}

exec, err := plans.Execute(*plan)
if err != nil {
return nil, fmt.Errorf("executing io plan: %w", err)
outputStrs[i].To(uploadNodes[i].Node).IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d", i))
}

ioRet, err := exec.Wait()
ioRet, err := planBlder.Execute().Wait(context.TODO())
if err != nil {
return nil, fmt.Errorf("executing io plan: %w", err)
}
@@ -439,7 +431,7 @@ func (t *CheckPackageRedundancy) noneToEC(obj stgmod.ObjectDetail, red *cdssdk.E
ObjectID: obj.Object.ObjectID,
Index: i,
NodeID: uploadNodes[i].Node.NodeID,
FileHash: ioRet.ResultValues[fmt.Sprintf("%d", i)].(string),
FileHash: ioRet[fmt.Sprintf("%d", i)].(string),
})
}

@@ -522,26 +514,16 @@ func (t *CheckPackageRedundancy) ecToRep(obj stgmod.ObjectDetail, srcRed *cdssdk
for i := range uploadNodes {
tarNode := planBlder.AtAgent(uploadNodes[i].Node)

var inputs []*plans.AgentStream
var inputs []*plans.AgentStreamVar
for _, block := range chosenBlocks {
inputs = append(inputs, tarNode.IPFSRead(block.FileHash))
}

outputs := tarNode.ECReconstruct(*srcRed, chosenBlockIndexes, inputs...)
tarNode.ChunkedJoin(srcRed.ChunkSize, outputs.Streams...).Length(obj.Object.Size).IPFSWrite(fmt.Sprintf("%d", i))
}

plan, err := planBlder.Build()
if err != nil {
return nil, fmt.Errorf("building io plan: %w", err)
}

exec, err := plans.Execute(*plan)
if err != nil {
return nil, fmt.Errorf("executing io plan: %w", err)
outputs := tarNode.ECReconstruct(*srcRed, chosenBlockIndexes, inputs)
tarNode.ChunkedJoin(srcRed.ChunkSize, outputs).Length(obj.Object.Size).IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d", i))
}

ioRet, err := exec.Wait()
ioRet, err := planBlder.Execute().Wait(context.TODO())
if err != nil {
return nil, fmt.Errorf("executing io plan: %w", err)
}
@@ -552,7 +534,7 @@ func (t *CheckPackageRedundancy) ecToRep(obj stgmod.ObjectDetail, srcRed *cdssdk
ObjectID: obj.Object.ObjectID,
Index: 0,
NodeID: uploadNodes[i].Node.NodeID,
FileHash: ioRet.ResultValues[fmt.Sprintf("%d", i)].(string),
FileHash: ioRet[fmt.Sprintf("%d", i)].(string),
})
}

@@ -615,28 +597,18 @@ func (t *CheckPackageRedundancy) ecToEC(obj stgmod.ObjectDetail, srcRed *cdssdk.
// 否则就要重建出这个节点需要的块
tarNode := planBlder.AtAgent(node.Node)

var inputs []*plans.AgentStream
var inputs []*plans.AgentStreamVar
for _, block := range chosenBlocks {
inputs = append(inputs, tarNode.IPFSRead(block.FileHash))
}

// 输出只需要自己要保存的那一块
tarNode.ECReconstructAny(*srcRed, chosenBlockIndexes, []int{i}, inputs...).Stream(0).IPFSWrite(fmt.Sprintf("%d", i))
tarNode.ECReconstructAny(*srcRed, chosenBlockIndexes, []int{i}, inputs)[0].IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d", i))
newBlocks = append(newBlocks, newBlock)
}

plan, err := planBlder.Build()
if err != nil {
return nil, fmt.Errorf("building io plan: %w", err)
}

exec, err := plans.Execute(*plan)
if err != nil {
return nil, fmt.Errorf("executing io plan: %w", err)
}

// 如果没有任何Plan,Wait会直接返回成功
ret, err := exec.Wait()
ret, err := planBlder.Execute().Wait(context.TODO())
if err != nil {
return nil, fmt.Errorf("executing io plan: %w", err)
}
@@ -645,7 +617,7 @@ func (t *CheckPackageRedundancy) ecToEC(obj stgmod.ObjectDetail, srcRed *cdssdk.
return nil, nil
}

for k, v := range ret.ResultValues {
for k, v := range ret {
idx, err := strconv.ParseInt(k, 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing result key %s as index: %w", k, err)


+ 15
- 21
scanner/internal/event/clean_pinned.go View File

@@ -1,6 +1,7 @@
package event

import (
"context"
"fmt"
"math"
"math/rand"
@@ -107,6 +108,7 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {

planBld := plans.NewPlanBuilder()
pinPlans := make(map[cdssdk.NodeID]*[]string)
plnningNodeIDs := make(map[cdssdk.NodeID]bool)

// 对于rep对象,统计出所有对象块分布最多的两个节点,用这两个节点代表所有rep对象块的分布,去进行退火算法
var repObjectsUpdating []coormq.UpdatingObjectRedundancy
@@ -131,10 +133,10 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) {
pinnedAt: obj.PinnedAt,
blocks: obj.Blocks,
})
ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allNodeInfos, solu, obj, &planBld))
ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allNodeInfos, solu, obj, &planBld, plnningNodeIDs))
}

ioSwRets, err := t.executePlans(execCtx, pinPlans, &planBld)
ioSwRets, err := t.executePlans(execCtx, pinPlans, &planBld, plnningNodeIDs)
if err != nil {
log.Warn(err.Error())
return
@@ -748,7 +750,7 @@ func (t *CleanPinned) makePlansForRepObject(solu annealingSolution, obj stgmod.O
return entry
}

func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node, solu annealingSolution, obj stgmod.ObjectDetail, planBld *plans.PlanBuilder) coormq.UpdatingObjectRedundancy {
func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node, solu annealingSolution, obj stgmod.ObjectDetail, planBld *plans.PlanBuilder, planningNodeIDs map[cdssdk.NodeID]bool) coormq.UpdatingObjectRedundancy {
entry := coormq.UpdatingObjectRedundancy{
ObjectID: obj.Object.ObjectID,
Redundancy: obj.Object.Redundancy,
@@ -784,29 +786,26 @@ func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssd
agt := planBld.AtAgent(*allNodeInfos[id])

strs := agt.IPFSRead(obj.Object.FileHash).ChunkedSplit(ecRed.ChunkSize, ecRed.K, true)
ss := agt.ECReconstructAny(*ecRed, lo.Range(ecRed.K), *idxs, strs.Streams...)
for i, s := range ss.Streams {
s.IPFSWrite(fmt.Sprintf("%d.%d", obj.Object.ObjectID, (*idxs)[i]))
ss := agt.ECReconstructAny(*ecRed, lo.Range(ecRed.K), *idxs, strs)
for i, s := range ss {
s.IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d.%d", obj.Object.ObjectID, (*idxs)[i]))
}

planningNodeIDs[id] = true
}
return entry
}

func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.NodeID]*[]string, planBld *plans.PlanBuilder) (map[string]any, error) {
func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.NodeID]*[]string, planBld *plans.PlanBuilder, plnningNodeIDs map[cdssdk.NodeID]bool) (map[string]any, error) {
log := logger.WithType[CleanPinned]("Event")

ioPlan, err := planBld.Build()
if err != nil {
return nil, fmt.Errorf("building io switch plan: %w", err)
}

// 统一加锁,有重复也没关系
lockBld := reqbuilder.NewBuilder()
for nodeID := range pinPlans {
lockBld.IPFS().Buzy(nodeID)
}
for _, plan := range ioPlan.AgentPlans {
lockBld.IPFS().Buzy(plan.Node.NodeID)
for id := range plnningNodeIDs {
lockBld.IPFS().Buzy(id)
}
lock, err := lockBld.MutexLock(execCtx.Args.DistLock)
if err != nil {
@@ -845,17 +844,12 @@ func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.N
go func() {
defer wg.Done()

exec, err := plans.Execute(*ioPlan)
ret, err := planBld.Execute().Wait(context.TODO())
if err != nil {
ioSwErr = fmt.Errorf("executing io switch plan: %w", err)
return
}
ret, err := exec.Wait()
if err != nil {
ioSwErr = fmt.Errorf("waiting io switch plan: %w", err)
return
}
ioSwRets = ret.ResultValues
ioSwRets = ret
}()

wg.Wait()


Loading…
Cancel
Save