| @@ -1,32 +1,70 @@ | |||
| package grpc | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| agentserver "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error { | |||
| func (s *Service) ExecuteIOPlan(ctx context.Context, req *agtrpc.ExecuteIOPlanReq) (*agtrpc.ExecuteIOPlanResp, error) { | |||
| plan, err := serder.JSONToObjectEx[ioswitch.Plan]([]byte(req.Plan)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("deserializing plan: %w", err) | |||
| } | |||
| logger.WithField("PlanID", plan.ID).Infof("begin execute io plan") | |||
| defer logger.WithField("PlanID", plan.ID).Infof("plan finished") | |||
| sw := ioswitch.NewSwitch(plan) | |||
| s.swMgr.Add(sw) | |||
| defer s.swMgr.Remove(sw) | |||
| err = sw.Run(ctx) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("running io plan: %w", err) | |||
| } | |||
| return &agtrpc.ExecuteIOPlanResp{}, nil | |||
| } | |||
| func (s *Service) SendStream(server agtrpc.Agent_SendStreamServer) error { | |||
| msg, err := server.Recv() | |||
| if err != nil { | |||
| return fmt.Errorf("recving stream id packet: %w", err) | |||
| } | |||
| if msg.Type != agentserver.StreamDataPacketType_SendArgs { | |||
| if msg.Type != agtrpc.StreamDataPacketType_SendArgs { | |||
| return fmt.Errorf("first packet must be a SendArgs packet") | |||
| } | |||
| logger. | |||
| WithField("PlanID", msg.PlanID). | |||
| WithField("StreamID", msg.StreamID). | |||
| Debugf("receive stream from grpc") | |||
| WithField("VarID", msg.VarID). | |||
| Debugf("receive stream") | |||
| // 同一批Plan中每个节点的Plan的启动时间有先后,但最多不应该超过30秒 | |||
| ctx, cancel := context.WithTimeout(server.Context(), time.Second*30) | |||
| defer cancel() | |||
| sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(msg.PlanID)) | |||
| if sw == nil { | |||
| return fmt.Errorf("plan not found") | |||
| } | |||
| pr, pw := io.Pipe() | |||
| s.sw.StreamReady(ioswitch.PlanID(msg.PlanID), ioswitch.NewStream(ioswitch.StreamID(msg.StreamID), pr)) | |||
| varID := ioswitch.VarID(msg.VarID) | |||
| sw.PutVars(&ioswitch.StreamVar{ | |||
| ID: varID, | |||
| Stream: pr, | |||
| }) | |||
| // 然后读取文件数据 | |||
| var recvSize int64 | |||
| @@ -36,16 +74,17 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error { | |||
| // 读取客户端数据失败 | |||
| // 即使err是io.EOF,只要没有收到客户端包含EOF数据包就被断开了连接,就认为接收失败 | |||
| if err != nil { | |||
| // 关闭文件写入,不需要返回的hash和error | |||
| // 关闭文件写入 | |||
| pw.CloseWithError(io.ErrClosedPipe) | |||
| logger.WithField("ReceiveSize", recvSize). | |||
| WithField("VarID", varID). | |||
| Warnf("recv message failed, err: %s", err.Error()) | |||
| return fmt.Errorf("recv message failed, err: %w", err) | |||
| } | |||
| err = io2.WriteAll(pw, msg.Data) | |||
| if err != nil { | |||
| // 关闭文件写入,不需要返回的hash和error | |||
| // 关闭文件写入 | |||
| pw.CloseWithError(io.ErrClosedPipe) | |||
| logger.Warnf("write data to file failed, err: %s", err.Error()) | |||
| return fmt.Errorf("write data to file failed, err: %w", err) | |||
| @@ -53,7 +92,7 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error { | |||
| recvSize += int64(len(msg.Data)) | |||
| if msg.Type == agentserver.StreamDataPacketType_EOF { | |||
| if msg.Type == agtrpc.StreamDataPacketType_EOF { | |||
| // 客户端明确说明文件传输已经结束,那么结束写入,获得文件Hash | |||
| err := pw.Close() | |||
| if err != nil { | |||
| @@ -62,7 +101,7 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error { | |||
| } | |||
| // 并将结果返回到客户端 | |||
| err = server.SendAndClose(&agentserver.SendStreamResp{}) | |||
| err = server.SendAndClose(&agtrpc.SendStreamResp{}) | |||
| if err != nil { | |||
| logger.Warnf("send response failed, err: %s", err.Error()) | |||
| return fmt.Errorf("send response failed, err: %w", err) | |||
| @@ -73,39 +112,47 @@ func (s *Service) SendStream(server agentserver.Agent_SendStreamServer) error { | |||
| } | |||
| } | |||
| func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserver.Agent_FetchStreamServer) error { | |||
| func (s *Service) GetStream(req *agtrpc.GetStreamReq, server agtrpc.Agent_GetStreamServer) error { | |||
| logger. | |||
| WithField("PlanID", req.PlanID). | |||
| WithField("StreamID", req.StreamID). | |||
| Debugf("send stream by grpc") | |||
| WithField("VarID", req.VarID). | |||
| Debugf("send stream") | |||
| // 同上 | |||
| ctx, cancel := context.WithTimeout(server.Context(), time.Second*30) | |||
| defer cancel() | |||
| strs, err := s.sw.WaitStreams(ioswitch.PlanID(req.PlanID), ioswitch.StreamID(req.StreamID)) | |||
| sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(req.PlanID)) | |||
| if sw == nil { | |||
| return fmt.Errorf("plan not found") | |||
| } | |||
| strVar := &ioswitch.StreamVar{ | |||
| ID: ioswitch.VarID(req.VarID), | |||
| } | |||
| err := sw.BindVars(server.Context(), strVar) | |||
| if err != nil { | |||
| logger. | |||
| WithField("PlanID", req.PlanID). | |||
| WithField("StreamID", req.StreamID). | |||
| Warnf("watting stream: %s", err.Error()) | |||
| return fmt.Errorf("watting stream: %w", err) | |||
| return fmt.Errorf("binding vars: %w", err) | |||
| } | |||
| reader := strs[0].Stream | |||
| reader := strVar.Stream | |||
| defer reader.Close() | |||
| buf := make([]byte, 4096) | |||
| buf := make([]byte, 1024*64) | |||
| readAllCnt := 0 | |||
| for { | |||
| readCnt, err := reader.Read(buf) | |||
| if readCnt > 0 { | |||
| readAllCnt += readCnt | |||
| err = server.Send(&agentserver.StreamDataPacket{ | |||
| Type: agentserver.StreamDataPacketType_Data, | |||
| err = server.Send(&agtrpc.StreamDataPacket{ | |||
| Type: agtrpc.StreamDataPacketType_Data, | |||
| Data: buf[:readCnt], | |||
| }) | |||
| if err != nil { | |||
| logger. | |||
| WithField("PlanID", req.PlanID). | |||
| WithField("StreamID", req.StreamID). | |||
| WithField("VarID", req.VarID). | |||
| Warnf("send stream data failed, err: %s", err.Error()) | |||
| return fmt.Errorf("send stream data failed, err: %w", err) | |||
| } | |||
| @@ -115,11 +162,11 @@ func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserve | |||
| if err == io.EOF { | |||
| logger. | |||
| WithField("PlanID", req.PlanID). | |||
| WithField("StreamID", req.StreamID). | |||
| WithField("VarID", req.VarID). | |||
| Debugf("send data size %d", readAllCnt) | |||
| // 发送EOF消息 | |||
| server.Send(&agentserver.StreamDataPacket{ | |||
| Type: agentserver.StreamDataPacketType_EOF, | |||
| server.Send(&agtrpc.StreamDataPacket{ | |||
| Type: agtrpc.StreamDataPacketType_EOF, | |||
| }) | |||
| return nil | |||
| } | |||
| @@ -128,9 +175,56 @@ func (s *Service) FetchStream(req *agentserver.FetchStreamReq, server agentserve | |||
| if err != nil && err != io.ErrUnexpectedEOF { | |||
| logger. | |||
| WithField("PlanID", req.PlanID). | |||
| WithField("StreamID", req.StreamID). | |||
| WithField("VarID", req.VarID). | |||
| Warnf("reading stream data: %s", err.Error()) | |||
| return fmt.Errorf("reading stream data: %w", err) | |||
| } | |||
| } | |||
| } | |||
| func (s *Service) SendVar(ctx context.Context, req *agtrpc.SendVarReq) (*agtrpc.SendVarResp, error) { | |||
| ctx, cancel := context.WithTimeout(ctx, time.Second*30) | |||
| defer cancel() | |||
| sw := s.swMgr.FindByIDContexted(ctx, ioswitch.PlanID(req.PlanID)) | |||
| if sw == nil { | |||
| return nil, fmt.Errorf("plan not found") | |||
| } | |||
| v, err := serder.JSONToObjectEx[ioswitch.Var]([]byte(req.Var)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("deserializing var: %w", err) | |||
| } | |||
| sw.PutVars(v) | |||
| return &agtrpc.SendVarResp{}, nil | |||
| } | |||
| func (s *Service) GetVar(ctx context.Context, req *agtrpc.GetVarReq) (*agtrpc.GetVarResp, error) { | |||
| ctx2, cancel := context.WithTimeout(ctx, time.Second*30) | |||
| defer cancel() | |||
| sw := s.swMgr.FindByIDContexted(ctx2, ioswitch.PlanID(req.PlanID)) | |||
| if sw == nil { | |||
| return nil, fmt.Errorf("plan not found") | |||
| } | |||
| v, err := serder.JSONToObjectEx[ioswitch.Var]([]byte(req.Var)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("deserializing var: %w", err) | |||
| } | |||
| err = sw.BindVars(ctx, v) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("binding vars: %w", err) | |||
| } | |||
| vd, err := serder.ObjectToJSONEx(v) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("serializing var: %w", err) | |||
| } | |||
| return &agtrpc.GetVarResp{ | |||
| Var: string(vd), | |||
| }, nil | |||
| } | |||
| @@ -1,140 +1,17 @@ | |||
| package grpc | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| log "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| agentserver "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type Service struct { | |||
| agentserver.AgentServer | |||
| sw *ioswitch.Switch | |||
| swMgr *ioswitch.Manager | |||
| } | |||
| func NewService(sw *ioswitch.Switch) *Service { | |||
| func NewService(swMgr *ioswitch.Manager) *Service { | |||
| return &Service{ | |||
| sw: sw, | |||
| } | |||
| } | |||
| func (s *Service) SendIPFSFile(server agentserver.Agent_SendIPFSFileServer) error { | |||
| log.Debugf("client upload file") | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| log.Warnf("new ipfs client: %s", err.Error()) | |||
| return fmt.Errorf("new ipfs client: %w", err) | |||
| } | |||
| defer ipfsCli.Close() | |||
| writer, err := ipfsCli.CreateFileStream() | |||
| if err != nil { | |||
| log.Warnf("create file failed, err: %s", err.Error()) | |||
| return fmt.Errorf("create file failed, err: %w", err) | |||
| } | |||
| // 然后读取文件数据 | |||
| var recvSize int64 | |||
| for { | |||
| msg, err := server.Recv() | |||
| // 读取客户端数据失败 | |||
| // 即使err是io.EOF,只要没有收到客户端包含EOF数据包就被断开了连接,就认为接收失败 | |||
| if err != nil { | |||
| // 关闭文件写入,不需要返回的hash和error | |||
| writer.Abort(io.ErrClosedPipe) | |||
| log.WithField("ReceiveSize", recvSize). | |||
| Warnf("recv message failed, err: %s", err.Error()) | |||
| return fmt.Errorf("recv message failed, err: %w", err) | |||
| } | |||
| err = io2.WriteAll(writer, msg.Data) | |||
| if err != nil { | |||
| // 关闭文件写入,不需要返回的hash和error | |||
| writer.Abort(io.ErrClosedPipe) | |||
| log.Warnf("write data to file failed, err: %s", err.Error()) | |||
| return fmt.Errorf("write data to file failed, err: %w", err) | |||
| } | |||
| recvSize += int64(len(msg.Data)) | |||
| if msg.Type == agentserver.StreamDataPacketType_EOF { | |||
| // 客户端明确说明文件传输已经结束,那么结束写入,获得文件Hash | |||
| hash, err := writer.Finish() | |||
| if err != nil { | |||
| log.Warnf("finish writing failed, err: %s", err.Error()) | |||
| return fmt.Errorf("finish writing failed, err: %w", err) | |||
| } | |||
| // 并将结果返回到客户端 | |||
| err = server.SendAndClose(&agentserver.SendIPFSFileResp{ | |||
| FileHash: hash, | |||
| }) | |||
| if err != nil { | |||
| log.Warnf("send response failed, err: %s", err.Error()) | |||
| return fmt.Errorf("send response failed, err: %w", err) | |||
| } | |||
| log.Debugf("%d bytes received ", recvSize) | |||
| return nil | |||
| } | |||
| } | |||
| } | |||
| func (s *Service) GetIPFSFile(req *agentserver.GetIPFSFileReq, server agentserver.Agent_GetIPFSFileServer) error { | |||
| log.WithField("FileHash", req.FileHash).Debugf("client download file") | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| if err != nil { | |||
| log.Warnf("new ipfs client: %s", err.Error()) | |||
| return fmt.Errorf("new ipfs client: %w", err) | |||
| } | |||
| defer ipfsCli.Close() | |||
| reader, err := ipfsCli.OpenRead(req.FileHash) | |||
| if err != nil { | |||
| log.Warnf("open file %s to read failed, err: %s", req.FileHash, err.Error()) | |||
| return fmt.Errorf("open file to read failed, err: %w", err) | |||
| } | |||
| defer reader.Close() | |||
| buf := make([]byte, 1024) | |||
| readAllCnt := 0 | |||
| for { | |||
| readCnt, err := reader.Read(buf) | |||
| if readCnt > 0 { | |||
| readAllCnt += readCnt | |||
| err = server.Send(&agentserver.FileDataPacket{ | |||
| Type: agentserver.StreamDataPacketType_Data, | |||
| Data: buf[:readCnt], | |||
| }) | |||
| if err != nil { | |||
| log.WithField("FileHash", req.FileHash). | |||
| Warnf("send file data failed, err: %s", err.Error()) | |||
| return fmt.Errorf("send file data failed, err: %w", err) | |||
| } | |||
| } | |||
| // 文件读取完毕 | |||
| if err == io.EOF { | |||
| log.WithField("FileHash", req.FileHash).Debugf("send data size %d", readAllCnt) | |||
| // 发送EOF消息 | |||
| server.Send(&agentserver.FileDataPacket{ | |||
| Type: agentserver.StreamDataPacketType_EOF, | |||
| }) | |||
| return nil | |||
| } | |||
| // io.ErrUnexpectedEOF没有读满整个buf就遇到了EOF,此时正常发送剩余数据即可。除了这两个错误之外,其他错误都中断操作 | |||
| if err != nil && err != io.ErrUnexpectedEOF { | |||
| log.Warnf("read file %s data failed, err: %s", req.FileHash, err.Error()) | |||
| return fmt.Errorf("read file data failed, err: %w", err) | |||
| } | |||
| swMgr: swMgr, | |||
| } | |||
| } | |||
| @@ -1,65 +0,0 @@ | |||
| package mq | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/consts/errorcode" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mytask "gitlink.org.cn/cloudream/storage/agent/internal/task" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" | |||
| ) | |||
| func (svc *Service) SetupIOPlan(msg *agtmq.SetupIOPlan) (*agtmq.SetupIOPlanResp, *mq.CodeMessage) { | |||
| err := svc.sw.SetupPlan(msg.Plan) | |||
| if err != nil { | |||
| logger.WithField("PlanID", msg.Plan.ID).Warnf("adding plan: %s", err.Error()) | |||
| return nil, mq.Failed(errorcode.OperationFailed, "adding plan failed") | |||
| } | |||
| return mq.ReplyOK(agtmq.NewSetupIOPlanResp()) | |||
| } | |||
| func (svc *Service) StartIOPlan(msg *agtmq.StartIOPlan) (*agtmq.StartIOPlanResp, *mq.CodeMessage) { | |||
| tsk := svc.taskManager.StartNew(mytask.NewExecuteIOPlan(msg.PlanID)) | |||
| return mq.ReplyOK(agtmq.NewStartIOPlanResp(tsk.ID())) | |||
| } | |||
| func (svc *Service) WaitIOPlan(msg *agtmq.WaitIOPlan) (*agtmq.WaitIOPlanResp, *mq.CodeMessage) { | |||
| tsk := svc.taskManager.FindByID(msg.TaskID) | |||
| if tsk == nil { | |||
| return nil, mq.Failed(errorcode.TaskNotFound, "task not found") | |||
| } | |||
| if msg.WaitTimeoutMs == 0 { | |||
| tsk.Wait() | |||
| errMsg := "" | |||
| if tsk.Error() != nil { | |||
| errMsg = tsk.Error().Error() | |||
| } | |||
| planTsk := tsk.Body().(*mytask.ExecuteIOPlan) | |||
| return mq.ReplyOK(agtmq.NewWaitIOPlanResp(true, errMsg, planTsk.Result)) | |||
| } else { | |||
| if tsk.WaitTimeout(time.Duration(msg.WaitTimeoutMs) * time.Millisecond) { | |||
| errMsg := "" | |||
| if tsk.Error() != nil { | |||
| errMsg = tsk.Error().Error() | |||
| } | |||
| planTsk := tsk.Body().(*mytask.ExecuteIOPlan) | |||
| return mq.ReplyOK(agtmq.NewWaitIOPlanResp(true, errMsg, planTsk.Result)) | |||
| } | |||
| return mq.ReplyOK(agtmq.NewWaitIOPlanResp(false, "", ioswitch.PlanResult{})) | |||
| } | |||
| } | |||
| func (svc *Service) CancelIOPlan(msg *agtmq.CancelIOPlan) (*agtmq.CancelIOPlanResp, *mq.CodeMessage) { | |||
| svc.sw.CancelPlan(msg.PlanID) | |||
| return mq.ReplyOK(agtmq.NewCancelIOPlanResp()) | |||
| } | |||
| @@ -7,12 +7,12 @@ import ( | |||
| type Service struct { | |||
| taskManager *task.Manager | |||
| sw *ioswitch.Switch | |||
| swMgr *ioswitch.Manager | |||
| } | |||
| func NewService(taskMgr *task.Manager, sw *ioswitch.Switch) *Service { | |||
| func NewService(taskMgr *task.Manager, swMgr *ioswitch.Manager) *Service { | |||
| return &Service{ | |||
| taskManager: taskMgr, | |||
| sw: sw, | |||
| swMgr: swMgr, | |||
| } | |||
| } | |||
| @@ -1,45 +0,0 @@ | |||
| package task | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/pkgs/task" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| // TODO 临时使用Task来等待Plan执行进度 | |||
| type ExecuteIOPlan struct { | |||
| PlanID ioswitch.PlanID | |||
| Result ioswitch.PlanResult | |||
| } | |||
| func NewExecuteIOPlan(planID ioswitch.PlanID) *ExecuteIOPlan { | |||
| return &ExecuteIOPlan{ | |||
| PlanID: planID, | |||
| } | |||
| } | |||
| func (t *ExecuteIOPlan) Execute(task *task.Task[TaskContext], ctx TaskContext, complete CompleteFn) { | |||
| log := logger.WithType[ExecuteIOPlan]("Task") | |||
| log.Debugf("begin with %v", logger.FormatStruct(t)) | |||
| defer log.Debugf("end") | |||
| ret, err := ctx.sw.ExecutePlan(t.PlanID) | |||
| if err != nil { | |||
| err := fmt.Errorf("executing io plan: %w", err) | |||
| log.WithField("PlanID", t.PlanID).Warn(err.Error()) | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| return | |||
| } | |||
| t.Result = ret | |||
| complete(nil, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| @@ -10,7 +10,7 @@ import ( | |||
| type TaskContext struct { | |||
| distlock *distlock.Service | |||
| sw *ioswitch.Switch | |||
| swMgr *ioswitch.Manager | |||
| connectivity *connectivity.Collector | |||
| downloader *downloader.Downloader | |||
| } | |||
| @@ -27,10 +27,10 @@ type Task = task.Task[TaskContext] | |||
| type CompleteOption = task.CompleteOption | |||
| func NewManager(distlock *distlock.Service, sw *ioswitch.Switch, connectivity *connectivity.Collector, downloader *downloader.Downloader) Manager { | |||
| func NewManager(distlock *distlock.Service, swMgr *ioswitch.Manager, connectivity *connectivity.Collector, downloader *downloader.Downloader) Manager { | |||
| return task.NewManager(TaskContext{ | |||
| distlock: distlock, | |||
| sw: sw, | |||
| swMgr: swMgr, | |||
| connectivity: connectivity, | |||
| downloader: downloader, | |||
| }) | |||
| @@ -92,7 +92,7 @@ func main() { | |||
| log.Fatalf("new ipfs failed, err: %s", err.Error()) | |||
| } | |||
| sw := ioswitch.NewSwitch() | |||
| sw := ioswitch.NewManager() | |||
| dlder := downloader.NewDownloader(config.Cfg().Downloader, &conCol) | |||
| @@ -0,0 +1,12 @@ | |||
| package stgglb | |||
| import cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| // 根据当前节点与目标地址的距离关系,选择合适的地址 | |||
| func SelectGRPCAddress(node *cdssdk.Node) (string, int) { | |||
| if Local != nil && Local.LocationID == node.LocationID { | |||
| return node.LocalIP, node.LocalGRPCPort | |||
| } | |||
| return node.ExternalIP, node.ExternalGRPCPort | |||
| } | |||
| @@ -1,6 +1,7 @@ | |||
| package cmd | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "math" | |||
| @@ -17,6 +18,7 @@ import ( | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/connectivity" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock/reqbuilder" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/plans" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/iterator" | |||
| agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" | |||
| coormq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/coordinator" | |||
| @@ -227,32 +229,27 @@ func uploadFile(file io.Reader, uploadNode UploadNodeInfo) (string, error) { | |||
| } | |||
| // 否则发送到agent上传 | |||
| // 如果客户端与节点在同一个地域,则使用内网地址连接节点 | |||
| nodeIP := uploadNode.Node.ExternalIP | |||
| grpcPort := uploadNode.Node.ExternalGRPCPort | |||
| if uploadNode.IsSameLocation { | |||
| nodeIP = uploadNode.Node.LocalIP | |||
| grpcPort = uploadNode.Node.LocalGRPCPort | |||
| logger.Debugf("client and node %d are at the same location, use local ip", uploadNode.Node.NodeID) | |||
| } | |||
| fileHash, err := uploadToNode(file, nodeIP, grpcPort) | |||
| fileHash, err := uploadToNode(file, uploadNode.Node) | |||
| if err != nil { | |||
| return "", fmt.Errorf("upload to node %s failed, err: %w", nodeIP, err) | |||
| return "", fmt.Errorf("uploading to node %v: %w", uploadNode.Node.NodeID, err) | |||
| } | |||
| return fileHash, nil | |||
| } | |||
| func uploadToNode(file io.Reader, nodeIP string, grpcPort int) (string, error) { | |||
| rpcCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort) | |||
| func uploadToNode(file io.Reader, node cdssdk.Node) (string, error) { | |||
| plan := plans.NewPlanBuilder() | |||
| str, v := plan.AtExecutor().WillWrite() | |||
| v.To(node).IPFSWrite().ToExecutor().Store("fileHash") | |||
| exec := plan.Execute() | |||
| exec.BeginWrite(io.NopCloser(file), str) | |||
| ret, err := exec.Wait(context.TODO()) | |||
| if err != nil { | |||
| return "", fmt.Errorf("new agent rpc client: %w", err) | |||
| return "", err | |||
| } | |||
| defer rpcCli.Close() | |||
| return rpcCli.SendIPFSFile(file) | |||
| return ret["fileHash"].(string), nil | |||
| } | |||
| func uploadToLocalIPFS(file io.Reader, nodeID cdssdk.NodeID, shouldPin bool) (string, error) { | |||
| @@ -1,6 +1,7 @@ | |||
| package downloader | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| @@ -109,22 +110,14 @@ func (r *IPFSReader) fromNode() (io.ReadCloser, error) { | |||
| fileStr := planBld.AtAgent(r.node).IPFSRead(r.fileHash, ipfs.ReadOption{ | |||
| Offset: r.offset, | |||
| Length: -1, | |||
| }).ToExecutor() | |||
| }).ToExecutor().WillRead() | |||
| plan, err := planBld.Build() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("building plan: %w", err) | |||
| } | |||
| waiter, err := plans.Execute(*plan) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("execute plan: %w", err) | |||
| } | |||
| exec := planBld.Execute() | |||
| go func() { | |||
| waiter.Wait() | |||
| exec.Wait(context.Background()) | |||
| }() | |||
| return waiter.ReadStream(fileStr) | |||
| return exec.BeginRead(fileStr) | |||
| } | |||
| func (r *IPFSReader) fromLocalIPFS() (io.ReadCloser, error) { | |||
| @@ -0,0 +1,7 @@ | |||
| package ec | |||
| import "github.com/klauspost/reedsolomon" | |||
| func GaloisMultiplier() *reedsolomon.MultipilerBuilder { | |||
| return &reedsolomon.MultipilerBuilder{} | |||
| } | |||
| @@ -71,18 +71,16 @@ func (StreamDataPacketType) EnumDescriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{0} | |||
| } | |||
| // 文件数据。注意:只在Type为Data的时候,Data字段才能有数据 | |||
| type FileDataPacket struct { | |||
| type ExecuteIOPlanReq struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"` | |||
| Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"` | |||
| Plan string `protobuf:"bytes,1,opt,name=Plan,proto3" json:"Plan,omitempty"` | |||
| } | |||
| func (x *FileDataPacket) Reset() { | |||
| *x = FileDataPacket{} | |||
| func (x *ExecuteIOPlanReq) Reset() { | |||
| *x = ExecuteIOPlanReq{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[0] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -90,13 +88,13 @@ func (x *FileDataPacket) Reset() { | |||
| } | |||
| } | |||
| func (x *FileDataPacket) String() string { | |||
| func (x *ExecuteIOPlanReq) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*FileDataPacket) ProtoMessage() {} | |||
| func (*ExecuteIOPlanReq) ProtoMessage() {} | |||
| func (x *FileDataPacket) ProtoReflect() protoreflect.Message { | |||
| func (x *ExecuteIOPlanReq) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[0] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -108,35 +106,26 @@ func (x *FileDataPacket) ProtoReflect() protoreflect.Message { | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use FileDataPacket.ProtoReflect.Descriptor instead. | |||
| func (*FileDataPacket) Descriptor() ([]byte, []int) { | |||
| // Deprecated: Use ExecuteIOPlanReq.ProtoReflect.Descriptor instead. | |||
| func (*ExecuteIOPlanReq) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{0} | |||
| } | |||
| func (x *FileDataPacket) GetType() StreamDataPacketType { | |||
| func (x *ExecuteIOPlanReq) GetPlan() string { | |||
| if x != nil { | |||
| return x.Type | |||
| return x.Plan | |||
| } | |||
| return StreamDataPacketType_EOF | |||
| } | |||
| func (x *FileDataPacket) GetData() []byte { | |||
| if x != nil { | |||
| return x.Data | |||
| } | |||
| return nil | |||
| return "" | |||
| } | |||
| type SendIPFSFileResp struct { | |||
| type ExecuteIOPlanResp struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| FileHash string `protobuf:"bytes,1,opt,name=FileHash,proto3" json:"FileHash,omitempty"` | |||
| } | |||
| func (x *SendIPFSFileResp) Reset() { | |||
| *x = SendIPFSFileResp{} | |||
| func (x *ExecuteIOPlanResp) Reset() { | |||
| *x = ExecuteIOPlanResp{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[1] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -144,13 +133,13 @@ func (x *SendIPFSFileResp) Reset() { | |||
| } | |||
| } | |||
| func (x *SendIPFSFileResp) String() string { | |||
| func (x *ExecuteIOPlanResp) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*SendIPFSFileResp) ProtoMessage() {} | |||
| func (*ExecuteIOPlanResp) ProtoMessage() {} | |||
| func (x *SendIPFSFileResp) ProtoReflect() protoreflect.Message { | |||
| func (x *ExecuteIOPlanResp) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[1] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -162,28 +151,23 @@ func (x *SendIPFSFileResp) ProtoReflect() protoreflect.Message { | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use SendIPFSFileResp.ProtoReflect.Descriptor instead. | |||
| func (*SendIPFSFileResp) Descriptor() ([]byte, []int) { | |||
| // Deprecated: Use ExecuteIOPlanResp.ProtoReflect.Descriptor instead. | |||
| func (*ExecuteIOPlanResp) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{1} | |||
| } | |||
| func (x *SendIPFSFileResp) GetFileHash() string { | |||
| if x != nil { | |||
| return x.FileHash | |||
| } | |||
| return "" | |||
| } | |||
| type GetIPFSFileReq struct { | |||
| // 文件数据。注意:只在Type为Data的时候,Data字段才能有数据 | |||
| type FileDataPacket struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| FileHash string `protobuf:"bytes,1,opt,name=FileHash,proto3" json:"FileHash,omitempty"` | |||
| Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"` | |||
| Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"` | |||
| } | |||
| func (x *GetIPFSFileReq) Reset() { | |||
| *x = GetIPFSFileReq{} | |||
| func (x *FileDataPacket) Reset() { | |||
| *x = FileDataPacket{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[2] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -191,13 +175,13 @@ func (x *GetIPFSFileReq) Reset() { | |||
| } | |||
| } | |||
| func (x *GetIPFSFileReq) String() string { | |||
| func (x *FileDataPacket) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*GetIPFSFileReq) ProtoMessage() {} | |||
| func (*FileDataPacket) ProtoMessage() {} | |||
| func (x *GetIPFSFileReq) ProtoReflect() protoreflect.Message { | |||
| func (x *FileDataPacket) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[2] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -209,16 +193,23 @@ func (x *GetIPFSFileReq) ProtoReflect() protoreflect.Message { | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use GetIPFSFileReq.ProtoReflect.Descriptor instead. | |||
| func (*GetIPFSFileReq) Descriptor() ([]byte, []int) { | |||
| // Deprecated: Use FileDataPacket.ProtoReflect.Descriptor instead. | |||
| func (*FileDataPacket) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{2} | |||
| } | |||
| func (x *GetIPFSFileReq) GetFileHash() string { | |||
| func (x *FileDataPacket) GetType() StreamDataPacketType { | |||
| if x != nil { | |||
| return x.FileHash | |||
| return x.Type | |||
| } | |||
| return "" | |||
| return StreamDataPacketType_EOF | |||
| } | |||
| func (x *FileDataPacket) GetData() []byte { | |||
| if x != nil { | |||
| return x.Data | |||
| } | |||
| return nil | |||
| } | |||
| // 注:EOF时data也可能有数据 | |||
| @@ -227,10 +218,10 @@ type StreamDataPacket struct { | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"` | |||
| PlanID string `protobuf:"bytes,2,opt,name=PlanID,proto3" json:"PlanID,omitempty"` | |||
| StreamID string `protobuf:"bytes,3,opt,name=StreamID,proto3" json:"StreamID,omitempty"` | |||
| Data []byte `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"` | |||
| Type StreamDataPacketType `protobuf:"varint,1,opt,name=Type,proto3,enum=StreamDataPacketType" json:"Type,omitempty"` | |||
| PlanID string `protobuf:"bytes,2,opt,name=PlanID,proto3" json:"PlanID,omitempty"` | |||
| VarID int32 `protobuf:"varint,3,opt,name=VarID,proto3" json:"VarID,omitempty"` | |||
| Data []byte `protobuf:"bytes,4,opt,name=Data,proto3" json:"Data,omitempty"` | |||
| } | |||
| func (x *StreamDataPacket) Reset() { | |||
| @@ -279,11 +270,11 @@ func (x *StreamDataPacket) GetPlanID() string { | |||
| return "" | |||
| } | |||
| func (x *StreamDataPacket) GetStreamID() string { | |||
| func (x *StreamDataPacket) GetVarID() int32 { | |||
| if x != nil { | |||
| return x.StreamID | |||
| return x.VarID | |||
| } | |||
| return "" | |||
| return 0 | |||
| } | |||
| func (x *StreamDataPacket) GetData() []byte { | |||
| @@ -331,17 +322,17 @@ func (*SendStreamResp) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{4} | |||
| } | |||
| type FetchStreamReq struct { | |||
| type GetStreamReq struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"` | |||
| StreamID string `protobuf:"bytes,2,opt,name=StreamID,proto3" json:"StreamID,omitempty"` | |||
| PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"` | |||
| VarID int32 `protobuf:"varint,2,opt,name=VarID,proto3" json:"VarID,omitempty"` | |||
| } | |||
| func (x *FetchStreamReq) Reset() { | |||
| *x = FetchStreamReq{} | |||
| func (x *GetStreamReq) Reset() { | |||
| *x = GetStreamReq{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[5] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -349,13 +340,13 @@ func (x *FetchStreamReq) Reset() { | |||
| } | |||
| } | |||
| func (x *FetchStreamReq) String() string { | |||
| func (x *GetStreamReq) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*FetchStreamReq) ProtoMessage() {} | |||
| func (*GetStreamReq) ProtoMessage() {} | |||
| func (x *FetchStreamReq) ProtoReflect() protoreflect.Message { | |||
| func (x *GetStreamReq) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[5] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| @@ -367,21 +358,216 @@ func (x *FetchStreamReq) ProtoReflect() protoreflect.Message { | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use FetchStreamReq.ProtoReflect.Descriptor instead. | |||
| func (*FetchStreamReq) Descriptor() ([]byte, []int) { | |||
| // Deprecated: Use GetStreamReq.ProtoReflect.Descriptor instead. | |||
| func (*GetStreamReq) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{5} | |||
| } | |||
| func (x *FetchStreamReq) GetPlanID() string { | |||
| func (x *GetStreamReq) GetPlanID() string { | |||
| if x != nil { | |||
| return x.PlanID | |||
| } | |||
| return "" | |||
| } | |||
| func (x *GetStreamReq) GetVarID() int32 { | |||
| if x != nil { | |||
| return x.VarID | |||
| } | |||
| return 0 | |||
| } | |||
| type SendVarReq struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"` | |||
| Var string `protobuf:"bytes,2,opt,name=Var,proto3" json:"Var,omitempty"` | |||
| } | |||
| func (x *SendVarReq) Reset() { | |||
| *x = SendVarReq{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| } | |||
| func (x *SendVarReq) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*SendVarReq) ProtoMessage() {} | |||
| func (x *SendVarReq) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| if ms.LoadMessageInfo() == nil { | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| return ms | |||
| } | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use SendVarReq.ProtoReflect.Descriptor instead. | |||
| func (*SendVarReq) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{6} | |||
| } | |||
| func (x *SendVarReq) GetPlanID() string { | |||
| if x != nil { | |||
| return x.PlanID | |||
| } | |||
| return "" | |||
| } | |||
| func (x *SendVarReq) GetVar() string { | |||
| if x != nil { | |||
| return x.Var | |||
| } | |||
| return "" | |||
| } | |||
| type SendVarResp struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| } | |||
| func (x *SendVarResp) Reset() { | |||
| *x = SendVarResp{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| } | |||
| func (x *SendVarResp) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*SendVarResp) ProtoMessage() {} | |||
| func (x *SendVarResp) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| if ms.LoadMessageInfo() == nil { | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| return ms | |||
| } | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use SendVarResp.ProtoReflect.Descriptor instead. | |||
| func (*SendVarResp) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{7} | |||
| } | |||
| type GetVarReq struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| PlanID string `protobuf:"bytes,1,opt,name=PlanID,proto3" json:"PlanID,omitempty"` | |||
| Var string `protobuf:"bytes,2,opt,name=Var,proto3" json:"Var,omitempty"` | |||
| } | |||
| func (x *GetVarReq) Reset() { | |||
| *x = GetVarReq{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[8] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| } | |||
| func (x *GetVarReq) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*GetVarReq) ProtoMessage() {} | |||
| func (x *GetVarReq) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[8] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| if ms.LoadMessageInfo() == nil { | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| return ms | |||
| } | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use GetVarReq.ProtoReflect.Descriptor instead. | |||
| func (*GetVarReq) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{8} | |||
| } | |||
| func (x *GetVarReq) GetPlanID() string { | |||
| if x != nil { | |||
| return x.PlanID | |||
| } | |||
| return "" | |||
| } | |||
| func (x *FetchStreamReq) GetStreamID() string { | |||
| func (x *GetVarReq) GetVar() string { | |||
| if x != nil { | |||
| return x.Var | |||
| } | |||
| return "" | |||
| } | |||
| type GetVarResp struct { | |||
| state protoimpl.MessageState | |||
| sizeCache protoimpl.SizeCache | |||
| unknownFields protoimpl.UnknownFields | |||
| Var string `protobuf:"bytes,1,opt,name=Var,proto3" json:"Var,omitempty"` | |||
| } | |||
| func (x *GetVarResp) Reset() { | |||
| *x = GetVarResp{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[9] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| } | |||
| func (x *GetVarResp) String() string { | |||
| return protoimpl.X.MessageStringOf(x) | |||
| } | |||
| func (*GetVarResp) ProtoMessage() {} | |||
| func (x *GetVarResp) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[9] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| if ms.LoadMessageInfo() == nil { | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| return ms | |||
| } | |||
| return mi.MessageOf(x) | |||
| } | |||
| // Deprecated: Use GetVarResp.ProtoReflect.Descriptor instead. | |||
| func (*GetVarResp) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{9} | |||
| } | |||
| func (x *GetVarResp) GetVar() string { | |||
| if x != nil { | |||
| return x.StreamID | |||
| return x.Var | |||
| } | |||
| return "" | |||
| } | |||
| @@ -395,7 +581,7 @@ type PingReq struct { | |||
| func (x *PingReq) Reset() { | |||
| *x = PingReq{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6] | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[10] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| @@ -408,7 +594,7 @@ func (x *PingReq) String() string { | |||
| func (*PingReq) ProtoMessage() {} | |||
| func (x *PingReq) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[6] | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[10] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| if ms.LoadMessageInfo() == nil { | |||
| @@ -421,7 +607,7 @@ func (x *PingReq) ProtoReflect() protoreflect.Message { | |||
| // Deprecated: Use PingReq.ProtoReflect.Descriptor instead. | |||
| func (*PingReq) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{6} | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{10} | |||
| } | |||
| type PingResp struct { | |||
| @@ -433,7 +619,7 @@ type PingResp struct { | |||
| func (x *PingResp) Reset() { | |||
| *x = PingResp{} | |||
| if protoimpl.UnsafeEnabled { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7] | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[11] | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| ms.StoreMessageInfo(mi) | |||
| } | |||
| @@ -446,7 +632,7 @@ func (x *PingResp) String() string { | |||
| func (*PingResp) ProtoMessage() {} | |||
| func (x *PingResp) ProtoReflect() protoreflect.Message { | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[7] | |||
| mi := &file_pkgs_grpc_agent_agent_proto_msgTypes[11] | |||
| if protoimpl.UnsafeEnabled && x != nil { | |||
| ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) | |||
| if ms.LoadMessageInfo() == nil { | |||
| @@ -459,62 +645,70 @@ func (x *PingResp) ProtoReflect() protoreflect.Message { | |||
| // Deprecated: Use PingResp.ProtoReflect.Descriptor instead. | |||
| func (*PingResp) Descriptor() ([]byte, []int) { | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{7} | |||
| return file_pkgs_grpc_agent_agent_proto_rawDescGZIP(), []int{11} | |||
| } | |||
| var File_pkgs_grpc_agent_agent_proto protoreflect.FileDescriptor | |||
| var file_pkgs_grpc_agent_agent_proto_rawDesc = []byte{ | |||
| 0x0a, 0x1b, 0x70, 0x6b, 0x67, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x61, 0x67, 0x65, 0x6e, | |||
| 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4f, 0x0a, | |||
| 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, | |||
| 0x74, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x26, 0x0a, | |||
| 0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, | |||
| 0x71, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x6c, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, | |||
| 0x04, 0x50, 0x6c, 0x61, 0x6e, 0x22, 0x13, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, | |||
| 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x4f, 0x0a, 0x0e, 0x46, 0x69, | |||
| 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x04, | |||
| 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x53, 0x74, 0x72, | |||
| 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, | |||
| 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, | |||
| 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x7f, 0x0a, 0x10, 0x53, | |||
| 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, | |||
| 0x29, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, | |||
| 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, | |||
| 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, | |||
| 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x2e, | |||
| 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, | |||
| 0x73, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, | |||
| 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0x2c, | |||
| 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, | |||
| 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, | |||
| 0x28, 0x09, 0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x22, 0x85, 0x01, 0x0a, | |||
| 0x10, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, | |||
| 0x74, 0x12, 0x29, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, | |||
| 0x15, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, | |||
| 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, | |||
| 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, | |||
| 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, | |||
| 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, | |||
| 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, | |||
| 0x44, 0x61, 0x74, 0x61, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, | |||
| 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x44, 0x0a, 0x0e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x53, | |||
| 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, | |||
| 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, | |||
| 0x12, 0x1a, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, | |||
| 0x28, 0x09, 0x52, 0x08, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x44, 0x22, 0x09, 0x0a, 0x07, | |||
| 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x22, 0x0a, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x52, | |||
| 0x65, 0x73, 0x70, 0x2a, 0x37, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, | |||
| 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45, | |||
| 0x4f, 0x46, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x01, 0x12, 0x0c, | |||
| 0x0a, 0x08, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x72, 0x67, 0x73, 0x10, 0x02, 0x32, 0x80, 0x02, 0x0a, | |||
| 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x36, 0x0a, 0x0c, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, | |||
| 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, | |||
| 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x11, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x49, 0x50, | |||
| 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x33, | |||
| 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x0f, 0x2e, | |||
| 0x47, 0x65, 0x74, 0x49, 0x50, 0x46, 0x53, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0f, | |||
| 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, | |||
| 0x00, 0x30, 0x01, 0x12, 0x34, 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, | |||
| 0x6d, 0x12, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, | |||
| 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x0f, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, | |||
| 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x12, 0x35, 0x0a, 0x0b, 0x46, 0x65, 0x74, | |||
| 0x63, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0f, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, | |||
| 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, | |||
| 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, | |||
| 0x12, 0x1d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x08, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, | |||
| 0x65, 0x71, 0x1a, 0x09, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42, | |||
| 0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, | |||
| 0x6f, 0x33, | |||
| 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, | |||
| 0x61, 0x6e, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, | |||
| 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, | |||
| 0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, | |||
| 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x10, 0x0a, 0x0e, | |||
| 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x22, 0x3c, | |||
| 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x12, 0x16, | |||
| 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, | |||
| 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x18, | |||
| 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x56, 0x61, 0x72, 0x49, 0x44, 0x22, 0x36, 0x0a, 0x0a, | |||
| 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, | |||
| 0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, | |||
| 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, | |||
| 0x03, 0x56, 0x61, 0x72, 0x22, 0x0d, 0x0a, 0x0b, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, | |||
| 0x65, 0x73, 0x70, 0x22, 0x35, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, | |||
| 0x12, 0x16, 0x0a, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, | |||
| 0x52, 0x06, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18, | |||
| 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x56, 0x61, 0x72, 0x22, 0x1e, 0x0a, 0x0a, 0x47, 0x65, | |||
| 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x18, | |||
| 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x56, 0x61, 0x72, 0x22, 0x09, 0x0a, 0x07, 0x50, 0x69, | |||
| 0x6e, 0x67, 0x52, 0x65, 0x71, 0x22, 0x0a, 0x0a, 0x08, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, | |||
| 0x70, 0x2a, 0x37, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, | |||
| 0x61, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x45, 0x4f, 0x46, | |||
| 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, | |||
| 0x53, 0x65, 0x6e, 0x64, 0x41, 0x72, 0x67, 0x73, 0x10, 0x02, 0x32, 0x96, 0x02, 0x0a, 0x05, 0x41, | |||
| 0x67, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, | |||
| 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x11, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x49, | |||
| 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x12, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, | |||
| 0x74, 0x65, 0x49, 0x4f, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, 0x34, | |||
| 0x0a, 0x0a, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x11, 0x2e, 0x53, | |||
| 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, | |||
| 0x0f, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, | |||
| 0x22, 0x00, 0x28, 0x01, 0x12, 0x31, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, | |||
| 0x6d, 0x12, 0x0d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, | |||
| 0x1a, 0x11, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x63, | |||
| 0x6b, 0x65, 0x74, 0x22, 0x00, 0x30, 0x01, 0x12, 0x26, 0x0a, 0x07, 0x53, 0x65, 0x6e, 0x64, 0x56, | |||
| 0x61, 0x72, 0x12, 0x0b, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x71, 0x1a, | |||
| 0x0c, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x56, 0x61, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x12, | |||
| 0x23, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x12, 0x0a, 0x2e, 0x47, 0x65, 0x74, 0x56, | |||
| 0x61, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x0b, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x72, 0x52, 0x65, | |||
| 0x73, 0x70, 0x22, 0x00, 0x12, 0x1d, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x08, 0x2e, 0x50, | |||
| 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x1a, 0x09, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, | |||
| 0x70, 0x22, 0x00, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x3b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x62, 0x06, | |||
| 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, | |||
| } | |||
| var ( | |||
| @@ -530,36 +724,42 @@ func file_pkgs_grpc_agent_agent_proto_rawDescGZIP() []byte { | |||
| } | |||
| var file_pkgs_grpc_agent_agent_proto_enumTypes = make([]protoimpl.EnumInfo, 1) | |||
| var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 8) | |||
| var file_pkgs_grpc_agent_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 12) | |||
| var file_pkgs_grpc_agent_agent_proto_goTypes = []interface{}{ | |||
| (StreamDataPacketType)(0), // 0: StreamDataPacketType | |||
| (*FileDataPacket)(nil), // 1: FileDataPacket | |||
| (*SendIPFSFileResp)(nil), // 2: SendIPFSFileResp | |||
| (*GetIPFSFileReq)(nil), // 3: GetIPFSFileReq | |||
| (*ExecuteIOPlanReq)(nil), // 1: ExecuteIOPlanReq | |||
| (*ExecuteIOPlanResp)(nil), // 2: ExecuteIOPlanResp | |||
| (*FileDataPacket)(nil), // 3: FileDataPacket | |||
| (*StreamDataPacket)(nil), // 4: StreamDataPacket | |||
| (*SendStreamResp)(nil), // 5: SendStreamResp | |||
| (*FetchStreamReq)(nil), // 6: FetchStreamReq | |||
| (*PingReq)(nil), // 7: PingReq | |||
| (*PingResp)(nil), // 8: PingResp | |||
| (*GetStreamReq)(nil), // 6: GetStreamReq | |||
| (*SendVarReq)(nil), // 7: SendVarReq | |||
| (*SendVarResp)(nil), // 8: SendVarResp | |||
| (*GetVarReq)(nil), // 9: GetVarReq | |||
| (*GetVarResp)(nil), // 10: GetVarResp | |||
| (*PingReq)(nil), // 11: PingReq | |||
| (*PingResp)(nil), // 12: PingResp | |||
| } | |||
| var file_pkgs_grpc_agent_agent_proto_depIdxs = []int32{ | |||
| 0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType | |||
| 0, // 1: StreamDataPacket.Type:type_name -> StreamDataPacketType | |||
| 1, // 2: Agent.SendIPFSFile:input_type -> FileDataPacket | |||
| 3, // 3: Agent.GetIPFSFile:input_type -> GetIPFSFileReq | |||
| 4, // 4: Agent.SendStream:input_type -> StreamDataPacket | |||
| 6, // 5: Agent.FetchStream:input_type -> FetchStreamReq | |||
| 7, // 6: Agent.Ping:input_type -> PingReq | |||
| 2, // 7: Agent.SendIPFSFile:output_type -> SendIPFSFileResp | |||
| 1, // 8: Agent.GetIPFSFile:output_type -> FileDataPacket | |||
| 5, // 9: Agent.SendStream:output_type -> SendStreamResp | |||
| 4, // 10: Agent.FetchStream:output_type -> StreamDataPacket | |||
| 8, // 11: Agent.Ping:output_type -> PingResp | |||
| 7, // [7:12] is the sub-list for method output_type | |||
| 2, // [2:7] is the sub-list for method input_type | |||
| 2, // [2:2] is the sub-list for extension type_name | |||
| 2, // [2:2] is the sub-list for extension extendee | |||
| 0, // [0:2] is the sub-list for field type_name | |||
| 0, // 0: FileDataPacket.Type:type_name -> StreamDataPacketType | |||
| 0, // 1: StreamDataPacket.Type:type_name -> StreamDataPacketType | |||
| 1, // 2: Agent.ExecuteIOPlan:input_type -> ExecuteIOPlanReq | |||
| 4, // 3: Agent.SendStream:input_type -> StreamDataPacket | |||
| 6, // 4: Agent.GetStream:input_type -> GetStreamReq | |||
| 7, // 5: Agent.SendVar:input_type -> SendVarReq | |||
| 9, // 6: Agent.GetVar:input_type -> GetVarReq | |||
| 11, // 7: Agent.Ping:input_type -> PingReq | |||
| 2, // 8: Agent.ExecuteIOPlan:output_type -> ExecuteIOPlanResp | |||
| 5, // 9: Agent.SendStream:output_type -> SendStreamResp | |||
| 4, // 10: Agent.GetStream:output_type -> StreamDataPacket | |||
| 8, // 11: Agent.SendVar:output_type -> SendVarResp | |||
| 10, // 12: Agent.GetVar:output_type -> GetVarResp | |||
| 12, // 13: Agent.Ping:output_type -> PingResp | |||
| 8, // [8:14] is the sub-list for method output_type | |||
| 2, // [2:8] is the sub-list for method input_type | |||
| 2, // [2:2] is the sub-list for extension type_name | |||
| 2, // [2:2] is the sub-list for extension extendee | |||
| 0, // [0:2] is the sub-list for field type_name | |||
| } | |||
| func init() { file_pkgs_grpc_agent_agent_proto_init() } | |||
| @@ -569,7 +769,7 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| } | |||
| if !protoimpl.UnsafeEnabled { | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*FileDataPacket); i { | |||
| switch v := v.(*ExecuteIOPlanReq); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| @@ -581,7 +781,7 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*SendIPFSFileResp); i { | |||
| switch v := v.(*ExecuteIOPlanResp); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| @@ -593,7 +793,7 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*GetIPFSFileReq); i { | |||
| switch v := v.(*FileDataPacket); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| @@ -629,7 +829,7 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*FetchStreamReq); i { | |||
| switch v := v.(*GetStreamReq); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| @@ -641,7 +841,7 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*PingReq); i { | |||
| switch v := v.(*SendVarReq); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| @@ -653,6 +853,54 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*SendVarResp); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| return &v.sizeCache | |||
| case 2: | |||
| return &v.unknownFields | |||
| default: | |||
| return nil | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*GetVarReq); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| return &v.sizeCache | |||
| case 2: | |||
| return &v.unknownFields | |||
| default: | |||
| return nil | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*GetVarResp); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| return &v.sizeCache | |||
| case 2: | |||
| return &v.unknownFields | |||
| default: | |||
| return nil | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*PingReq); i { | |||
| case 0: | |||
| return &v.state | |||
| case 1: | |||
| return &v.sizeCache | |||
| case 2: | |||
| return &v.unknownFields | |||
| default: | |||
| return nil | |||
| } | |||
| } | |||
| file_pkgs_grpc_agent_agent_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { | |||
| switch v := v.(*PingResp); i { | |||
| case 0: | |||
| return &v.state | |||
| @@ -671,7 +919,7 @@ func file_pkgs_grpc_agent_agent_proto_init() { | |||
| GoPackagePath: reflect.TypeOf(x{}).PkgPath(), | |||
| RawDescriptor: file_pkgs_grpc_agent_agent_proto_rawDesc, | |||
| NumEnums: 1, | |||
| NumMessages: 8, | |||
| NumMessages: 12, | |||
| NumExtensions: 0, | |||
| NumServices: 1, | |||
| }, | |||
| @@ -5,50 +5,65 @@ syntax = "proto3"; | |||
| option go_package = ".;agent";//grpc这里生效了 | |||
| message ExecuteIOPlanReq { | |||
| string Plan = 1; | |||
| } | |||
| message ExecuteIOPlanResp { | |||
| } | |||
| enum StreamDataPacketType { | |||
| EOF = 0; | |||
| Data = 1; | |||
| SendArgs = 2; | |||
| } | |||
| // 文件数据。注意:只在Type为Data的时候,Data字段才能有数据 | |||
| // 文件数据。注意:只在Type为Data或EOF的时候,Data字段才能有数据 | |||
| message FileDataPacket { | |||
| StreamDataPacketType Type = 1; | |||
| bytes Data = 2; | |||
| } | |||
| message SendIPFSFileResp { | |||
| string FileHash = 1; | |||
| } | |||
| message GetIPFSFileReq { | |||
| string FileHash = 1; | |||
| } | |||
| // 注:EOF时data也可能有数据 | |||
| message StreamDataPacket { | |||
| StreamDataPacketType Type = 1; | |||
| string PlanID = 2; | |||
| string StreamID = 3; | |||
| int32 VarID = 3; | |||
| bytes Data = 4; | |||
| } | |||
| message SendStreamResp { | |||
| message SendStreamResp {} | |||
| message GetStreamReq { | |||
| string PlanID = 1; | |||
| int32 VarID = 2; | |||
| } | |||
| message SendVarReq { | |||
| string PlanID = 1; | |||
| string Var = 2; | |||
| } | |||
| message SendVarResp {} | |||
| message FetchStreamReq { | |||
| message GetVarReq { | |||
| string PlanID = 1; | |||
| string StreamID = 2; | |||
| string Var = 2; | |||
| } | |||
| message GetVarResp { | |||
| string Var = 1; // 此处不使用VarID的原因是,Switch的BindVars函数还需要知道Var的类型 | |||
| } | |||
| message PingReq {} | |||
| message PingResp {} | |||
| service Agent { | |||
| rpc SendIPFSFile(stream FileDataPacket)returns(SendIPFSFileResp){} | |||
| rpc GetIPFSFile(GetIPFSFileReq)returns(stream FileDataPacket){} | |||
| rpc ExecuteIOPlan(ExecuteIOPlanReq) returns(ExecuteIOPlanResp){} | |||
| rpc SendStream(stream StreamDataPacket)returns(SendStreamResp){} | |||
| rpc FetchStream(FetchStreamReq)returns(stream StreamDataPacket){} | |||
| rpc GetStream(GetStreamReq)returns(stream StreamDataPacket){} | |||
| rpc SendVar(SendVarReq)returns(SendVarResp){} | |||
| rpc GetVar(GetVarReq)returns(GetVarResp){} | |||
| rpc Ping(PingReq) returns(PingResp){} | |||
| } | |||
| @@ -21,21 +21,23 @@ import ( | |||
| const _ = grpc.SupportPackageIsVersion7 | |||
| const ( | |||
| Agent_SendIPFSFile_FullMethodName = "/Agent/SendIPFSFile" | |||
| Agent_GetIPFSFile_FullMethodName = "/Agent/GetIPFSFile" | |||
| Agent_SendStream_FullMethodName = "/Agent/SendStream" | |||
| Agent_FetchStream_FullMethodName = "/Agent/FetchStream" | |||
| Agent_Ping_FullMethodName = "/Agent/Ping" | |||
| Agent_ExecuteIOPlan_FullMethodName = "/Agent/ExecuteIOPlan" | |||
| Agent_SendStream_FullMethodName = "/Agent/SendStream" | |||
| Agent_GetStream_FullMethodName = "/Agent/GetStream" | |||
| Agent_SendVar_FullMethodName = "/Agent/SendVar" | |||
| Agent_GetVar_FullMethodName = "/Agent/GetVar" | |||
| Agent_Ping_FullMethodName = "/Agent/Ping" | |||
| ) | |||
| // AgentClient is the client API for Agent service. | |||
| // | |||
| // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. | |||
| type AgentClient interface { | |||
| SendIPFSFile(ctx context.Context, opts ...grpc.CallOption) (Agent_SendIPFSFileClient, error) | |||
| GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error) | |||
| ExecuteIOPlan(ctx context.Context, in *ExecuteIOPlanReq, opts ...grpc.CallOption) (*ExecuteIOPlanResp, error) | |||
| SendStream(ctx context.Context, opts ...grpc.CallOption) (Agent_SendStreamClient, error) | |||
| FetchStream(ctx context.Context, in *FetchStreamReq, opts ...grpc.CallOption) (Agent_FetchStreamClient, error) | |||
| GetStream(ctx context.Context, in *GetStreamReq, opts ...grpc.CallOption) (Agent_GetStreamClient, error) | |||
| SendVar(ctx context.Context, in *SendVarReq, opts ...grpc.CallOption) (*SendVarResp, error) | |||
| GetVar(ctx context.Context, in *GetVarReq, opts ...grpc.CallOption) (*GetVarResp, error) | |||
| Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error) | |||
| } | |||
| @@ -47,74 +49,17 @@ func NewAgentClient(cc grpc.ClientConnInterface) AgentClient { | |||
| return &agentClient{cc} | |||
| } | |||
| func (c *agentClient) SendIPFSFile(ctx context.Context, opts ...grpc.CallOption) (Agent_SendIPFSFileClient, error) { | |||
| stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[0], Agent_SendIPFSFile_FullMethodName, opts...) | |||
| func (c *agentClient) ExecuteIOPlan(ctx context.Context, in *ExecuteIOPlanReq, opts ...grpc.CallOption) (*ExecuteIOPlanResp, error) { | |||
| out := new(ExecuteIOPlanResp) | |||
| err := c.cc.Invoke(ctx, Agent_ExecuteIOPlan_FullMethodName, in, out, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| x := &agentSendIPFSFileClient{stream} | |||
| return x, nil | |||
| } | |||
| type Agent_SendIPFSFileClient interface { | |||
| Send(*FileDataPacket) error | |||
| CloseAndRecv() (*SendIPFSFileResp, error) | |||
| grpc.ClientStream | |||
| } | |||
| type agentSendIPFSFileClient struct { | |||
| grpc.ClientStream | |||
| } | |||
| func (x *agentSendIPFSFileClient) Send(m *FileDataPacket) error { | |||
| return x.ClientStream.SendMsg(m) | |||
| } | |||
| func (x *agentSendIPFSFileClient) CloseAndRecv() (*SendIPFSFileResp, error) { | |||
| if err := x.ClientStream.CloseSend(); err != nil { | |||
| return nil, err | |||
| } | |||
| m := new(SendIPFSFileResp) | |||
| if err := x.ClientStream.RecvMsg(m); err != nil { | |||
| return nil, err | |||
| } | |||
| return m, nil | |||
| } | |||
| func (c *agentClient) GetIPFSFile(ctx context.Context, in *GetIPFSFileReq, opts ...grpc.CallOption) (Agent_GetIPFSFileClient, error) { | |||
| stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[1], Agent_GetIPFSFile_FullMethodName, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| x := &agentGetIPFSFileClient{stream} | |||
| if err := x.ClientStream.SendMsg(in); err != nil { | |||
| return nil, err | |||
| } | |||
| if err := x.ClientStream.CloseSend(); err != nil { | |||
| return nil, err | |||
| } | |||
| return x, nil | |||
| } | |||
| type Agent_GetIPFSFileClient interface { | |||
| Recv() (*FileDataPacket, error) | |||
| grpc.ClientStream | |||
| } | |||
| type agentGetIPFSFileClient struct { | |||
| grpc.ClientStream | |||
| } | |||
| func (x *agentGetIPFSFileClient) Recv() (*FileDataPacket, error) { | |||
| m := new(FileDataPacket) | |||
| if err := x.ClientStream.RecvMsg(m); err != nil { | |||
| return nil, err | |||
| } | |||
| return m, nil | |||
| return out, nil | |||
| } | |||
| func (c *agentClient) SendStream(ctx context.Context, opts ...grpc.CallOption) (Agent_SendStreamClient, error) { | |||
| stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[2], Agent_SendStream_FullMethodName, opts...) | |||
| stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[0], Agent_SendStream_FullMethodName, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -147,12 +92,12 @@ func (x *agentSendStreamClient) CloseAndRecv() (*SendStreamResp, error) { | |||
| return m, nil | |||
| } | |||
| func (c *agentClient) FetchStream(ctx context.Context, in *FetchStreamReq, opts ...grpc.CallOption) (Agent_FetchStreamClient, error) { | |||
| stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[3], Agent_FetchStream_FullMethodName, opts...) | |||
| func (c *agentClient) GetStream(ctx context.Context, in *GetStreamReq, opts ...grpc.CallOption) (Agent_GetStreamClient, error) { | |||
| stream, err := c.cc.NewStream(ctx, &Agent_ServiceDesc.Streams[1], Agent_GetStream_FullMethodName, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| x := &agentFetchStreamClient{stream} | |||
| x := &agentGetStreamClient{stream} | |||
| if err := x.ClientStream.SendMsg(in); err != nil { | |||
| return nil, err | |||
| } | |||
| @@ -162,16 +107,16 @@ func (c *agentClient) FetchStream(ctx context.Context, in *FetchStreamReq, opts | |||
| return x, nil | |||
| } | |||
| type Agent_FetchStreamClient interface { | |||
| type Agent_GetStreamClient interface { | |||
| Recv() (*StreamDataPacket, error) | |||
| grpc.ClientStream | |||
| } | |||
| type agentFetchStreamClient struct { | |||
| type agentGetStreamClient struct { | |||
| grpc.ClientStream | |||
| } | |||
| func (x *agentFetchStreamClient) Recv() (*StreamDataPacket, error) { | |||
| func (x *agentGetStreamClient) Recv() (*StreamDataPacket, error) { | |||
| m := new(StreamDataPacket) | |||
| if err := x.ClientStream.RecvMsg(m); err != nil { | |||
| return nil, err | |||
| @@ -179,6 +124,24 @@ func (x *agentFetchStreamClient) Recv() (*StreamDataPacket, error) { | |||
| return m, nil | |||
| } | |||
| func (c *agentClient) SendVar(ctx context.Context, in *SendVarReq, opts ...grpc.CallOption) (*SendVarResp, error) { | |||
| out := new(SendVarResp) | |||
| err := c.cc.Invoke(ctx, Agent_SendVar_FullMethodName, in, out, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return out, nil | |||
| } | |||
| func (c *agentClient) GetVar(ctx context.Context, in *GetVarReq, opts ...grpc.CallOption) (*GetVarResp, error) { | |||
| out := new(GetVarResp) | |||
| err := c.cc.Invoke(ctx, Agent_GetVar_FullMethodName, in, out, opts...) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return out, nil | |||
| } | |||
| func (c *agentClient) Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOption) (*PingResp, error) { | |||
| out := new(PingResp) | |||
| err := c.cc.Invoke(ctx, Agent_Ping_FullMethodName, in, out, opts...) | |||
| @@ -192,10 +155,11 @@ func (c *agentClient) Ping(ctx context.Context, in *PingReq, opts ...grpc.CallOp | |||
| // All implementations must embed UnimplementedAgentServer | |||
| // for forward compatibility | |||
| type AgentServer interface { | |||
| SendIPFSFile(Agent_SendIPFSFileServer) error | |||
| GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error | |||
| ExecuteIOPlan(context.Context, *ExecuteIOPlanReq) (*ExecuteIOPlanResp, error) | |||
| SendStream(Agent_SendStreamServer) error | |||
| FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error | |||
| GetStream(*GetStreamReq, Agent_GetStreamServer) error | |||
| SendVar(context.Context, *SendVarReq) (*SendVarResp, error) | |||
| GetVar(context.Context, *GetVarReq) (*GetVarResp, error) | |||
| Ping(context.Context, *PingReq) (*PingResp, error) | |||
| mustEmbedUnimplementedAgentServer() | |||
| } | |||
| @@ -204,17 +168,20 @@ type AgentServer interface { | |||
| type UnimplementedAgentServer struct { | |||
| } | |||
| func (UnimplementedAgentServer) SendIPFSFile(Agent_SendIPFSFileServer) error { | |||
| return status.Errorf(codes.Unimplemented, "method SendIPFSFile not implemented") | |||
| } | |||
| func (UnimplementedAgentServer) GetIPFSFile(*GetIPFSFileReq, Agent_GetIPFSFileServer) error { | |||
| return status.Errorf(codes.Unimplemented, "method GetIPFSFile not implemented") | |||
| func (UnimplementedAgentServer) ExecuteIOPlan(context.Context, *ExecuteIOPlanReq) (*ExecuteIOPlanResp, error) { | |||
| return nil, status.Errorf(codes.Unimplemented, "method ExecuteIOPlan not implemented") | |||
| } | |||
| func (UnimplementedAgentServer) SendStream(Agent_SendStreamServer) error { | |||
| return status.Errorf(codes.Unimplemented, "method SendStream not implemented") | |||
| } | |||
| func (UnimplementedAgentServer) FetchStream(*FetchStreamReq, Agent_FetchStreamServer) error { | |||
| return status.Errorf(codes.Unimplemented, "method FetchStream not implemented") | |||
| func (UnimplementedAgentServer) GetStream(*GetStreamReq, Agent_GetStreamServer) error { | |||
| return status.Errorf(codes.Unimplemented, "method GetStream not implemented") | |||
| } | |||
| func (UnimplementedAgentServer) SendVar(context.Context, *SendVarReq) (*SendVarResp, error) { | |||
| return nil, status.Errorf(codes.Unimplemented, "method SendVar not implemented") | |||
| } | |||
| func (UnimplementedAgentServer) GetVar(context.Context, *GetVarReq) (*GetVarResp, error) { | |||
| return nil, status.Errorf(codes.Unimplemented, "method GetVar not implemented") | |||
| } | |||
| func (UnimplementedAgentServer) Ping(context.Context, *PingReq) (*PingResp, error) { | |||
| return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") | |||
| @@ -232,51 +199,22 @@ func RegisterAgentServer(s grpc.ServiceRegistrar, srv AgentServer) { | |||
| s.RegisterService(&Agent_ServiceDesc, srv) | |||
| } | |||
| func _Agent_SendIPFSFile_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
| return srv.(AgentServer).SendIPFSFile(&agentSendIPFSFileServer{stream}) | |||
| } | |||
| type Agent_SendIPFSFileServer interface { | |||
| SendAndClose(*SendIPFSFileResp) error | |||
| Recv() (*FileDataPacket, error) | |||
| grpc.ServerStream | |||
| } | |||
| type agentSendIPFSFileServer struct { | |||
| grpc.ServerStream | |||
| } | |||
| func (x *agentSendIPFSFileServer) SendAndClose(m *SendIPFSFileResp) error { | |||
| return x.ServerStream.SendMsg(m) | |||
| } | |||
| func (x *agentSendIPFSFileServer) Recv() (*FileDataPacket, error) { | |||
| m := new(FileDataPacket) | |||
| if err := x.ServerStream.RecvMsg(m); err != nil { | |||
| func _Agent_ExecuteIOPlan_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
| in := new(ExecuteIOPlanReq) | |||
| if err := dec(in); err != nil { | |||
| return nil, err | |||
| } | |||
| return m, nil | |||
| } | |||
| func _Agent_GetIPFSFile_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
| m := new(GetIPFSFileReq) | |||
| if err := stream.RecvMsg(m); err != nil { | |||
| return err | |||
| if interceptor == nil { | |||
| return srv.(AgentServer).ExecuteIOPlan(ctx, in) | |||
| } | |||
| return srv.(AgentServer).GetIPFSFile(m, &agentGetIPFSFileServer{stream}) | |||
| } | |||
| type Agent_GetIPFSFileServer interface { | |||
| Send(*FileDataPacket) error | |||
| grpc.ServerStream | |||
| } | |||
| type agentGetIPFSFileServer struct { | |||
| grpc.ServerStream | |||
| } | |||
| func (x *agentGetIPFSFileServer) Send(m *FileDataPacket) error { | |||
| return x.ServerStream.SendMsg(m) | |||
| info := &grpc.UnaryServerInfo{ | |||
| Server: srv, | |||
| FullMethod: Agent_ExecuteIOPlan_FullMethodName, | |||
| } | |||
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
| return srv.(AgentServer).ExecuteIOPlan(ctx, req.(*ExecuteIOPlanReq)) | |||
| } | |||
| return interceptor(ctx, in, info, handler) | |||
| } | |||
| func _Agent_SendStream_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
| @@ -305,27 +243,63 @@ func (x *agentSendStreamServer) Recv() (*StreamDataPacket, error) { | |||
| return m, nil | |||
| } | |||
| func _Agent_FetchStream_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
| m := new(FetchStreamReq) | |||
| func _Agent_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
| m := new(GetStreamReq) | |||
| if err := stream.RecvMsg(m); err != nil { | |||
| return err | |||
| } | |||
| return srv.(AgentServer).FetchStream(m, &agentFetchStreamServer{stream}) | |||
| return srv.(AgentServer).GetStream(m, &agentGetStreamServer{stream}) | |||
| } | |||
| type Agent_FetchStreamServer interface { | |||
| type Agent_GetStreamServer interface { | |||
| Send(*StreamDataPacket) error | |||
| grpc.ServerStream | |||
| } | |||
| type agentFetchStreamServer struct { | |||
| type agentGetStreamServer struct { | |||
| grpc.ServerStream | |||
| } | |||
| func (x *agentFetchStreamServer) Send(m *StreamDataPacket) error { | |||
| func (x *agentGetStreamServer) Send(m *StreamDataPacket) error { | |||
| return x.ServerStream.SendMsg(m) | |||
| } | |||
| func _Agent_SendVar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
| in := new(SendVarReq) | |||
| if err := dec(in); err != nil { | |||
| return nil, err | |||
| } | |||
| if interceptor == nil { | |||
| return srv.(AgentServer).SendVar(ctx, in) | |||
| } | |||
| info := &grpc.UnaryServerInfo{ | |||
| Server: srv, | |||
| FullMethod: Agent_SendVar_FullMethodName, | |||
| } | |||
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
| return srv.(AgentServer).SendVar(ctx, req.(*SendVarReq)) | |||
| } | |||
| return interceptor(ctx, in, info, handler) | |||
| } | |||
| func _Agent_GetVar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
| in := new(GetVarReq) | |||
| if err := dec(in); err != nil { | |||
| return nil, err | |||
| } | |||
| if interceptor == nil { | |||
| return srv.(AgentServer).GetVar(ctx, in) | |||
| } | |||
| info := &grpc.UnaryServerInfo{ | |||
| Server: srv, | |||
| FullMethod: Agent_GetVar_FullMethodName, | |||
| } | |||
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
| return srv.(AgentServer).GetVar(ctx, req.(*GetVarReq)) | |||
| } | |||
| return interceptor(ctx, in, info, handler) | |||
| } | |||
| func _Agent_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
| in := new(PingReq) | |||
| if err := dec(in); err != nil { | |||
| @@ -352,29 +326,31 @@ var Agent_ServiceDesc = grpc.ServiceDesc{ | |||
| HandlerType: (*AgentServer)(nil), | |||
| Methods: []grpc.MethodDesc{ | |||
| { | |||
| MethodName: "Ping", | |||
| Handler: _Agent_Ping_Handler, | |||
| MethodName: "ExecuteIOPlan", | |||
| Handler: _Agent_ExecuteIOPlan_Handler, | |||
| }, | |||
| }, | |||
| Streams: []grpc.StreamDesc{ | |||
| { | |||
| StreamName: "SendIPFSFile", | |||
| Handler: _Agent_SendIPFSFile_Handler, | |||
| ClientStreams: true, | |||
| MethodName: "SendVar", | |||
| Handler: _Agent_SendVar_Handler, | |||
| }, | |||
| { | |||
| StreamName: "GetIPFSFile", | |||
| Handler: _Agent_GetIPFSFile_Handler, | |||
| ServerStreams: true, | |||
| MethodName: "GetVar", | |||
| Handler: _Agent_GetVar_Handler, | |||
| }, | |||
| { | |||
| MethodName: "Ping", | |||
| Handler: _Agent_Ping_Handler, | |||
| }, | |||
| }, | |||
| Streams: []grpc.StreamDesc{ | |||
| { | |||
| StreamName: "SendStream", | |||
| Handler: _Agent_SendStream_Handler, | |||
| ClientStreams: true, | |||
| }, | |||
| { | |||
| StreamName: "FetchStream", | |||
| Handler: _Agent_FetchStream_Handler, | |||
| StreamName: "GetStream", | |||
| Handler: _Agent_GetStream_Handler, | |||
| ServerStreams: true, | |||
| }, | |||
| }, | |||
| @@ -5,6 +5,7 @@ import ( | |||
| "fmt" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/credentials/insecure" | |||
| @@ -27,59 +28,29 @@ func NewClient(addr string) (*Client, error) { | |||
| }, nil | |||
| } | |||
| func (c *Client) SendIPFSFile(file io.Reader) (string, error) { | |||
| sendCli, err := c.cli.SendIPFSFile(context.Background()) | |||
| func (c *Client) ExecuteIOPlan(ctx context.Context, plan ioswitch.Plan) error { | |||
| data, err := serder.ObjectToJSONEx(plan) | |||
| if err != nil { | |||
| return "", err | |||
| return err | |||
| } | |||
| buf := make([]byte, 4096) | |||
| for { | |||
| rd, err := file.Read(buf) | |||
| if err == io.EOF { | |||
| err := sendCli.Send(&FileDataPacket{ | |||
| Type: StreamDataPacketType_EOF, | |||
| Data: buf[:rd], | |||
| }) | |||
| if err != nil { | |||
| return "", fmt.Errorf("sending EOF packet: %w", err) | |||
| } | |||
| resp, err := sendCli.CloseAndRecv() | |||
| if err != nil { | |||
| return "", fmt.Errorf("receiving response: %w", err) | |||
| } | |||
| return resp.FileHash, nil | |||
| } | |||
| if err != nil { | |||
| return "", fmt.Errorf("reading file data: %w", err) | |||
| } | |||
| err = sendCli.Send(&FileDataPacket{ | |||
| Type: StreamDataPacketType_Data, | |||
| Data: buf[:rd], | |||
| }) | |||
| if err != nil { | |||
| return "", fmt.Errorf("sending data packet: %w", err) | |||
| } | |||
| } | |||
| _, err = c.cli.ExecuteIOPlan(ctx, &ExecuteIOPlanReq{ | |||
| Plan: string(data), | |||
| }) | |||
| return err | |||
| } | |||
| type fileReadCloser struct { | |||
| type grpcStreamReadCloser struct { | |||
| io.ReadCloser | |||
| // stream Agent_GetIPFSFileClient | |||
| // TODO 临时使用 | |||
| recvFn func() (*StreamDataPacket, error) | |||
| stream Agent_GetStreamClient | |||
| cancelFn context.CancelFunc | |||
| readingData []byte | |||
| recvEOF bool | |||
| } | |||
| func (s *fileReadCloser) Read(p []byte) (int, error) { | |||
| func (s *grpcStreamReadCloser) Read(p []byte) (int, error) { | |||
| if len(s.readingData) == 0 && !s.recvEOF { | |||
| resp, err := s.recvFn() | |||
| resp, err := s.stream.Recv() | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| @@ -106,63 +77,34 @@ func (s *fileReadCloser) Read(p []byte) (int, error) { | |||
| return cnt, nil | |||
| } | |||
| func (s *fileReadCloser) Close() error { | |||
| func (s *grpcStreamReadCloser) Close() error { | |||
| s.cancelFn() | |||
| return nil | |||
| } | |||
| func (c *Client) GetIPFSFile(fileHash string) (io.ReadCloser, error) { | |||
| ctx, cancel := context.WithCancel(context.Background()) | |||
| stream, err := c.cli.GetIPFSFile(ctx, &GetIPFSFileReq{ | |||
| FileHash: fileHash, | |||
| }) | |||
| if err != nil { | |||
| cancel() | |||
| return nil, fmt.Errorf("request grpc failed, err: %w", err) | |||
| } | |||
| return &fileReadCloser{ | |||
| // TODO 临时处理方案 | |||
| recvFn: func() (*StreamDataPacket, error) { | |||
| pkt, err := stream.Recv() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &StreamDataPacket{ | |||
| Type: pkt.Type, | |||
| Data: pkt.Data, | |||
| }, nil | |||
| }, | |||
| cancelFn: cancel, | |||
| }, nil | |||
| } | |||
| func (c *Client) SendStream(planID ioswitch.PlanID, streamID ioswitch.StreamID, file io.Reader) error { | |||
| sendCli, err := c.cli.SendStream(context.Background()) | |||
| func (c *Client) SendStream(ctx context.Context, planID ioswitch.PlanID, varID ioswitch.VarID, str io.Reader) error { | |||
| sendCli, err := c.cli.SendStream(ctx) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = sendCli.Send(&StreamDataPacket{ | |||
| Type: StreamDataPacketType_SendArgs, | |||
| PlanID: string(planID), | |||
| StreamID: string(streamID), | |||
| Type: StreamDataPacketType_SendArgs, | |||
| PlanID: string(planID), | |||
| VarID: int32(varID), | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("sending stream id packet: %w", err) | |||
| return fmt.Errorf("sending first stream packet: %w", err) | |||
| } | |||
| buf := make([]byte, 4096) | |||
| buf := make([]byte, 1024*64) | |||
| for { | |||
| rd, err := file.Read(buf) | |||
| rd, err := str.Read(buf) | |||
| if err == io.EOF { | |||
| err := sendCli.Send(&StreamDataPacket{ | |||
| Type: StreamDataPacketType_EOF, | |||
| StreamID: string(streamID), | |||
| Data: buf[:rd], | |||
| Type: StreamDataPacketType_EOF, | |||
| Data: buf[:rd], | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("sending EOF packet: %w", err) | |||
| @@ -177,13 +119,12 @@ func (c *Client) SendStream(planID ioswitch.PlanID, streamID ioswitch.StreamID, | |||
| } | |||
| if err != nil { | |||
| return fmt.Errorf("reading file data: %w", err) | |||
| return fmt.Errorf("reading stream data: %w", err) | |||
| } | |||
| err = sendCli.Send(&StreamDataPacket{ | |||
| Type: StreamDataPacketType_Data, | |||
| StreamID: string(streamID), | |||
| Data: buf[:rd], | |||
| Type: StreamDataPacketType_Data, | |||
| Data: buf[:rd], | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("sending data packet: %w", err) | |||
| @@ -191,24 +132,59 @@ func (c *Client) SendStream(planID ioswitch.PlanID, streamID ioswitch.StreamID, | |||
| } | |||
| } | |||
| func (c *Client) FetchStream(planID ioswitch.PlanID, streamID ioswitch.StreamID) (io.ReadCloser, error) { | |||
| func (c *Client) GetStream(planID ioswitch.PlanID, varID ioswitch.VarID) (io.ReadCloser, error) { | |||
| ctx, cancel := context.WithCancel(context.Background()) | |||
| stream, err := c.cli.FetchStream(ctx, &FetchStreamReq{ | |||
| PlanID: string(planID), | |||
| StreamID: string(streamID), | |||
| stream, err := c.cli.GetStream(ctx, &GetStreamReq{ | |||
| PlanID: string(planID), | |||
| VarID: int32(varID), | |||
| }) | |||
| if err != nil { | |||
| cancel() | |||
| return nil, fmt.Errorf("request grpc failed, err: %w", err) | |||
| } | |||
| return &fileReadCloser{ | |||
| recvFn: stream.Recv, | |||
| return &grpcStreamReadCloser{ | |||
| stream: stream, | |||
| cancelFn: cancel, | |||
| }, nil | |||
| } | |||
| func (c *Client) SendVar(ctx context.Context, planID ioswitch.PlanID, v ioswitch.Var) error { | |||
| data, err := serder.ObjectToJSONEx(v) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| _, err = c.cli.SendVar(ctx, &SendVarReq{ | |||
| PlanID: string(planID), | |||
| Var: string(data), | |||
| }) | |||
| return err | |||
| } | |||
| func (c *Client) GetVar(ctx context.Context, planID ioswitch.PlanID, v ioswitch.Var) (ioswitch.Var, error) { | |||
| data, err := serder.ObjectToJSONEx(v) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| resp, err := c.cli.GetVar(ctx, &GetVarReq{ | |||
| PlanID: string(planID), | |||
| Var: string(data), | |||
| }) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| v2, err := serder.JSONToObjectEx[ioswitch.Var]([]byte(resp.Var)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return v2, nil | |||
| } | |||
| func (c *Client) Ping() error { | |||
| _, err := c.cli.Ping(context.Background(), &PingReq{}) | |||
| return err | |||
| @@ -1,35 +1,67 @@ | |||
| package ioswitch | |||
| import ( | |||
| "context" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/types" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| ) | |||
| type PlanID string | |||
| type StreamID string | |||
| type VarID int | |||
| type Plan struct { | |||
| ID PlanID | |||
| Ops []Op | |||
| ID PlanID `json:"id"` | |||
| Ops []Op `json:"ops"` | |||
| } | |||
| type Stream struct { | |||
| ID StreamID | |||
| Stream io.ReadCloser | |||
| type Var interface { | |||
| GetID() VarID | |||
| } | |||
| func NewStream(id StreamID, stream io.ReadCloser) Stream { | |||
| return Stream{ | |||
| ID: id, | |||
| Stream: stream, | |||
| } | |||
| var VarUnion = types.NewTypeUnion[Var]( | |||
| (*IntVar)(nil), | |||
| (*StringVar)(nil), | |||
| ) | |||
| var _ = serder.UseTypeUnionExternallyTagged(&VarUnion) | |||
| type StreamVar struct { | |||
| ID VarID `json:"id"` | |||
| Stream io.ReadCloser `json:"-"` | |||
| } | |||
| type Op interface { | |||
| Execute(sw *Switch, planID PlanID) error | |||
| func (v *StreamVar) GetID() VarID { | |||
| return v.ID | |||
| } | |||
| type ResultKV struct { | |||
| Key string | |||
| Value any | |||
| type IntVar struct { | |||
| ID VarID `json:"id"` | |||
| Value string `json:"value"` | |||
| } | |||
| func (v *IntVar) GetID() VarID { | |||
| return v.ID | |||
| } | |||
| type StringVar struct { | |||
| ID VarID `json:"id"` | |||
| Value string `json:"value"` | |||
| } | |||
| func (v *StringVar) GetID() VarID { | |||
| return v.ID | |||
| } | |||
| type SignalVar struct { | |||
| ID VarID `json:"id"` | |||
| } | |||
| func (v *SignalVar) GetID() VarID { | |||
| return v.ID | |||
| } | |||
| type Op interface { | |||
| Execute(ctx context.Context, sw *Switch) error | |||
| } | |||
| @@ -0,0 +1,85 @@ | |||
| package ioswitch | |||
| import ( | |||
| "context" | |||
| "sync" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/utils/lo2" | |||
| ) | |||
| type finding struct { | |||
| PlanID PlanID | |||
| Callback *future.SetValueFuture[*Switch] | |||
| } | |||
| type Manager struct { | |||
| lock sync.Mutex | |||
| switchs map[PlanID]*Switch | |||
| findings []*finding | |||
| } | |||
| func NewManager() Manager { | |||
| return Manager{ | |||
| switchs: make(map[PlanID]*Switch), | |||
| } | |||
| } | |||
| func (s *Manager) Add(sw *Switch) { | |||
| s.lock.Lock() | |||
| defer s.lock.Unlock() | |||
| s.switchs[sw.Plan().ID] = sw | |||
| s.findings = lo.Reject(s.findings, func(f *finding, idx int) bool { | |||
| if f.PlanID != sw.Plan().ID { | |||
| return false | |||
| } | |||
| f.Callback.SetValue(sw) | |||
| return true | |||
| }) | |||
| } | |||
| func (s *Manager) Remove(sw *Switch) { | |||
| s.lock.Lock() | |||
| defer s.lock.Unlock() | |||
| delete(s.switchs, sw.Plan().ID) | |||
| } | |||
| func (s *Manager) FindByID(id PlanID) *Switch { | |||
| s.lock.Lock() | |||
| defer s.lock.Unlock() | |||
| return s.switchs[id] | |||
| } | |||
| func (s *Manager) FindByIDContexted(ctx context.Context, id PlanID) *Switch { | |||
| s.lock.Lock() | |||
| sw := s.switchs[id] | |||
| if sw != nil { | |||
| s.lock.Unlock() | |||
| return sw | |||
| } | |||
| cb := future.NewSetValue[*Switch]() | |||
| f := &finding{ | |||
| PlanID: id, | |||
| Callback: cb, | |||
| } | |||
| s.findings = append(s.findings, f) | |||
| s.lock.Unlock() | |||
| sw, _ = cb.WaitValue(ctx) | |||
| s.lock.Lock() | |||
| defer s.lock.Unlock() | |||
| s.findings = lo2.Remove(s.findings, f) | |||
| return sw | |||
| } | |||
| @@ -0,0 +1,78 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "golang.org/x/sync/semaphore" | |||
| ) | |||
| type ChunkedSplit struct { | |||
| Input *ioswitch.StreamVar `json:"input"` | |||
| Outputs []*ioswitch.StreamVar `json:"outputs"` | |||
| ChunkSize int `json:"chunkSize"` | |||
| PaddingZeros bool `json:"paddingZeros"` | |||
| } | |||
| func (o *ChunkedSplit) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer o.Input.Stream.Close() | |||
| outputs := io2.ChunkedSplit(o.Input.Stream, o.ChunkSize, len(o.Outputs), io2.ChunkedSplitOption{ | |||
| PaddingZeros: o.PaddingZeros, | |||
| }) | |||
| sem := semaphore.NewWeighted(int64(len(outputs))) | |||
| for i := range outputs { | |||
| sem.Acquire(ctx, 1) | |||
| o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { | |||
| sem.Release(1) | |||
| }) | |||
| } | |||
| ioswitch.PutArrayVars(sw, o.Outputs) | |||
| return sem.Acquire(ctx, int64(len(outputs))) | |||
| } | |||
| type ChunkedJoin struct { | |||
| Inputs []*ioswitch.StreamVar `json:"inputs"` | |||
| Output *ioswitch.StreamVar `json:"output"` | |||
| ChunkSize int `json:"chunkSize"` | |||
| } | |||
| func (o *ChunkedJoin) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := ioswitch.BindArrayVars(sw, ctx, o.Inputs) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| var strReaders []io.Reader | |||
| for _, s := range o.Inputs { | |||
| strReaders = append(strReaders, s.Stream) | |||
| } | |||
| defer func() { | |||
| for _, str := range o.Inputs { | |||
| str.Stream.Close() | |||
| } | |||
| }() | |||
| fut := future.NewSetVoid() | |||
| o.Output.Stream = io2.AfterReadClosedOnce(io2.ChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }) | |||
| sw.PutVars(o.Output) | |||
| return fut.Wait(ctx) | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*ChunkedSplit)(nil)) | |||
| OpUnion.AddT((*ChunkedJoin)(nil)) | |||
| } | |||
| @@ -1,49 +0,0 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type ChunkedJoin struct { | |||
| InputIDs []ioswitch.StreamID `json:"inputIDs"` | |||
| OutputID ioswitch.StreamID `json:"outputID"` | |||
| ChunkSize int `json:"chunkSize"` | |||
| } | |||
| func (o *ChunkedJoin) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| strs, err := sw.WaitStreams(planID, o.InputIDs...) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| var strReaders []io.Reader | |||
| for _, s := range strs { | |||
| strReaders = append(strReaders, s.Stream) | |||
| } | |||
| defer func() { | |||
| for _, str := range strs { | |||
| str.Stream.Close() | |||
| } | |||
| }() | |||
| fut := future.NewSetVoid() | |||
| sw.StreamReady(planID, | |||
| ioswitch.NewStream(o.OutputID, | |||
| io2.AfterReadClosedOnce(io2.ChunkedJoin(strReaders, o.ChunkSize), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }), | |||
| ), | |||
| ) | |||
| fut.Wait(context.TODO()) | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*ChunkedJoin)(nil)) | |||
| } | |||
| @@ -1,49 +0,0 @@ | |||
| package ops | |||
| import ( | |||
| "io" | |||
| "sync" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type ChunkedSplit struct { | |||
| InputID ioswitch.StreamID `json:"inputID"` | |||
| OutputIDs []ioswitch.StreamID `json:"outputIDs"` | |||
| ChunkSize int `json:"chunkSize"` | |||
| StreamCount int `json:"streamCount"` | |||
| PaddingZeros bool `json:"paddingZeros"` | |||
| } | |||
| func (o *ChunkedSplit) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| str, err := sw.WaitStreams(planID, o.InputID) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer str[0].Stream.Close() | |||
| wg := sync.WaitGroup{} | |||
| outputs := io2.ChunkedSplit(str[0].Stream, o.ChunkSize, o.StreamCount, io2.ChunkedSplitOption{ | |||
| PaddingZeros: o.PaddingZeros, | |||
| }) | |||
| for i := range outputs { | |||
| wg.Add(1) | |||
| sw.StreamReady(planID, ioswitch.NewStream( | |||
| o.OutputIDs[i], | |||
| io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { | |||
| wg.Done() | |||
| }), | |||
| )) | |||
| } | |||
| wg.Wait() | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*ChunkedSplit)(nil)) | |||
| } | |||
| @@ -1,43 +1,64 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "sync" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "golang.org/x/sync/semaphore" | |||
| ) | |||
| type Clone struct { | |||
| InputID ioswitch.StreamID `json:"inputID"` | |||
| OutputIDs []ioswitch.StreamID `json:"outputIDs"` | |||
| type CloneStream struct { | |||
| Input *ioswitch.StreamVar `json:"input"` | |||
| Outputs []*ioswitch.StreamVar `json:"outputs"` | |||
| } | |||
| func (o *Clone) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| strs, err := sw.WaitStreams(planID, o.InputID) | |||
| func (o *CloneStream) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer strs[0].Stream.Close() | |||
| defer o.Input.Stream.Close() | |||
| wg := sync.WaitGroup{} | |||
| cloned := io2.Clone(strs[0].Stream, len(o.OutputIDs)) | |||
| cloned := io2.Clone(o.Input.Stream, len(o.Outputs)) | |||
| sem := semaphore.NewWeighted(int64(len(o.Outputs))) | |||
| for i, s := range cloned { | |||
| wg.Add(1) | |||
| sw.StreamReady(planID, | |||
| ioswitch.NewStream(o.OutputIDs[i], | |||
| io2.AfterReadClosedOnce(s, func(closer io.ReadCloser) { | |||
| wg.Done() | |||
| }), | |||
| ), | |||
| ) | |||
| sem.Acquire(ctx, 1) | |||
| o.Outputs[i].Stream = io2.AfterReadClosedOnce(s, func(closer io.ReadCloser) { | |||
| sem.Release(1) | |||
| }) | |||
| } | |||
| ioswitch.PutArrayVars(sw, o.Outputs) | |||
| return sem.Acquire(ctx, int64(len(o.Outputs))) | |||
| } | |||
| type CloneVar struct { | |||
| Raw ioswitch.Var `json:"raw"` | |||
| Cloneds []ioswitch.Var `json:"cloneds"` | |||
| } | |||
| func (o *CloneVar) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Raw) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| for _, v := range o.Cloneds { | |||
| if err := ioswitch.AssignVar(o.Raw, v); err != nil { | |||
| return fmt.Errorf("clone var: %w", err) | |||
| } | |||
| } | |||
| sw.PutVars(o.Cloneds...) | |||
| wg.Wait() | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*Clone)(nil)) | |||
| OpUnion.AddT((*CloneStream)(nil)) | |||
| OpUnion.AddT((*CloneVar)(nil)) | |||
| } | |||
| @@ -1,102 +1,195 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "sync" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| "gitlink.org.cn/cloudream/common/utils/sync2" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ec" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "golang.org/x/sync/semaphore" | |||
| ) | |||
| type ECReconstructAny struct { | |||
| EC cdssdk.ECRedundancy `json:"ec"` | |||
| InputIDs []ioswitch.StreamID `json:"inputIDs"` | |||
| OutputIDs []ioswitch.StreamID `json:"outputIDs"` | |||
| InputBlockIndexes []int `json:"inputBlockIndexes"` | |||
| OutputBlockIndexes []int `json:"outputBlockIndexes"` | |||
| EC cdssdk.ECRedundancy `json:"ec"` | |||
| Inputs []*ioswitch.StreamVar `json:"inputs"` | |||
| Outputs []*ioswitch.StreamVar `json:"outputs"` | |||
| InputBlockIndexes []int `json:"inputBlockIndexes"` | |||
| OutputBlockIndexes []int `json:"outputBlockIndexes"` | |||
| } | |||
| func (o *ECReconstructAny) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| func (o *ECReconstructAny) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| rs, err := ec.NewStreamRs(o.EC.K, o.EC.N, o.EC.ChunkSize) | |||
| if err != nil { | |||
| return fmt.Errorf("new ec: %w", err) | |||
| } | |||
| strs, err := sw.WaitStreams(planID, o.InputIDs...) | |||
| err = ioswitch.BindArrayVars(sw, ctx, o.Inputs) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer func() { | |||
| for _, s := range strs { | |||
| for _, s := range o.Inputs { | |||
| s.Stream.Close() | |||
| } | |||
| }() | |||
| var inputs []io.Reader | |||
| for _, s := range strs { | |||
| for _, s := range o.Inputs { | |||
| inputs = append(inputs, s.Stream) | |||
| } | |||
| outputs := rs.ReconstructAny(inputs, o.InputBlockIndexes, o.OutputBlockIndexes) | |||
| wg := sync.WaitGroup{} | |||
| for i, id := range o.OutputIDs { | |||
| wg.Add(1) | |||
| sw.StreamReady(planID, ioswitch.NewStream(id, io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { | |||
| wg.Done() | |||
| }))) | |||
| sem := semaphore.NewWeighted(int64(len(o.Outputs))) | |||
| for i := range o.Outputs { | |||
| sem.Acquire(ctx, 1) | |||
| o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { | |||
| sem.Release(1) | |||
| }) | |||
| } | |||
| wg.Wait() | |||
| ioswitch.PutArrayVars(sw, o.Outputs) | |||
| return nil | |||
| return sem.Acquire(ctx, int64(len(o.Outputs))) | |||
| } | |||
| type ECReconstruct struct { | |||
| EC cdssdk.ECRedundancy `json:"ec"` | |||
| InputIDs []ioswitch.StreamID `json:"inputIDs"` | |||
| OutputIDs []ioswitch.StreamID `json:"outputIDs"` | |||
| InputBlockIndexes []int `json:"inputBlockIndexes"` | |||
| EC cdssdk.ECRedundancy `json:"ec"` | |||
| Inputs []*ioswitch.StreamVar `json:"inputs"` | |||
| Outputs []*ioswitch.StreamVar `json:"outputs"` | |||
| InputBlockIndexes []int `json:"inputBlockIndexes"` | |||
| } | |||
| func (o *ECReconstruct) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| func (o *ECReconstruct) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| rs, err := ec.NewStreamRs(o.EC.K, o.EC.N, o.EC.ChunkSize) | |||
| if err != nil { | |||
| return fmt.Errorf("new ec: %w", err) | |||
| } | |||
| strs, err := sw.WaitStreams(planID, o.InputIDs...) | |||
| err = ioswitch.BindArrayVars(sw, ctx, o.Inputs) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer func() { | |||
| for _, s := range strs { | |||
| for _, s := range o.Inputs { | |||
| s.Stream.Close() | |||
| } | |||
| }() | |||
| var inputs []io.Reader | |||
| for _, s := range strs { | |||
| for _, s := range o.Inputs { | |||
| inputs = append(inputs, s.Stream) | |||
| } | |||
| outputs := rs.ReconstructData(inputs, o.InputBlockIndexes) | |||
| wg := sync.WaitGroup{} | |||
| for i, id := range o.OutputIDs { | |||
| wg.Add(1) | |||
| sw.StreamReady(planID, ioswitch.NewStream(id, io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { | |||
| wg.Done() | |||
| }))) | |||
| sem := semaphore.NewWeighted(int64(len(o.Outputs))) | |||
| for i := range o.Outputs { | |||
| sem.Acquire(ctx, 1) | |||
| o.Outputs[i].Stream = io2.AfterReadClosedOnce(outputs[i], func(closer io.ReadCloser) { | |||
| sem.Release(1) | |||
| }) | |||
| } | |||
| ioswitch.PutArrayVars(sw, o.Outputs) | |||
| return sem.Acquire(ctx, int64(len(o.Outputs))) | |||
| } | |||
| type ECMultiply struct { | |||
| Inputs []*ioswitch.StreamVar `json:"inputs"` | |||
| Coef [][]byte `json:"coef"` | |||
| Outputs []*ioswitch.StreamVar `json:"outputs"` | |||
| ChunkSize int64 `json:"chunkSize"` | |||
| } | |||
| func (o *ECMultiply) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := ioswitch.BindArrayVars(sw, ctx, o.Inputs) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer func() { | |||
| for _, s := range o.Inputs { | |||
| s.Stream.Close() | |||
| } | |||
| }() | |||
| outputVars := make([]*ioswitch.StreamVar, len(o.Outputs)) | |||
| outputWrs := make([]*io.PipeWriter, len(o.Outputs)) | |||
| for i := range o.Outputs { | |||
| rd, wr := io.Pipe() | |||
| outputVars[i] = &ioswitch.StreamVar{ | |||
| Stream: rd, | |||
| } | |||
| outputWrs[i] = wr | |||
| } | |||
| fut := future.NewSetVoid() | |||
| go func() { | |||
| mul := ec.GaloisMultiplier().BuildGalois() | |||
| inputChunks := make([][]byte, len(o.Inputs)) | |||
| for i := range o.Inputs { | |||
| inputChunks[i] = make([]byte, o.ChunkSize) | |||
| } | |||
| outputChunks := make([][]byte, len(o.Outputs)) | |||
| for i := range o.Outputs { | |||
| outputChunks[i] = make([]byte, o.ChunkSize) | |||
| } | |||
| for { | |||
| err := sync2.ParallelDo(o.Inputs, func(s *ioswitch.StreamVar, i int) error { | |||
| _, err := io.ReadFull(s.Stream, inputChunks[i]) | |||
| return err | |||
| }) | |||
| if err == io.EOF { | |||
| fut.SetVoid() | |||
| return | |||
| } | |||
| if err != nil { | |||
| fut.SetError(err) | |||
| return | |||
| } | |||
| err = mul.Multiply(o.Coef, inputChunks, outputChunks) | |||
| if err != nil { | |||
| fut.SetError(err) | |||
| return | |||
| } | |||
| for i := range o.Outputs { | |||
| err := io2.WriteAll(outputWrs[i], outputChunks[i]) | |||
| if err != nil { | |||
| fut.SetError(err) | |||
| return | |||
| } | |||
| } | |||
| } | |||
| }() | |||
| ioswitch.PutArrayVars(sw, outputVars) | |||
| err = fut.Wait(ctx) | |||
| if err != nil { | |||
| for _, wr := range outputWrs { | |||
| wr.CloseWithError(err) | |||
| } | |||
| return err | |||
| } | |||
| wg.Wait() | |||
| for _, wr := range outputWrs { | |||
| wr.Close() | |||
| } | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*ECReconstructAny)(nil)) | |||
| OpUnion.AddT((*ECReconstruct)(nil)) | |||
| OpUnion.AddT((*ECMultiply)(nil)) | |||
| } | |||
| @@ -13,16 +13,16 @@ import ( | |||
| ) | |||
| type FileWrite struct { | |||
| InputID ioswitch.StreamID `json:"inputID"` | |||
| FilePath string `json:"filePath"` | |||
| Input *ioswitch.StreamVar `json:"input"` | |||
| FilePath string `json:"filePath"` | |||
| } | |||
| func (o *FileWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| str, err := sw.WaitStreams(planID, o.InputID) | |||
| func (o *FileWrite) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer str[0].Stream.Close() | |||
| defer o.Input.Stream.Close() | |||
| dir := path.Dir(o.FilePath) | |||
| err = os.MkdirAll(dir, 0777) | |||
| @@ -36,7 +36,7 @@ func (o *FileWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| } | |||
| defer file.Close() | |||
| _, err = io.Copy(file, str[0].Stream) | |||
| _, err = io.Copy(file, o.Input.Stream) | |||
| if err != nil { | |||
| return fmt.Errorf("copying data to file: %w", err) | |||
| } | |||
| @@ -45,22 +45,22 @@ func (o *FileWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| } | |||
| type FileRead struct { | |||
| OutputID ioswitch.StreamID `json:"outputID"` | |||
| FilePath string `json:"filePath"` | |||
| Output *ioswitch.StreamVar `json:"output"` | |||
| FilePath string `json:"filePath"` | |||
| } | |||
| func (o *FileRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| func (o *FileRead) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| file, err := os.Open(o.FilePath) | |||
| if err != nil { | |||
| return fmt.Errorf("opening file: %w", err) | |||
| } | |||
| fut := future.NewSetVoid() | |||
| sw.StreamReady(planID, ioswitch.NewStream(o.OutputID, io2.AfterReadClosed(file, func(closer io.ReadCloser) { | |||
| o.Output.Stream = io2.AfterReadClosed(file, func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }))) | |||
| fut.Wait(context.TODO()) | |||
| }) | |||
| sw.PutVars(o.Output) | |||
| fut.Wait(ctx) | |||
| return nil | |||
| } | |||
| @@ -13,32 +13,27 @@ import ( | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type GRPCSend struct { | |||
| LocalID ioswitch.StreamID `json:"localID"` | |||
| RemoteID ioswitch.StreamID `json:"remoteID"` | |||
| Node cdssdk.Node `json:"node"` | |||
| type SendStream struct { | |||
| Stream *ioswitch.StreamVar `json:"stream"` | |||
| Node cdssdk.Node `json:"node"` | |||
| } | |||
| func (o *GRPCSend) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| logger. | |||
| WithField("LocalID", o.LocalID). | |||
| WithField("RemoteID", o.RemoteID). | |||
| Debugf("grpc send") | |||
| strs, err := sw.WaitStreams(planID, o.LocalID) | |||
| func (o *SendStream) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Stream) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer strs[0].Stream.Close() | |||
| defer o.Stream.Stream.Close() | |||
| // TODO 根据客户端地址选择IP和端口 | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(o.Node.ExternalIP, o.Node.ExternalGRPCPort) | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node)) | |||
| if err != nil { | |||
| return fmt.Errorf("new agent rpc client: %w", err) | |||
| } | |||
| defer stgglb.AgentRPCPool.Release(agtCli) | |||
| err = agtCli.SendStream(planID, o.RemoteID, strs[0].Stream) | |||
| logger.Debugf("sending stream %v to node %v", o.Stream.ID, o.Node) | |||
| err = agtCli.SendStream(ctx, sw.Plan().ID, o.Stream.ID, o.Stream.Stream) | |||
| if err != nil { | |||
| return fmt.Errorf("sending stream: %w", err) | |||
| } | |||
| @@ -46,39 +41,88 @@ func (o *GRPCSend) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| return nil | |||
| } | |||
| type GRPCFetch struct { | |||
| RemoteID ioswitch.StreamID `json:"remoteID"` | |||
| LocalID ioswitch.StreamID `json:"localID"` | |||
| Node cdssdk.Node `json:"node"` | |||
| type GetStream struct { | |||
| Stream *ioswitch.StreamVar `json:"stream"` | |||
| Node cdssdk.Node `json:"node"` | |||
| } | |||
| func (o *GRPCFetch) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| // TODO 根据客户端地址选择IP和端口 | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(o.Node.ExternalIP, o.Node.ExternalGRPCPort) | |||
| func (o *GetStream) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node)) | |||
| if err != nil { | |||
| return fmt.Errorf("new agent rpc client: %w", err) | |||
| } | |||
| defer stgglb.AgentRPCPool.Release(agtCli) | |||
| str, err := agtCli.FetchStream(planID, o.RemoteID) | |||
| logger.Debugf("getting stream %v from node %v", o.Stream.ID, o.Node) | |||
| str, err := agtCli.GetStream(sw.Plan().ID, o.Stream.ID) | |||
| if err != nil { | |||
| return fmt.Errorf("fetching stream: %w", err) | |||
| return fmt.Errorf("getting stream: %w", err) | |||
| } | |||
| fut := future.NewSetVoid() | |||
| str = io2.AfterReadClosedOnce(str, func(closer io.ReadCloser) { | |||
| o.Stream.Stream = io2.AfterReadClosedOnce(str, func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }) | |||
| sw.PutVars(o.Stream) | |||
| sw.StreamReady(planID, ioswitch.NewStream(o.LocalID, str)) | |||
| return fut.Wait(ctx) | |||
| } | |||
| // TODO | |||
| fut.Wait(context.TODO()) | |||
| type SendVar struct { | |||
| Var ioswitch.Var `json:"var"` | |||
| Node cdssdk.Node `json:"node"` | |||
| } | |||
| func (o *SendVar) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Var) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return err | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node)) | |||
| if err != nil { | |||
| return fmt.Errorf("new agent rpc client: %w", err) | |||
| } | |||
| defer stgglb.AgentRPCPool.Release(agtCli) | |||
| logger.Debugf("sending var %v to node %v", o.Var.GetID(), o.Node) | |||
| err = agtCli.SendVar(ctx, sw.Plan().ID, o.Var) | |||
| if err != nil { | |||
| return fmt.Errorf("sending var: %w", err) | |||
| } | |||
| return nil | |||
| } | |||
| type GetVar struct { | |||
| Var ioswitch.Var `json:"var"` | |||
| Node cdssdk.Node `json:"node"` | |||
| } | |||
| func (o *GetVar) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&o.Node)) | |||
| if err != nil { | |||
| return fmt.Errorf("new agent rpc client: %w", err) | |||
| } | |||
| defer stgglb.AgentRPCPool.Release(agtCli) | |||
| logger.Debugf("getting var %v from node %v", o.Var.GetID(), o.Node) | |||
| v2, err := agtCli.GetVar(ctx, sw.Plan().ID, o.Var) | |||
| if err != nil { | |||
| return fmt.Errorf("getting var: %w", err) | |||
| } | |||
| o.Var = v2 | |||
| sw.PutVars(o.Var) | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*GRPCSend)(nil)) | |||
| OpUnion.AddT((*GRPCFetch)(nil)) | |||
| OpUnion.AddT((*SendStream)(nil)) | |||
| OpUnion.AddT((*GetStream)(nil)) | |||
| OpUnion.AddT((*SendVar)(nil)) | |||
| OpUnion.AddT((*GetVar)(nil)) | |||
| } | |||
| @@ -14,15 +14,14 @@ import ( | |||
| ) | |||
| type IPFSRead struct { | |||
| Output ioswitch.StreamID `json:"output"` | |||
| FileHash string `json:"fileHash"` | |||
| Option ipfs.ReadOption `json:"option"` | |||
| Output *ioswitch.StreamVar `json:"output"` | |||
| FileHash string `json:"fileHash"` | |||
| Option ipfs.ReadOption `json:"option"` | |||
| } | |||
| func (o *IPFSRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| func (o *IPFSRead) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| logger. | |||
| WithField("FileHash", o.FileHash). | |||
| WithField("Output", o.Output). | |||
| Debugf("ipfs read op") | |||
| defer logger.Debugf("ipfs read op finished") | |||
| @@ -36,28 +35,26 @@ func (o *IPFSRead) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| if err != nil { | |||
| return fmt.Errorf("reading ipfs: %w", err) | |||
| } | |||
| defer file.Close() | |||
| fut := future.NewSetVoid() | |||
| file = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) { | |||
| o.Output.Stream = io2.AfterReadClosedOnce(file, func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }) | |||
| sw.PutVars(o.Output) | |||
| sw.StreamReady(planID, ioswitch.NewStream(o.Output, file)) | |||
| // TODO context | |||
| fut.Wait(context.TODO()) | |||
| return nil | |||
| return fut.Wait(ctx) | |||
| } | |||
| type IPFSWrite struct { | |||
| Input ioswitch.StreamID `json:"input"` | |||
| ResultKey string `json:"resultKey"` | |||
| Input *ioswitch.StreamVar `json:"input"` | |||
| FileHash *ioswitch.StringVar `json:"fileHash"` | |||
| } | |||
| func (o *IPFSWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| func (o *IPFSWrite) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| logger. | |||
| WithField("ResultKey", o.ResultKey). | |||
| WithField("Input", o.Input). | |||
| WithField("Input", o.Input.ID). | |||
| WithField("FileHashVar", o.FileHash.ID). | |||
| Debugf("ipfs write op") | |||
| ipfsCli, err := stgglb.IPFSPool.Acquire() | |||
| @@ -66,23 +63,18 @@ func (o *IPFSWrite) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| } | |||
| defer stgglb.IPFSPool.Release(ipfsCli) | |||
| strs, err := sw.WaitStreams(planID, o.Input) | |||
| err = sw.BindVars(ctx, o.Input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer strs[0].Stream.Close() | |||
| defer o.Input.Stream.Close() | |||
| fileHash, err := ipfsCli.CreateFile(strs[0].Stream) | |||
| o.FileHash.Value, err = ipfsCli.CreateFile(o.Input.Stream) | |||
| if err != nil { | |||
| return fmt.Errorf("creating ipfs file: %w", err) | |||
| } | |||
| if o.ResultKey != "" { | |||
| sw.AddResultValue(planID, ioswitch.ResultKV{ | |||
| Key: o.ResultKey, | |||
| Value: fileHash, | |||
| }) | |||
| } | |||
| sw.PutVars(o.FileHash) | |||
| return nil | |||
| } | |||
| @@ -10,38 +10,34 @@ import ( | |||
| ) | |||
| type Join struct { | |||
| InputIDs []ioswitch.StreamID `json:"inputIDs"` | |||
| OutputID ioswitch.StreamID `json:"outputID"` | |||
| Length int64 `json:"length"` | |||
| Inputs []*ioswitch.StreamVar `json:"inputs"` | |||
| Output *ioswitch.StreamVar `json:"output"` | |||
| Length int64 `json:"length"` | |||
| } | |||
| func (o *Join) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| strs, err := sw.WaitStreams(planID, o.InputIDs...) | |||
| func (o *Join) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := ioswitch.BindArrayVars(sw, ctx, o.Inputs) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| var strReaders []io.Reader | |||
| for _, s := range strs { | |||
| for _, s := range o.Inputs { | |||
| strReaders = append(strReaders, s.Stream) | |||
| } | |||
| defer func() { | |||
| for _, str := range strs { | |||
| for _, str := range o.Inputs { | |||
| str.Stream.Close() | |||
| } | |||
| }() | |||
| fut := future.NewSetVoid() | |||
| sw.StreamReady(planID, | |||
| ioswitch.NewStream(o.OutputID, | |||
| io2.AfterReadClosedOnce(io2.Length(io2.Join(strReaders), o.Length), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }), | |||
| ), | |||
| ) | |||
| fut.Wait(context.TODO()) | |||
| return nil | |||
| o.Output.Stream = io2.AfterReadClosedOnce(io2.Length(io2.Join(strReaders), o.Length), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }) | |||
| sw.PutVars(o.Output) | |||
| return fut.Wait(ctx) | |||
| } | |||
| func init() { | |||
| @@ -10,29 +10,25 @@ import ( | |||
| ) | |||
| type Length struct { | |||
| InputID ioswitch.StreamID `json:"inputID"` | |||
| OutputID ioswitch.StreamID `json:"outputID"` | |||
| Length int64 `json:"length"` | |||
| Input *ioswitch.StreamVar `json:"input"` | |||
| Output *ioswitch.StreamVar `json:"output"` | |||
| Length int64 `json:"length"` | |||
| } | |||
| func (o *Length) Execute(sw *ioswitch.Switch, planID ioswitch.PlanID) error { | |||
| strs, err := sw.WaitStreams(planID, o.InputID) | |||
| func (o *Length) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Input) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer strs[0].Stream.Close() | |||
| defer o.Input.Stream.Close() | |||
| fut := future.NewSetVoid() | |||
| sw.StreamReady(planID, | |||
| ioswitch.NewStream(o.OutputID, | |||
| io2.AfterReadClosedOnce(io2.Length(strs[0].Stream, o.Length), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }), | |||
| ), | |||
| ) | |||
| o.Output.Stream = io2.AfterReadClosedOnce(io2.Length(o.Input.Stream, o.Length), func(closer io.ReadCloser) { | |||
| fut.SetVoid() | |||
| }) | |||
| sw.PutVars(o.Output) | |||
| fut.Wait(context.TODO()) | |||
| return nil | |||
| return fut.Wait(ctx) | |||
| } | |||
| func init() { | |||
| @@ -0,0 +1,30 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "sync" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type Store struct { | |||
| Var ioswitch.Var | |||
| Key string | |||
| Store *sync.Map | |||
| } | |||
| func (o *Store) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Var) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| switch v := o.Var.(type) { | |||
| case *ioswitch.IntVar: | |||
| o.Store.Store(o.Key, v.Value) | |||
| case *ioswitch.StringVar: | |||
| o.Store.Store(o.Key, v.Value) | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,135 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "io" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type OnStreamBegin struct { | |||
| Raw *ioswitch.StreamVar `json:"raw"` | |||
| New *ioswitch.StreamVar `json:"new"` | |||
| Signal *ioswitch.SignalVar `json:"signal"` | |||
| } | |||
| func (o *OnStreamBegin) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Raw) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| o.New.Stream = o.Raw.Stream | |||
| sw.PutVars(o.New, o.Signal) | |||
| return nil | |||
| } | |||
| type OnStreamEnd struct { | |||
| Raw *ioswitch.StreamVar `json:"raw"` | |||
| New *ioswitch.StreamVar `json:"new"` | |||
| Signal *ioswitch.SignalVar `json:"signal"` | |||
| } | |||
| type onStreamEnd struct { | |||
| inner io.ReadCloser | |||
| callback *future.SetVoidFuture | |||
| } | |||
| func (o *onStreamEnd) Read(p []byte) (n int, err error) { | |||
| n, err = o.inner.Read(p) | |||
| if err == io.EOF { | |||
| o.callback.SetVoid() | |||
| } else if err != nil { | |||
| o.callback.SetError(err) | |||
| } | |||
| return n, err | |||
| } | |||
| func (o *onStreamEnd) Close() error { | |||
| o.callback.SetError(fmt.Errorf("stream closed early")) | |||
| return o.inner.Close() | |||
| } | |||
| func (o *OnStreamEnd) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, o.Raw) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| cb := future.NewSetVoid() | |||
| o.New.Stream = &onStreamEnd{ | |||
| inner: o.Raw.Stream, | |||
| callback: cb, | |||
| } | |||
| sw.PutVars(o.New) | |||
| err = cb.Wait(ctx) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| sw.PutVars(o.Signal) | |||
| return nil | |||
| } | |||
| type HoldUntil struct { | |||
| Waits []*ioswitch.SignalVar `json:"waits"` | |||
| Holds []ioswitch.Var `json:"holds"` | |||
| Emits []ioswitch.Var `json:"emits"` | |||
| } | |||
| func (w *HoldUntil) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, w.Holds...) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| err = ioswitch.BindArrayVars(sw, ctx, w.Waits) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| sw.PutVars(w.Emits...) | |||
| return nil | |||
| } | |||
| type HangUntil struct { | |||
| Waits []*ioswitch.SignalVar `json:"waits"` | |||
| Op ioswitch.Op `json:"op"` | |||
| } | |||
| func (h *HangUntil) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := ioswitch.BindArrayVars(sw, ctx, h.Waits) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| return h.Op.Execute(ctx, sw) | |||
| } | |||
| type Broadcast struct { | |||
| Source *ioswitch.SignalVar `json:"source"` | |||
| Targets []*ioswitch.SignalVar `json:"targets"` | |||
| } | |||
| func (b *Broadcast) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| err := sw.BindVars(ctx, b.Source) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| ioswitch.PutArrayVars(sw, b.Targets) | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*OnStreamBegin)(nil)) | |||
| OpUnion.AddT((*OnStreamEnd)(nil)) | |||
| OpUnion.AddT((*HoldUntil)(nil)) | |||
| OpUnion.AddT((*HangUntil)(nil)) | |||
| OpUnion.AddT((*Broadcast)(nil)) | |||
| } | |||
| @@ -0,0 +1,20 @@ | |||
| package ops | |||
| import ( | |||
| "context" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type ConstVar struct { | |||
| Var *ioswitch.StringVar `json:"var"` | |||
| } | |||
| func (o *ConstVar) Execute(ctx context.Context, sw *ioswitch.Switch) error { | |||
| sw.PutVars(o.Var) | |||
| return nil | |||
| } | |||
| func init() { | |||
| OpUnion.AddT((*ConstVar)(nil)) | |||
| } | |||
| @@ -0,0 +1,457 @@ | |||
| package plans | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/ipfs" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops" | |||
| ) | |||
| type AgentPlanBuilder struct { | |||
| blder *PlanBuilder | |||
| node cdssdk.Node | |||
| ops []ioswitch.Op | |||
| } | |||
| func (b *AgentPlanBuilder) IPFSRead(fileHash string, opts ...ipfs.ReadOption) *AgentStreamVar { | |||
| opt := ipfs.ReadOption{ | |||
| Offset: 0, | |||
| Length: -1, | |||
| } | |||
| if len(opts) > 0 { | |||
| opt = opts[0] | |||
| } | |||
| str := &AgentStreamVar{ | |||
| owner: b, | |||
| v: b.blder.newStreamVar(), | |||
| } | |||
| b.ops = append(b.ops, &ops.IPFSRead{ | |||
| Output: str.v, | |||
| FileHash: fileHash, | |||
| Option: opt, | |||
| }) | |||
| return str | |||
| } | |||
| func (b *AgentPlanBuilder) FileRead(filePath string) *AgentStreamVar { | |||
| agtStr := &AgentStreamVar{ | |||
| owner: b, | |||
| v: b.blder.newStreamVar(), | |||
| } | |||
| b.ops = append(b.ops, &ops.FileRead{ | |||
| Output: agtStr.v, | |||
| FilePath: filePath, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) ECReconstructAny(ec cdssdk.ECRedundancy, inBlockIndexes []int, outBlockIndexes []int, streams []*AgentStreamVar) []*AgentStreamVar { | |||
| var strs []*AgentStreamVar | |||
| var inputStrVars []*ioswitch.StreamVar | |||
| for _, str := range streams { | |||
| inputStrVars = append(inputStrVars, str.v) | |||
| } | |||
| var outputStrVars []*ioswitch.StreamVar | |||
| for i := 0; i < len(outBlockIndexes); i++ { | |||
| v := b.blder.newStreamVar() | |||
| strs = append(strs, &AgentStreamVar{ | |||
| owner: b, | |||
| v: v, | |||
| }) | |||
| outputStrVars = append(outputStrVars, v) | |||
| } | |||
| b.ops = append(b.ops, &ops.ECReconstructAny{ | |||
| EC: ec, | |||
| Inputs: inputStrVars, | |||
| Outputs: outputStrVars, | |||
| InputBlockIndexes: inBlockIndexes, | |||
| OutputBlockIndexes: outBlockIndexes, | |||
| }) | |||
| return strs | |||
| } | |||
| func (b *AgentPlanBuilder) ECReconstruct(ec cdssdk.ECRedundancy, inBlockIndexes []int, streams []*AgentStreamVar) []*AgentStreamVar { | |||
| var strs []*AgentStreamVar | |||
| var inputStrVars []*ioswitch.StreamVar | |||
| for _, str := range streams { | |||
| inputStrVars = append(inputStrVars, str.v) | |||
| } | |||
| var outputStrVars []*ioswitch.StreamVar | |||
| for i := 0; i < ec.K; i++ { | |||
| v := b.blder.newStreamVar() | |||
| strs = append(strs, &AgentStreamVar{ | |||
| owner: b, | |||
| v: v, | |||
| }) | |||
| outputStrVars = append(outputStrVars, v) | |||
| } | |||
| b.ops = append(b.ops, &ops.ECReconstruct{ | |||
| EC: ec, | |||
| Inputs: inputStrVars, | |||
| Outputs: outputStrVars, | |||
| InputBlockIndexes: inBlockIndexes, | |||
| }) | |||
| return strs | |||
| } | |||
| // 进行galois矩阵乘法运算,ecof * inputs | |||
| func (b *AgentPlanBuilder) ECMultiply(coef [][]byte, inputs []*AgentStreamVar, chunkSize int64) []*AgentStreamVar { | |||
| outs := make([]*AgentStreamVar, len(coef)) | |||
| outVars := make([]*ioswitch.StreamVar, len(coef)) | |||
| for i := 0; i < len(outs); i++ { | |||
| sv := b.blder.newStreamVar() | |||
| outs[i] = &AgentStreamVar{ | |||
| owner: b, | |||
| v: sv, | |||
| } | |||
| outVars[i] = sv | |||
| } | |||
| ins := make([]*ioswitch.StreamVar, len(inputs)) | |||
| for i := 0; i < len(inputs); i++ { | |||
| ins[i] = inputs[i].v | |||
| } | |||
| b.ops = append(b.ops, &ops.ECMultiply{ | |||
| Inputs: ins, | |||
| Outputs: outVars, | |||
| Coef: coef, | |||
| ChunkSize: chunkSize, | |||
| }) | |||
| return outs | |||
| } | |||
| func (b *AgentPlanBuilder) Join(length int64, streams []*AgentStreamVar) *AgentStreamVar { | |||
| agtStr := &AgentStreamVar{ | |||
| owner: b, | |||
| v: b.blder.newStreamVar(), | |||
| } | |||
| var inputStrVars []*ioswitch.StreamVar | |||
| for _, str := range streams { | |||
| inputStrVars = append(inputStrVars, str.v) | |||
| } | |||
| b.ops = append(b.ops, &ops.Join{ | |||
| Inputs: inputStrVars, | |||
| Output: agtStr.v, | |||
| Length: length, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) ChunkedJoin(chunkSize int, streams []*AgentStreamVar) *AgentStreamVar { | |||
| agtStr := &AgentStreamVar{ | |||
| owner: b, | |||
| v: b.blder.newStreamVar(), | |||
| } | |||
| var inputStrVars []*ioswitch.StreamVar | |||
| for _, str := range streams { | |||
| inputStrVars = append(inputStrVars, str.v) | |||
| } | |||
| b.ops = append(b.ops, &ops.ChunkedJoin{ | |||
| Inputs: inputStrVars, | |||
| Output: agtStr.v, | |||
| ChunkSize: chunkSize, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) NewString(str string) *AgentStringVar { | |||
| v := b.blder.newStringVar() | |||
| v.Value = str | |||
| return &AgentStringVar{ | |||
| owner: b, | |||
| v: v, | |||
| } | |||
| } | |||
| func (b *AgentPlanBuilder) NewSignal() *AgentSignalVar { | |||
| v := b.blder.newSignalVar() | |||
| return &AgentSignalVar{ | |||
| owner: b, | |||
| v: v, | |||
| } | |||
| } | |||
| // 字节流变量 | |||
| type AgentStreamVar struct { | |||
| owner *AgentPlanBuilder | |||
| v *ioswitch.StreamVar | |||
| } | |||
| func (s *AgentStreamVar) IPFSWrite() *AgentStringVar { | |||
| v := s.owner.blder.newStringVar() | |||
| s.owner.ops = append(s.owner.ops, &ops.IPFSWrite{ | |||
| Input: s.v, | |||
| FileHash: v, | |||
| }) | |||
| return &AgentStringVar{ | |||
| owner: s.owner, | |||
| v: v, | |||
| } | |||
| } | |||
| func (b *AgentStreamVar) FileWrite(filePath string) { | |||
| b.owner.ops = append(b.owner.ops, &ops.FileWrite{ | |||
| Input: b.v, | |||
| FilePath: filePath, | |||
| }) | |||
| } | |||
| func (b *AgentStreamVar) ChunkedSplit(chunkSize int, streamCount int, paddingZeros bool) []*AgentStreamVar { | |||
| var strs []*AgentStreamVar | |||
| var outputStrVars []*ioswitch.StreamVar | |||
| for i := 0; i < streamCount; i++ { | |||
| v := b.owner.blder.newStreamVar() | |||
| strs = append(strs, &AgentStreamVar{ | |||
| owner: b.owner, | |||
| v: v, | |||
| }) | |||
| outputStrVars = append(outputStrVars, v) | |||
| } | |||
| b.owner.ops = append(b.owner.ops, &ops.ChunkedSplit{ | |||
| Input: b.v, | |||
| Outputs: outputStrVars, | |||
| ChunkSize: chunkSize, | |||
| PaddingZeros: paddingZeros, | |||
| }) | |||
| return strs | |||
| } | |||
| func (s *AgentStreamVar) Length(length int64) *AgentStreamVar { | |||
| agtStr := &AgentStreamVar{ | |||
| owner: s.owner, | |||
| v: s.owner.blder.newStreamVar(), | |||
| } | |||
| s.owner.ops = append(s.owner.ops, &ops.Length{ | |||
| Input: s.v, | |||
| Output: agtStr.v, | |||
| Length: length, | |||
| }) | |||
| return agtStr | |||
| } | |||
// To moves this stream to another agent: a SendStream op is recorded on
// the current owner, then ownership of the variable switches to the
// destination node's plan builder. Returns the same AgentStreamVar (now
// owned by the destination) for chaining.
func (s *AgentStreamVar) To(node cdssdk.Node) *AgentStreamVar {
	// The send op must be appended to the *current* owner before the
	// owner is re-pointed at the destination agent.
	s.owner.ops = append(s.owner.ops, &ops.SendStream{Stream: s.v, Node: node})
	s.owner = s.owner.blder.AtAgent(node)
	return s
}
| func (s *AgentStreamVar) ToExecutor() *ExecutorStreamVar { | |||
| s.owner.blder.executorPlan.ops = append(s.owner.blder.executorPlan.ops, &ops.GetStream{ | |||
| Stream: s.v, | |||
| Node: s.owner.node, | |||
| }) | |||
| return &ExecutorStreamVar{ | |||
| blder: s.owner.blder, | |||
| v: s.v, | |||
| } | |||
| } | |||
| func (s *AgentStreamVar) Clone(cnt int) []*AgentStreamVar { | |||
| var strs []*AgentStreamVar | |||
| var outputStrVars []*ioswitch.StreamVar | |||
| for i := 0; i < cnt; i++ { | |||
| v := s.owner.blder.newStreamVar() | |||
| strs = append(strs, &AgentStreamVar{ | |||
| owner: s.owner, | |||
| v: v, | |||
| }) | |||
| outputStrVars = append(outputStrVars, v) | |||
| } | |||
| s.owner.ops = append(s.owner.ops, &ops.CloneStream{ | |||
| Input: s.v, | |||
| Outputs: outputStrVars, | |||
| }) | |||
| return strs | |||
| } | |||
| // 当流产生时发送一个信号 | |||
| func (v *AgentStreamVar) OnBegin() (*AgentStreamVar, *AgentSignalVar) { | |||
| ns := v.owner.blder.newStreamVar() | |||
| s := v.owner.blder.newSignalVar() | |||
| v.owner.ops = append(v.owner.ops, &ops.OnStreamBegin{ | |||
| Raw: v.v, | |||
| New: ns, | |||
| Signal: s, | |||
| }) | |||
| return &AgentStreamVar{owner: v.owner, v: ns}, &AgentSignalVar{owner: v.owner, v: s} | |||
| } | |||
| // 当流结束时发送一个信号 | |||
| func (v *AgentStreamVar) OnEnd() (*AgentStreamVar, *AgentSignalVar) { | |||
| ns := v.owner.blder.newStreamVar() | |||
| s := v.owner.blder.newSignalVar() | |||
| v.owner.ops = append(v.owner.ops, &ops.OnStreamEnd{ | |||
| Raw: v.v, | |||
| New: ns, | |||
| Signal: s, | |||
| }) | |||
| return &AgentStreamVar{owner: v.owner, v: ns}, &AgentSignalVar{owner: v.owner, v: s} | |||
| } | |||
| // 将此流暂存,直到一个信号产生后才释放(一个新流) | |||
| func (v *AgentStreamVar) HoldUntil(wait *AgentSignalVar) *AgentStreamVar { | |||
| nv := v.owner.blder.newStreamVar() | |||
| v.owner.ops = append(v.owner.ops, &ops.HoldUntil{ | |||
| Waits: []*ioswitch.SignalVar{wait.v}, | |||
| Holds: []ioswitch.Var{v.v}, | |||
| Emits: []ioswitch.Var{nv}, | |||
| }) | |||
| return &AgentStreamVar{owner: v.owner, v: nv} | |||
| } | |||
// AgentStringVar is a string variable living on a specific agent.
type AgentStringVar struct {
	owner *AgentPlanBuilder // agent plan this variable currently belongs to
	v     *ioswitch.StringVar
}
// To moves this string variable to another agent: a SendVar op is
// recorded on the current owner, then ownership switches to the
// destination node's plan builder. Returns the receiver for chaining.
func (v *AgentStringVar) To(node cdssdk.Node) *AgentStringVar {
	// Append to the current owner before re-pointing it at the destination.
	v.owner.ops = append(v.owner.ops, &ops.SendVar{Var: v.v, Node: node})
	v.owner = v.owner.blder.AtAgent(node)
	return v
}
| func (v *AgentStringVar) ToExecutor() *ExecutorStringVar { | |||
| v.owner.blder.executorPlan.ops = append(v.owner.blder.executorPlan.ops, &ops.GetVar{ | |||
| Var: v.v, | |||
| Node: v.owner.node, | |||
| }) | |||
| return &ExecutorStringVar{ | |||
| blder: v.owner.blder, | |||
| v: v.v, | |||
| } | |||
| } | |||
| func (v *AgentStringVar) Clone() (*AgentStringVar, *AgentStringVar) { | |||
| c1 := v.owner.blder.newStringVar() | |||
| c2 := v.owner.blder.newStringVar() | |||
| v.owner.ops = append(v.owner.ops, &ops.CloneVar{ | |||
| Raw: v.v, | |||
| Cloneds: []ioswitch.Var{c1, c2}, | |||
| }) | |||
| return &AgentStringVar{owner: v.owner, v: c1}, &AgentStringVar{owner: v.owner, v: c2} | |||
| } | |||
| // 返回cnt+1个复制后的变量 | |||
| func (v *AgentStringVar) CloneN(cnt int) []*AgentStringVar { | |||
| var strs []*AgentStringVar | |||
| var cloned []ioswitch.Var | |||
| for i := 0; i < cnt+1; i++ { | |||
| c := v.owner.blder.newStringVar() | |||
| strs = append(strs, &AgentStringVar{ | |||
| owner: v.owner, | |||
| v: c, | |||
| }) | |||
| cloned = append(cloned, c) | |||
| } | |||
| v.owner.ops = append(v.owner.ops, &ops.CloneVar{ | |||
| Raw: v.v, | |||
| Cloneds: cloned, | |||
| }) | |||
| return strs | |||
| } | |||
| // 将此变量暂存,直到一个信号产生后才释放(一个新变量) | |||
| func (v *AgentStringVar) HoldUntil(wait *AgentSignalVar) *AgentStringVar { | |||
| nv := v.owner.blder.newStringVar() | |||
| v.owner.ops = append(v.owner.ops, &ops.HoldUntil{ | |||
| Waits: []*ioswitch.SignalVar{wait.v}, | |||
| Holds: []ioswitch.Var{v.v}, | |||
| Emits: []ioswitch.Var{nv}, | |||
| }) | |||
| return &AgentStringVar{owner: v.owner, v: nv} | |||
| } | |||
// AgentIntVar is an integer variable living on a specific agent.
// NOTE(review): no methods for it are visible in this part of the file.
type AgentIntVar struct {
	owner *AgentPlanBuilder
	v     *ioswitch.IntVar
}
// AgentSignalVar is a signal variable living on a specific agent; it is
// used to order ops (see OnBegin/OnEnd/HoldUntil/Broadcast).
type AgentSignalVar struct {
	owner *AgentPlanBuilder
	v     *ioswitch.SignalVar
}
// To moves this signal variable to another agent: a SendVar op is
// recorded on the current owner, then ownership switches to the
// destination node's plan builder. Returns the receiver for chaining.
func (v *AgentSignalVar) To(node cdssdk.Node) *AgentSignalVar {
	// Append to the current owner before re-pointing it at the destination.
	v.owner.ops = append(v.owner.ops, &ops.SendVar{Var: v.v, Node: node})
	v.owner = v.owner.blder.AtAgent(node)
	return v
}
| func (v *AgentSignalVar) ToExecutor() *ExecutorSignalVar { | |||
| v.owner.blder.executorPlan.ops = append(v.owner.blder.executorPlan.ops, &ops.GetVar{ | |||
| Var: v.v, | |||
| Node: v.owner.node, | |||
| }) | |||
| return &ExecutorSignalVar{ | |||
| blder: v.owner.blder, | |||
| v: v.v, | |||
| } | |||
| } | |||
| // 当这个信号被产生时,同时产生另外n个信号 | |||
| func (v *AgentSignalVar) Broadcast(cnt int) []*AgentSignalVar { | |||
| var ss []*AgentSignalVar | |||
| var targets []*ioswitch.SignalVar | |||
| for i := 0; i < cnt; i++ { | |||
| c := v.owner.blder.newSignalVar() | |||
| ss = append(ss, &AgentSignalVar{ | |||
| owner: v.owner, | |||
| v: c, | |||
| }) | |||
| targets = append(targets, c) | |||
| } | |||
| v.owner.ops = append(v.owner.ops, &ops.Broadcast{ | |||
| Source: v.v, | |||
| Targets: targets, | |||
| }) | |||
| return ss | |||
| } | |||
| @@ -1,276 +0,0 @@ | |||
| package plans | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/ipfs" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops" | |||
| ) | |||
// AgentPlanBuilder accumulates the ops that a single agent (node) will
// run as part of a composed plan.
type AgentPlanBuilder struct {
	owner *PlanBuilder // builder of the whole composed plan
	node  cdssdk.Node  // agent this sub-plan executes on
	ops   []ioswitch.Op
}
// AgentStream is a handle to a stream that lives on a particular agent
// during plan execution.
type AgentStream struct {
	owner *AgentPlanBuilder
	info  *StreamInfo
}
| func (b *AgentPlanBuilder) Build(planID ioswitch.PlanID) (AgentPlan, error) { | |||
| plan := ioswitch.Plan{ | |||
| ID: planID, | |||
| Ops: b.ops, | |||
| } | |||
| return AgentPlan{ | |||
| Plan: plan, | |||
| Node: b.node, | |||
| }, nil | |||
| } | |||
// GRCPFetch records an op that pulls the given remote stream from node
// over gRPC, yielding a new local stream on this agent.
//
// NOTE(review): the method name has a typo (GRCP -> GRPC); renaming
// would break callers, so it is only flagged here.
func (b *AgentPlanBuilder) GRCPFetch(node cdssdk.Node, str *AgentStream) *AgentStream {
	agtStr := &AgentStream{
		owner: b,
		info:  b.owner.newStream(),
	}
	b.ops = append(b.ops, &ops.GRPCFetch{
		RemoteID: str.info.ID,
		LocalID:  agtStr.info.ID,
		Node:     node,
	})
	return agtStr
}
| func (s *AgentStream) GRPCSend(node cdssdk.Node) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: s.owner.owner.AtAgent(node), | |||
| info: s.owner.owner.newStream(), | |||
| } | |||
| s.owner.ops = append(s.owner.ops, &ops.GRPCSend{ | |||
| LocalID: s.info.ID, | |||
| RemoteID: agtStr.info.ID, | |||
| Node: node, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) IPFSRead(fileHash string, opts ...ipfs.ReadOption) *AgentStream { | |||
| opt := ipfs.ReadOption{ | |||
| Offset: 0, | |||
| Length: -1, | |||
| } | |||
| if len(opts) > 0 { | |||
| opt = opts[0] | |||
| } | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| b.ops = append(b.ops, &ops.IPFSRead{ | |||
| Output: agtStr.info.ID, | |||
| FileHash: fileHash, | |||
| Option: opt, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (s *AgentStream) IPFSWrite(resultKey string) { | |||
| s.owner.ops = append(s.owner.ops, &ops.IPFSWrite{ | |||
| Input: s.info.ID, | |||
| ResultKey: resultKey, | |||
| }) | |||
| } | |||
| func (b *AgentPlanBuilder) FileRead(filePath string) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| b.ops = append(b.ops, &ops.FileRead{ | |||
| OutputID: agtStr.info.ID, | |||
| FilePath: filePath, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentStream) FileWrite(filePath string) { | |||
| b.owner.ops = append(b.owner.ops, &ops.FileWrite{ | |||
| InputID: b.info.ID, | |||
| FilePath: filePath, | |||
| }) | |||
| } | |||
| func (b *AgentPlanBuilder) ECReconstructAny(ec cdssdk.ECRedundancy, inBlockIndexes []int, outBlockIndexes []int, streams ...*AgentStream) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var inputStrIDs []ioswitch.StreamID | |||
| for _, str := range streams { | |||
| inputStrIDs = append(inputStrIDs, str.info.ID) | |||
| } | |||
| var outputStrIDs []ioswitch.StreamID | |||
| for i := 0; i < len(outBlockIndexes); i++ { | |||
| info := b.owner.newStream() | |||
| mstr.Streams = append(mstr.Streams, &AgentStream{ | |||
| owner: b, | |||
| info: info, | |||
| }) | |||
| outputStrIDs = append(outputStrIDs, info.ID) | |||
| } | |||
| b.ops = append(b.ops, &ops.ECReconstructAny{ | |||
| EC: ec, | |||
| InputIDs: inputStrIDs, | |||
| OutputIDs: outputStrIDs, | |||
| InputBlockIndexes: inBlockIndexes, | |||
| OutputBlockIndexes: outBlockIndexes, | |||
| }) | |||
| return mstr | |||
| } | |||
| func (b *AgentPlanBuilder) ECReconstruct(ec cdssdk.ECRedundancy, inBlockIndexes []int, streams ...*AgentStream) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var inputStrIDs []ioswitch.StreamID | |||
| for _, str := range streams { | |||
| inputStrIDs = append(inputStrIDs, str.info.ID) | |||
| } | |||
| var outputStrIDs []ioswitch.StreamID | |||
| for i := 0; i < ec.K; i++ { | |||
| info := b.owner.newStream() | |||
| mstr.Streams = append(mstr.Streams, &AgentStream{ | |||
| owner: b, | |||
| info: info, | |||
| }) | |||
| outputStrIDs = append(outputStrIDs, info.ID) | |||
| } | |||
| b.ops = append(b.ops, &ops.ECReconstruct{ | |||
| EC: ec, | |||
| InputIDs: inputStrIDs, | |||
| OutputIDs: outputStrIDs, | |||
| InputBlockIndexes: inBlockIndexes, | |||
| }) | |||
| return mstr | |||
| } | |||
| func (b *AgentStream) ChunkedSplit(chunkSize int, streamCount int, paddingZeros bool) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var outputStrIDs []ioswitch.StreamID | |||
| for i := 0; i < streamCount; i++ { | |||
| info := b.owner.owner.newStream() | |||
| mstr.Streams = append(mstr.Streams, &AgentStream{ | |||
| owner: b.owner, | |||
| info: info, | |||
| }) | |||
| outputStrIDs = append(outputStrIDs, info.ID) | |||
| } | |||
| b.owner.ops = append(b.owner.ops, &ops.ChunkedSplit{ | |||
| InputID: b.info.ID, | |||
| OutputIDs: outputStrIDs, | |||
| ChunkSize: chunkSize, | |||
| StreamCount: streamCount, | |||
| PaddingZeros: paddingZeros, | |||
| }) | |||
| return mstr | |||
| } | |||
| func (s *AgentStream) Length(length int64) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: s.owner, | |||
| info: s.owner.owner.newStream(), | |||
| } | |||
| s.owner.ops = append(s.owner.ops, &ops.Length{ | |||
| InputID: s.info.ID, | |||
| OutputID: agtStr.info.ID, | |||
| Length: length, | |||
| }) | |||
| return agtStr | |||
| } | |||
// ToExecutor marks this stream for consumption by the executor, keeping
// a reference to the node it comes from.
func (s *AgentStream) ToExecutor() *ToExecutorStream {
	return &ToExecutorStream{
		info:     s.info,
		fromNode: &s.owner.node,
	}
}
| func (b *AgentPlanBuilder) Join(length int64, streams ...*AgentStream) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| var inputStrIDs []ioswitch.StreamID | |||
| for _, str := range streams { | |||
| inputStrIDs = append(inputStrIDs, str.info.ID) | |||
| } | |||
| b.ops = append(b.ops, &ops.Join{ | |||
| InputIDs: inputStrIDs, | |||
| OutputID: agtStr.info.ID, | |||
| Length: length, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (b *AgentPlanBuilder) ChunkedJoin(chunkSize int, streams ...*AgentStream) *AgentStream { | |||
| agtStr := &AgentStream{ | |||
| owner: b, | |||
| info: b.owner.newStream(), | |||
| } | |||
| var inputStrIDs []ioswitch.StreamID | |||
| for _, str := range streams { | |||
| inputStrIDs = append(inputStrIDs, str.info.ID) | |||
| } | |||
| b.ops = append(b.ops, &ops.ChunkedJoin{ | |||
| InputIDs: inputStrIDs, | |||
| OutputID: agtStr.info.ID, | |||
| ChunkSize: chunkSize, | |||
| }) | |||
| return agtStr | |||
| } | |||
| func (s *AgentStream) Clone(cnt int) *MultiStream { | |||
| mstr := &MultiStream{} | |||
| var outputStrIDs []ioswitch.StreamID | |||
| for i := 0; i < cnt; i++ { | |||
| info := s.owner.owner.newStream() | |||
| mstr.Streams = append(mstr.Streams, &AgentStream{ | |||
| owner: s.owner, | |||
| info: info, | |||
| }) | |||
| outputStrIDs = append(outputStrIDs, info.ID) | |||
| } | |||
| s.owner.ops = append(s.owner.ops, &ops.Clone{ | |||
| InputID: s.info.ID, | |||
| OutputIDs: outputStrIDs, | |||
| }) | |||
| return mstr | |||
| } | |||
| @@ -2,190 +2,165 @@ package plans | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "io" | |||
| "sync" | |||
| "sync/atomic" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/utils/io2" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| agtmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/agent" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/ops" | |||
| ) | |||
| type ExecutorResult struct { | |||
| ResultValues map[string]any | |||
| type Executor struct { | |||
| planID ioswitch.PlanID | |||
| plan *PlanBuilder | |||
| callback *future.SetVoidFuture | |||
| ctx context.Context | |||
| cancel context.CancelFunc | |||
| executorSw *ioswitch.Switch | |||
| } | |||
| type Executor struct { | |||
| plan ComposedPlan | |||
| callback *future.SetValueFuture[ExecutorResult] | |||
| mqClis []*agtmq.Client | |||
| planTaskIDs []string | |||
| func (e *Executor) BeginWrite(str io.ReadCloser, target ExecutorWriteStream) { | |||
| target.stream.Stream = str | |||
| e.executorSw.PutVars(target.stream) | |||
| } | |||
| func Execute(plan ComposedPlan) (*Executor, error) { | |||
| executor := Executor{ | |||
| plan: plan, | |||
| callback: future.NewSetValue[ExecutorResult](), | |||
| func (e *Executor) BeginRead(target ExecutorReadStream) (io.ReadCloser, error) { | |||
| err := e.executorSw.BindVars(e.ctx, target.stream) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("bind vars: %w", err) | |||
| } | |||
| var err error | |||
| for _, a := range plan.AgentPlans { | |||
| var cli *agtmq.Client | |||
| cli, err = stgglb.AgentMQPool.Acquire(a.Node.NodeID) | |||
| if err != nil { | |||
| executor.Close() | |||
| return nil, fmt.Errorf("new mq client for %d: %w", a.Node.NodeID, err) | |||
| } | |||
| executor.mqClis = append(executor.mqClis, cli) | |||
| } | |||
| return target.stream.Stream, nil | |||
| } | |||
| for i, a := range plan.AgentPlans { | |||
| cli := executor.mqClis[i] | |||
| func (e *Executor) Signal(signal ExecutorSignalVar) { | |||
| e.executorSw.PutVars(signal.v) | |||
| } | |||
| _, err := cli.SetupIOPlan(agtmq.NewSetupIOPlan(a.Plan)) | |||
| if err != nil { | |||
| for i -= 1; i >= 0; i-- { | |||
| executor.mqClis[i].CancelIOPlan(agtmq.NewCancelIOPlan(plan.ID)) | |||
| } | |||
| executor.Close() | |||
| return nil, fmt.Errorf("setup plan at %d: %w", a.Node.NodeID, err) | |||
| } | |||
| func (e *Executor) Wait(ctx context.Context) (map[string]any, error) { | |||
| err := e.callback.Wait(ctx) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| for i, a := range plan.AgentPlans { | |||
| cli := executor.mqClis[i] | |||
| ret := make(map[string]any) | |||
| e.plan.storeMap.Range(func(k, v any) bool { | |||
| ret[k.(string)] = v | |||
| return true | |||
| }) | |||
| resp, err := cli.StartIOPlan(agtmq.NewStartIOPlan(a.Plan.ID)) | |||
| if err != nil { | |||
| executor.cancelAll() | |||
| executor.Close() | |||
| return nil, fmt.Errorf("setup plan at %d: %w", a.Node.NodeID, err) | |||
| } | |||
| executor.planTaskIDs = append(executor.planTaskIDs, resp.TaskID) | |||
| } | |||
| return ret, nil | |||
| } | |||
| go executor.pollResult() | |||
| func (e *Executor) execute() { | |||
| wg := sync.WaitGroup{} | |||
| return &executor, nil | |||
| } | |||
| for _, p := range e.plan.agentPlans { | |||
| wg.Add(1) | |||
| func (e *Executor) SendStream(info *FromExecutorStream, stream io.Reader) error { | |||
| // TODO 考虑不使用stgglb的Local | |||
| nodeIP := info.toNode.ExternalIP | |||
| grpcPort := info.toNode.ExternalGRPCPort | |||
| if info.toNode.LocationID == stgglb.Local.LocationID { | |||
| nodeIP = info.toNode.LocalIP | |||
| grpcPort = info.toNode.LocalGRPCPort | |||
| } | |||
| go func(p *AgentPlanBuilder) { | |||
| defer wg.Done() | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort) | |||
| if err != nil { | |||
| return fmt.Errorf("new agent rpc client: %w", err) | |||
| } | |||
| defer stgglb.AgentRPCPool.Release(agtCli) | |||
| plan := ioswitch.Plan{ | |||
| ID: e.planID, | |||
| Ops: p.ops, | |||
| } | |||
| return agtCli.SendStream(e.plan.ID, info.info.ID, stream) | |||
| } | |||
| cli, err := stgglb.AgentRPCPool.Acquire(stgglb.SelectGRPCAddress(&p.node)) | |||
| if err != nil { | |||
| e.stopWith(fmt.Errorf("new agent rpc client of node %v: %w", p.node.NodeID, err)) | |||
| return | |||
| } | |||
| defer stgglb.AgentRPCPool.Release(cli) | |||
| func (e *Executor) ReadStream(info *ToExecutorStream) (io.ReadCloser, error) { | |||
| // TODO 考虑不使用stgglb的Local | |||
| nodeIP := info.fromNode.ExternalIP | |||
| grpcPort := info.fromNode.ExternalGRPCPort | |||
| if info.fromNode.LocationID == stgglb.Local.LocationID { | |||
| nodeIP = info.fromNode.LocalIP | |||
| grpcPort = info.fromNode.LocalGRPCPort | |||
| err = cli.ExecuteIOPlan(e.ctx, plan) | |||
| if err != nil { | |||
| e.stopWith(fmt.Errorf("execute plan at %v: %w", p.node.NodeID, err)) | |||
| return | |||
| } | |||
| }(p) | |||
| } | |||
| agtCli, err := stgglb.AgentRPCPool.Acquire(nodeIP, grpcPort) | |||
| err := e.executorSw.Run(e.ctx) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("new agent rpc client: %w", err) | |||
| e.stopWith(fmt.Errorf("run executor switch: %w", err)) | |||
| return | |||
| } | |||
| str, err := agtCli.FetchStream(e.plan.ID, info.info.ID) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| wg.Wait() | |||
| return io2.AfterReadClosed(str, func(closer io.ReadCloser) { | |||
| stgglb.AgentRPCPool.Release(agtCli) | |||
| }), nil | |||
| e.callback.SetVoid() | |||
| } | |||
| func (e *Executor) Wait() (ExecutorResult, error) { | |||
| return e.callback.WaitValue(context.TODO()) | |||
| func (e *Executor) stopWith(err error) { | |||
| e.callback.SetError(err) | |||
| e.cancel() | |||
| } | |||
| func (e *Executor) cancelAll() { | |||
| for _, cli := range e.mqClis { | |||
| cli.CancelIOPlan(agtmq.NewCancelIOPlan(e.plan.ID)) | |||
| } | |||
| type ExecutorPlanBuilder struct { | |||
| blder *PlanBuilder | |||
| ops []ioswitch.Op | |||
| } | |||
| func (e *Executor) Close() { | |||
| for _, c := range e.mqClis { | |||
| stgglb.AgentMQPool.Release(c) | |||
| } | |||
| type ExecutorStreamVar struct { | |||
| blder *PlanBuilder | |||
| v *ioswitch.StreamVar | |||
| } | |||
| type ExecutorWriteStream struct { | |||
| stream *ioswitch.StreamVar | |||
| } | |||
| func (e *Executor) pollResult() { | |||
| wg := sync.WaitGroup{} | |||
| var anyErr error | |||
| var done atomic.Bool | |||
| rets := make([]*ioswitch.PlanResult, len(e.plan.AgentPlans)) | |||
| func (b *ExecutorPlanBuilder) WillWrite() (ExecutorWriteStream, *ExecutorStreamVar) { | |||
| stream := b.blder.newStreamVar() | |||
| return ExecutorWriteStream{stream}, &ExecutorStreamVar{blder: b.blder, v: stream} | |||
| } | |||
| for i, id := range e.planTaskIDs { | |||
| idx := i | |||
| taskID := id | |||
| func (b *ExecutorPlanBuilder) WillSignal() *ExecutorSignalVar { | |||
| s := b.blder.newSignalVar() | |||
| return &ExecutorSignalVar{blder: b.blder, v: s} | |||
| } | |||
| wg.Add(1) | |||
| go func() { | |||
| defer wg.Done() | |||
| type ExecutorReadStream struct { | |||
| stream *ioswitch.StreamVar | |||
| } | |||
| for { | |||
| resp, err := e.mqClis[idx].WaitIOPlan(agtmq.NewWaitIOPlan(taskID, 5000)) | |||
| if err != nil { | |||
| anyErr = err | |||
| break | |||
| } | |||
| if resp.IsComplete { | |||
| if resp.Error != "" { | |||
| anyErr = errors.New(resp.Error) | |||
| done.Store(true) | |||
| } else { | |||
| rets[idx] = &resp.Result | |||
| } | |||
| break | |||
| } | |||
| if done.Load() { | |||
| break | |||
| } | |||
| } | |||
| }() | |||
| func (v *ExecutorStreamVar) WillRead() ExecutorReadStream { | |||
| return ExecutorReadStream{v.v} | |||
| } | |||
| func (s *ExecutorStreamVar) To(node cdssdk.Node) *AgentStreamVar { | |||
| s.blder.executorPlan.ops = append(s.blder.executorPlan.ops, &ops.SendStream{Stream: s.v, Node: node}) | |||
| return &AgentStreamVar{ | |||
| owner: s.blder.AtAgent(node), | |||
| v: s.v, | |||
| } | |||
| } | |||
| wg.Wait() | |||
| type ExecutorStringVar struct { | |||
| blder *PlanBuilder | |||
| v *ioswitch.StringVar | |||
| } | |||
| if anyErr != nil { | |||
| e.callback.SetError(anyErr) | |||
| return | |||
| } | |||
// Store records an op that saves this string variable's final value into
// the plan's shared storeMap under key, making it visible in the result
// map returned by Executor.Wait.
func (s *ExecutorStringVar) Store(key string) {
	s.blder.executorPlan.ops = append(s.blder.executorPlan.ops, &ops.Store{
		Var:   s.v,
		Key:   key,
		Store: s.blder.storeMap,
	})
}
| reducedRet := ExecutorResult{ | |||
| ResultValues: make(map[string]any), | |||
| } | |||
| for _, ret := range rets { | |||
| for k, v := range ret.Values { | |||
| reducedRet.ResultValues[k] = v | |||
| } | |||
| } | |||
| type ExecutorSignalVar struct { | |||
| blder *PlanBuilder | |||
| v *ioswitch.SignalVar | |||
| } | |||
| e.callback.SetValue(reducedRet) | |||
// To sends this executor-side signal to the given node: a SendVar op is
// recorded in the executor's plan, and the returned AgentSignalVar is
// the same variable owned by the destination agent's plan builder.
func (s *ExecutorSignalVar) To(node cdssdk.Node) *AgentSignalVar {
	s.blder.executorPlan.ops = append(s.blder.executorPlan.ops, &ops.SendVar{Var: s.v, Node: node})
	return &AgentSignalVar{
		owner: s.blder.AtAgent(node),
		v:     s.v,
	}
}
| @@ -1,69 +1,40 @@ | |||
| package plans | |||
| import ( | |||
| "fmt" | |||
| "context" | |||
| "sync" | |||
| "github.com/google/uuid" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| cdssdk "gitlink.org.cn/cloudream/common/sdks/storage" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
| type StreamInfo struct { | |||
| ID ioswitch.StreamID | |||
| } | |||
| type PlanBuilder struct { | |||
| streams []*StreamInfo | |||
| agentPlans map[cdssdk.NodeID]*AgentPlanBuilder | |||
| } | |||
| func (b *PlanBuilder) Build() (*ComposedPlan, error) { | |||
| planID := uuid.NewString() | |||
| var agentPlans []AgentPlan | |||
| for _, b := range b.agentPlans { | |||
| plan, err := b.Build(ioswitch.PlanID(planID)) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| agentPlans = append(agentPlans, plan) | |||
| } | |||
| return &ComposedPlan{ | |||
| ID: ioswitch.PlanID(planID), | |||
| AgentPlans: agentPlans, | |||
| }, nil | |||
| } | |||
| func (b *PlanBuilder) newStream() *StreamInfo { | |||
| str := &StreamInfo{ | |||
| ID: ioswitch.StreamID(fmt.Sprintf("%d", len(b.streams)+1)), | |||
| } | |||
| b.streams = append(b.streams, str) | |||
| return str | |||
| vars []ioswitch.Var | |||
| agentPlans map[cdssdk.NodeID]*AgentPlanBuilder | |||
| executorPlan ExecutorPlanBuilder | |||
| storeMap *sync.Map | |||
| } | |||
| func NewPlanBuilder() PlanBuilder { | |||
| return PlanBuilder{ | |||
| func NewPlanBuilder() *PlanBuilder { | |||
| bld := &PlanBuilder{ | |||
| agentPlans: make(map[cdssdk.NodeID]*AgentPlanBuilder), | |||
| storeMap: &sync.Map{}, | |||
| } | |||
| bld.executorPlan.blder = bld | |||
| return bld | |||
| } | |||
| func (b *PlanBuilder) FromExecutor() *FromExecutorStream { | |||
| return &FromExecutorStream{ | |||
| owner: b, | |||
| info: b.newStream(), | |||
| } | |||
| func (b *PlanBuilder) AtExecutor() *ExecutorPlanBuilder { | |||
| return &b.executorPlan | |||
| } | |||
| func (b *PlanBuilder) AtAgent(node cdssdk.Node) *AgentPlanBuilder { | |||
| agtPlan, ok := b.agentPlans[node.NodeID] | |||
| if !ok { | |||
| agtPlan = &AgentPlanBuilder{ | |||
| owner: b, | |||
| blder: b, | |||
| node: node, | |||
| } | |||
| b.agentPlans[node.NodeID] = agtPlan | |||
| @@ -72,33 +43,59 @@ func (b *PlanBuilder) AtAgent(node cdssdk.Node) *AgentPlanBuilder { | |||
| return agtPlan | |||
| } | |||
| type FromExecutorStream struct { | |||
| owner *PlanBuilder | |||
| info *StreamInfo | |||
| toNode *cdssdk.Node | |||
| } | |||
| func (b *PlanBuilder) Execute() *Executor { | |||
| ctx, cancel := context.WithCancel(context.Background()) | |||
| planID := genRandomPlanID() | |||
| func (s *FromExecutorStream) ToNode(node cdssdk.Node) *AgentStream { | |||
| s.toNode = &node | |||
| return &AgentStream{ | |||
| owner: s.owner.AtAgent(node), | |||
| info: s.info, | |||
| execPlan := ioswitch.Plan{ | |||
| ID: planID, | |||
| Ops: b.executorPlan.ops, | |||
| } | |||
| exec := Executor{ | |||
| planID: planID, | |||
| plan: b, | |||
| callback: future.NewSetVoid(), | |||
| ctx: ctx, | |||
| cancel: cancel, | |||
| executorSw: ioswitch.NewSwitch(execPlan), | |||
| } | |||
| go exec.execute() | |||
| return &exec | |||
| } | |||
| type ToExecutorStream struct { | |||
| info *StreamInfo | |||
| fromNode *cdssdk.Node | |||
| func (b *PlanBuilder) newStreamVar() *ioswitch.StreamVar { | |||
| v := &ioswitch.StreamVar{ | |||
| ID: ioswitch.VarID(len(b.vars)), | |||
| } | |||
| b.vars = append(b.vars, v) | |||
| return v | |||
| } | |||
| type MultiStream struct { | |||
| Streams []*AgentStream | |||
| func (b *PlanBuilder) newIntVar() *ioswitch.IntVar { | |||
| v := &ioswitch.IntVar{ | |||
| ID: ioswitch.VarID(len(b.vars)), | |||
| } | |||
| b.vars = append(b.vars, v) | |||
| return v | |||
| } | |||
| func (m *MultiStream) Count() int { | |||
| return len(m.Streams) | |||
| func (b *PlanBuilder) newStringVar() *ioswitch.StringVar { | |||
| v := &ioswitch.StringVar{ | |||
| ID: ioswitch.VarID(len(b.vars)), | |||
| } | |||
| b.vars = append(b.vars, v) | |||
| return v | |||
| } | |||
| func (b *PlanBuilder) newSignalVar() *ioswitch.SignalVar { | |||
| v := &ioswitch.SignalVar{ | |||
| ID: ioswitch.VarID(len(b.vars)), | |||
| } | |||
| b.vars = append(b.vars, v) | |||
| func (m *MultiStream) Stream(index int) *AgentStream { | |||
| return m.Streams[index] | |||
| return v | |||
| } | |||
| @@ -0,0 +1,10 @@ | |||
| package plans | |||
| import ( | |||
| "github.com/google/uuid" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
// genRandomPlanID returns a fresh plan ID backed by a random UUID string.
func genRandomPlanID() ioswitch.PlanID {
	return ioswitch.PlanID(uuid.NewString())
}
| @@ -2,293 +2,150 @@ package ioswitch | |||
| import ( | |||
| "context" | |||
| "errors" | |||
| "fmt" | |||
| "sync" | |||
| "gitlink.org.cn/cloudream/common/pkgs/future" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/utils/lo2" | |||
| "gitlink.org.cn/cloudream/common/utils/sync2" | |||
| ) | |||
| var ErrPlanFinished = errors.New("plan is finished") | |||
| var ErrPlanNotFound = errors.New("plan not found") | |||
| type OpState string | |||
| const ( | |||
| OpPending OpState = "Pending" | |||
| OpFinished OpState = "Finished" | |||
| ) | |||
| type Oping struct { | |||
| State OpState | |||
| } | |||
| type PlanResult struct { | |||
| Values map[string]any `json:"values"` | |||
| } | |||
| type Planning struct { | |||
| plan Plan | |||
| opings []Oping | |||
| resultValues map[string]any | |||
| callback *future.SetValueFuture[PlanResult] | |||
| readys map[StreamID]Stream | |||
| waittings []*Watting | |||
| type bindingVars struct { | |||
| Waittings []Var | |||
| Bindeds []Var | |||
| Callback *future.SetVoidFuture | |||
| } | |||
| func NewPlanning(plan Plan) Planning { | |||
| planning := Planning{ | |||
| plan: plan, | |||
| resultValues: make(map[string]any), | |||
| callback: future.NewSetValue[PlanResult](), | |||
| readys: make(map[StreamID]Stream), | |||
| } | |||
| for range plan.Ops { | |||
| oping := Oping{ | |||
| State: OpPending, | |||
| } | |||
| planning.opings = append(planning.opings, oping) | |||
| } | |||
| return planning | |||
| type Switch struct { | |||
| plan Plan | |||
| vars map[VarID]Var | |||
| bindings []*bindingVars | |||
| lock sync.Mutex | |||
| } | |||
| func (p *Planning) IsCompleted() bool { | |||
| for _, oping := range p.opings { | |||
| if oping.State != OpFinished { | |||
| return false | |||
| } | |||
| func NewSwitch(plan Plan) *Switch { | |||
| planning := Switch{ | |||
| plan: plan, | |||
| vars: make(map[VarID]Var), | |||
| } | |||
| return true | |||
| return &planning | |||
| } | |||
| func (p *Planning) MakeResult() PlanResult { | |||
| return PlanResult{ | |||
| Values: p.resultValues, | |||
| } | |||
| func (s *Switch) Plan() *Plan { | |||
| return &s.plan | |||
| } | |||
| type Watting struct { | |||
| WaitIDs []StreamID | |||
| Readys []Stream | |||
| Callback *future.SetValueFuture[[]Stream] | |||
| } | |||
| func (s *Switch) Run(ctx context.Context) error { | |||
| ctx2, cancel := context.WithCancel(ctx) | |||
| defer cancel() | |||
| func (w *Watting) TryReady(str Stream) bool { | |||
| for i, id := range w.WaitIDs { | |||
| if id == str.ID { | |||
| w.Readys[i] = str | |||
| return true | |||
| } | |||
| } | |||
| return sync2.ParallelDo(s.plan.Ops, func(o Op, idx int) error { | |||
| err := o.Execute(ctx2, s) | |||
| return false | |||
| } | |||
| s.lock.Lock() | |||
| defer s.lock.Unlock() | |||
| func (c *Watting) IsAllReady() bool { | |||
| for _, s := range c.Readys { | |||
| if s.Stream == nil { | |||
| return false | |||
| if err != nil { | |||
| cancel() | |||
| return err | |||
| } | |||
| } | |||
| return true | |||
| } | |||
| func (w *Watting) Complete() { | |||
| w.Callback.SetValue(w.Readys) | |||
| } | |||
| func (w *Watting) Cancel(err error) { | |||
| w.Callback.SetError(err) | |||
| } | |||
| type Switch struct { | |||
| lock sync.Mutex | |||
| plannings map[PlanID]*Planning | |||
| } | |||
| func NewSwitch() Switch { | |||
| return Switch{ | |||
| plannings: make(map[PlanID]*Planning), | |||
| } | |||
| return nil | |||
| }) | |||
| } | |||
// NOTE(review): UNRESOLVED MERGE RESIDUE. The lines below interleave TWO
// implementations without conflict markers: the old Stream-based API
// (SetupPlan / ExecutePlan, using s.plannings) and the new Var-based API
// (BindVars, using s.vars / s.bindings / future.NewSetVoid). As written this
// region cannot compile (two function headers back to back, statements from
// one body spliced into the other). The merge must be resolved by picking one
// implementation per function; code is preserved byte-for-byte for that work.
func (s *Switch) SetupPlan(plan Plan) error { // (old version header)
func (s *Switch) BindVars(ctx context.Context, vs ...Var) error { // (new version header)
	s.lock.Lock()
	defer s.lock.Unlock()
	if _, ok := s.plannings[plan.ID]; ok { // (old SetupPlan body)
		return fmt.Errorf("plan id exists")
	callback := future.NewSetVoid() // (new BindVars body)
	binding := &bindingVars{
		Callback: callback,
	}
	planning := NewPlanning(plan) // (old SetupPlan body)
	s.plannings[plan.ID] = &planning
	return nil
}
	for _, v := range vs { // (new BindVars body)
		v2 := s.vars[v.GetID()]
		if v2 == nil {
			binding.Waittings = append(binding.Waittings, v)
			continue
		}
func (s *Switch) ExecutePlan(id PlanID) (PlanResult, error) { // (old version header)
	s.lock.Lock()
		if err := AssignVar(v2, v); err != nil { // (new BindVars body)
			s.lock.Unlock()
			return fmt.Errorf("assign var %v to %v: %w", v2.GetID(), v.GetID(), err)
		}
	planning, ok := s.plannings[id] // (old ExecutePlan body)
	if !ok {
		s.lock.Unlock()
		return PlanResult{}, fmt.Errorf("plan not found")
		binding.Bindeds = append(binding.Bindeds, v) // (new BindVars body)
	}
	for i, op := range planning.plan.Ops { // (old ExecutePlan body)
		idx := i
		o := op
		go func() {
			err := o.Execute(s, id)
			s.lock.Lock()
			defer s.lock.Unlock()
			if err != nil {
				logger.Std.Warnf("exeucting op: %s", err.Error())
				s.cancelPlan(id)
				return
			}
			planning.opings[idx].State = OpFinished
			if planning.IsCompleted() {
				s.completePlan(id)
			}
		}()
	if len(binding.Waittings) == 0 { // (new BindVars body)
		s.lock.Unlock()
		return nil
	}
	s.bindings = append(s.bindings, binding)
	s.lock.Unlock()
	return planning.callback.WaitValue(context.TODO()) // (old ExecutePlan tail)
}
	err := callback.Wait(ctx) // (new BindVars tail)
// CancelPlan aborts the plan with the given ID: its streams are closed and
// any pending waiters are failed. Canceling an unknown ID is a no-op.
func (s *Switch) CancelPlan(id PlanID) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.cancelPlan(id)
}
| func (s *Switch) cancelPlan(id PlanID) { | |||
| plan, ok := s.plannings[id] | |||
| if !ok { | |||
| return | |||
| } | |||
| delete(s.plannings, id) | |||
| for _, s := range plan.readys { | |||
| s.Stream.Close() | |||
| } | |||
| for _, c := range plan.waittings { | |||
| c.Callback.SetError(ErrPlanFinished) | |||
| } | |||
| s.bindings = lo2.Remove(s.bindings, binding) | |||
| plan.callback.SetError(fmt.Errorf("plan cancelled")) | |||
| return err | |||
| } | |||
| func (s *Switch) completePlan(id PlanID) { | |||
| plan, ok := s.plannings[id] | |||
| if !ok { | |||
| return | |||
| } | |||
| delete(s.plannings, id) | |||
| for _, s := range plan.readys { | |||
| s.Stream.Close() | |||
| } | |||
| for _, c := range plan.waittings { | |||
| c.Callback.SetError(ErrPlanFinished) | |||
| } | |||
| plan.callback.SetValue(plan.MakeResult()) | |||
| } | |||
// NOTE(review): UNRESOLVED MERGE RESIDUE. Two implementations are interleaved
// here without conflict markers: the old Stream-based StreamReady (delivering
// an arrived stream to a plan's waiters) and the new Var-based PutVars
// (binding produced variables to pending bindings). The region cannot compile
// as-is; resolve the merge by keeping one function per API generation.
// Code is preserved byte-for-byte below.
func (s *Switch) StreamReady(planID PlanID, stream Stream) { // (old version header)
func (s *Switch) PutVars(vs ...Var) { // (new version header)
	s.lock.Lock()
	defer s.lock.Unlock()
	plan, ok := s.plannings[planID] // (old StreamReady body)
	if !ok {
		// TODO: handle the error
		return
	}
	for i, wa := range plan.waittings { // (old StreamReady body)
		if !wa.TryReady(stream) {
			continue
		}
loop: // (new PutVars body)
	for _, v := range vs {
		for ib, b := range s.bindings {
			for iw, w := range b.Waittings {
				if w.GetID() != v.GetID() {
					continue
				}
				if err := AssignVar(v, w); err != nil {
					b.Callback.SetError(fmt.Errorf("assign var %v to %v: %w", v.GetID(), w.GetID(), err))
					// Type mismatch means the generated plan is broken; any handling
					// works here, since execution will eventually fail anyway.
					continue loop
				}
				b.Bindeds = append(b.Bindeds, w)
				b.Waittings = lo2.RemoveAt(b.Waittings, iw)
				if len(b.Waittings) == 0 {
					b.Callback.SetVoid()
					s.bindings = lo2.RemoveAt(s.bindings, ib)
				}
				// Bound successfully; continue with the outermost loop.
				continue loop
			}
		if !wa.IsAllReady() { // (old StreamReady body)
			return
		}
		plan.waittings = lo2.RemoveAt(plan.waittings, i)
		wa.Complete()
		return
		// No binding matched; store the variable in the table directly. (new PutVars)
		s.vars[v.GetID()] = v
	}
	plan.readys[stream.ID] = stream // (old StreamReady tail)
}
// NOTE(review): UNRESOLVED MERGE RESIDUE. The old Stream-based WaitStreams
// (blocks until all requested streams of a plan are ready) is interleaved with
// the new generic helper BindArrayVars (widens []T to []Var and delegates to
// Switch.BindVars). The region cannot compile as-is; resolve the merge.
// Code is preserved byte-for-byte below.
func (s *Switch) WaitStreams(planID PlanID, streamIDs ...StreamID) ([]Stream, error) { // (old version)
	s.lock.Lock()
	plan, ok := s.plannings[planID]
	if !ok {
		s.lock.Unlock()
		return nil, ErrPlanNotFound
	}
	allReady := true
	readys := make([]Stream, len(streamIDs))
	for i, id := range streamIDs {
		str, ok := plan.readys[id]
		if !ok {
			allReady = false
			continue
		}
		readys[i] = str
		delete(plan.readys, id)
func BindArrayVars[T Var](sw *Switch, ctx context.Context, vs []T) error { // (new version header)
	var vs2 []Var
	for _, v := range vs {
		vs2 = append(vs2, v)
	}
	if allReady { // (old WaitStreams body)
		s.lock.Unlock()
		return readys, nil
	}
	callback := future.NewSetValue[[]Stream]()
	plan.waittings = append(plan.waittings, &Watting{
		WaitIDs:  streamIDs,
		Readys:   readys,
		Callback: callback,
	})
	s.lock.Unlock()
	return callback.WaitValue(context.TODO()) // (old WaitStreams tail)
	return sw.BindVars(ctx, vs2...) // (new BindArrayVars tail)
}
// NOTE(review): UNRESOLVED MERGE RESIDUE. The old AddResultValue (records
// key/value results on a plan) is interleaved with the new generic helper
// PutArrayVars (widens []T to []Var and delegates to Switch.PutVars).
// The region cannot compile as-is; resolve the merge.
// Code is preserved byte-for-byte below.
func (s *Switch) AddResultValue(planID PlanID, rets ...ResultKV) { // (old version header)
	s.lock.Lock()
	defer s.lock.Unlock()
	plan, ok := s.plannings[planID]
	if !ok {
		return
func PutArrayVars[T Var](sw *Switch, vs []T) { // (new version header)
	var vs2 []Var
	for _, v := range vs {
		vs2 = append(vs2, v)
	}
	for _, ret := range rets { // (old AddResultValue body)
		plan.resultValues[ret.Key] = ret.Value
	}
	sw.PutVars(vs2...) // (new PutArrayVars tail)
}
| @@ -0,0 +1,24 @@ | |||
| package ioswitch | |||
| import ( | |||
| "fmt" | |||
| "reflect" | |||
| ) | |||
| func AssignVar(from Var, to Var) error { | |||
| if reflect.TypeOf(from) != reflect.TypeOf(to) { | |||
| return fmt.Errorf("cannot assign %T to %T", from, to) | |||
| } | |||
| switch from := from.(type) { | |||
| case *StreamVar: | |||
| to.(*StreamVar).Stream = from.Stream | |||
| case *IntVar: | |||
| to.(*IntVar).Value = from.Value | |||
| case *StringVar: | |||
| to.(*StringVar).Value = from.Value | |||
| case *SignalVar: | |||
| } | |||
| return nil | |||
| } | |||
| @@ -1,120 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch" | |||
| ) | |||
// IOService is the MQ-facing service contract for managing io plans on an
// agent: set a plan up, start it, wait for it to finish, and cancel it.
type IOService interface {
	SetupIOPlan(msg *SetupIOPlan) (*SetupIOPlanResp, *mq.CodeMessage)
	StartIOPlan(msg *StartIOPlan) (*StartIOPlanResp, *mq.CodeMessage)
	WaitIOPlan(msg *WaitIOPlan) (*WaitIOPlanResp, *mq.CodeMessage)
	CancelIOPlan(msg *CancelIOPlan) (*CancelIOPlanResp, *mq.CodeMessage)
}
// Set up an io plan on the agent.
var _ = Register(Service.SetupIOPlan)

// SetupIOPlan is the request carrying the plan to set up.
type SetupIOPlan struct {
	mq.MessageBodyBase
	Plan ioswitch.Plan `json:"plan"`
}

// SetupIOPlanResp is the (empty) success response to SetupIOPlan.
type SetupIOPlanResp struct {
	mq.MessageBodyBase
}

// NewSetupIOPlan builds a SetupIOPlan request for the given plan.
func NewSetupIOPlan(plan ioswitch.Plan) *SetupIOPlan {
	return &SetupIOPlan{
		Plan: plan,
	}
}

// NewSetupIOPlanResp builds an empty SetupIOPlanResp.
func NewSetupIOPlanResp() *SetupIOPlanResp {
	return &SetupIOPlanResp{}
}

// SetupIOPlan sends the request over MQ and waits for the agent's response.
func (client *Client) SetupIOPlan(msg *SetupIOPlan, opts ...mq.RequestOption) (*SetupIOPlanResp, error) {
	return mq.Request(Service.SetupIOPlan, client.rabbitCli, msg, opts...)
}
// Start an io plan that was previously set up.
var _ = Register(Service.StartIOPlan)

// StartIOPlan is the request identifying which plan to start.
type StartIOPlan struct {
	mq.MessageBodyBase
	PlanID ioswitch.PlanID `json:"planID"`
}

// StartIOPlanResp carries the ID of the task that now runs the plan;
// pass it to WaitIOPlan to await completion.
type StartIOPlanResp struct {
	mq.MessageBodyBase
	TaskID string `json:"taskID"`
}

// NewStartIOPlan builds a StartIOPlan request for the given plan ID.
func NewStartIOPlan(planID ioswitch.PlanID) *StartIOPlan {
	return &StartIOPlan{
		PlanID: planID,
	}
}

// NewStartIOPlanResp builds a StartIOPlanResp carrying the task ID.
func NewStartIOPlanResp(taskID string) *StartIOPlanResp {
	return &StartIOPlanResp{
		TaskID: taskID,
	}
}

// StartIOPlan sends the request over MQ and waits for the agent's response.
func (client *Client) StartIOPlan(msg *StartIOPlan, opts ...mq.RequestOption) (*StartIOPlanResp, error) {
	return mq.Request(Service.StartIOPlan, client.rabbitCli, msg, opts...)
}
// Wait for a started io plan to finish.
// (The original comment said "start an io plan" — a copy-paste slip; this is the wait API.)
var _ = Register(Service.WaitIOPlan)

// WaitIOPlan is the request that waits up to WaitTimeoutMs milliseconds
// for the task identified by TaskID to complete.
type WaitIOPlan struct {
	mq.MessageBodyBase
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"`
}

// WaitIOPlanResp reports whether the plan finished within the timeout,
// any error text, and the plan's result on success.
type WaitIOPlanResp struct {
	mq.MessageBodyBase
	IsComplete bool                `json:"isComplete"`
	Error      string              `json:"error"`
	Result     ioswitch.PlanResult `json:"result"`
}

// NewWaitIOPlan builds a WaitIOPlan request for the given task and timeout.
func NewWaitIOPlan(taskID string, waitTimeoutMs int64) *WaitIOPlan {
	return &WaitIOPlan{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}

// NewWaitIOPlanResp builds a WaitIOPlanResp from completion state, error text and result.
func NewWaitIOPlanResp(isComplete bool, err string, result ioswitch.PlanResult) *WaitIOPlanResp {
	return &WaitIOPlanResp{
		IsComplete: isComplete,
		Error:      err,
		Result:     result,
	}
}

// WaitIOPlan sends the request over MQ and waits for the agent's response.
func (client *Client) WaitIOPlan(msg *WaitIOPlan, opts ...mq.RequestOption) (*WaitIOPlanResp, error) {
	return mq.Request(Service.WaitIOPlan, client.rabbitCli, msg, opts...)
}
// Cancel an io plan.
var _ = Register(Service.CancelIOPlan)

// CancelIOPlan is the request identifying which plan to cancel.
type CancelIOPlan struct {
	mq.MessageBodyBase
	PlanID ioswitch.PlanID `json:"planID"`
}

// CancelIOPlanResp is the (empty) success response to CancelIOPlan.
type CancelIOPlanResp struct {
	mq.MessageBodyBase
}

// NewCancelIOPlan builds a CancelIOPlan request for the given plan ID.
func NewCancelIOPlan(planID ioswitch.PlanID) *CancelIOPlan {
	return &CancelIOPlan{
		PlanID: planID,
	}
}

// NewCancelIOPlanResp builds an empty CancelIOPlanResp.
func NewCancelIOPlanResp() *CancelIOPlanResp {
	return &CancelIOPlanResp{}
}

// CancelIOPlan sends the request over MQ and waits for the agent's response.
func (client *Client) CancelIOPlan(msg *CancelIOPlan, opts ...mq.RequestOption) (*CancelIOPlanResp, error) {
	return mq.Request(Service.CancelIOPlan, client.rabbitCli, msg, opts...)
}
| @@ -6,8 +6,6 @@ import ( | |||
| ) | |||
| type Service interface { | |||
| IOService | |||
| ObjectService | |||
| StorageService | |||
| @@ -92,6 +92,7 @@ require ( | |||
| golang.org/x/crypto v0.9.0 // indirect | |||
| golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect | |||
| golang.org/x/net v0.10.0 // indirect | |||
| golang.org/x/sync v0.1.0 | |||
| golang.org/x/sys v0.8.0 // indirect | |||
| golang.org/x/text v0.9.0 // indirect | |||
| google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 // indirect | |||
| @@ -217,6 +217,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= | |||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= | |||
| golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
| @@ -9,13 +9,12 @@ import ( | |||
| ) | |||
| type Config struct { | |||
| ECFileSizeThreshold int64 `json:"ecFileSizeThreshold"` | |||
| NodeUnavailableSeconds int `json:"nodeUnavailableSeconds"` // 如果节点上次上报时间超过这个值,则认为节点已经不可用 | |||
| Logger log.Config `json:"logger"` | |||
| DB db.Config `json:"db"` | |||
| RabbitMQ stgmq.Config `json:"rabbitMQ"` | |||
| DistLock distlock.Config `json:"distlock"` | |||
| ECFileSizeThreshold int64 `json:"ecFileSizeThreshold"` | |||
| NodeUnavailableSeconds int `json:"nodeUnavailableSeconds"` // 如果节点上次上报时间超过这个值,则认为节点已经不可用 | |||
| Logger log.Config `json:"logger"` | |||
| DB db.Config `json:"db"` | |||
| RabbitMQ stgmq.Config `json:"rabbitMQ"` | |||
| DistLock distlock.Config `json:"distlock"` | |||
| } | |||
| var cfg Config | |||
| @@ -1,6 +1,7 @@ | |||
| package event | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "strconv" | |||
| "time" | |||
| @@ -414,21 +415,12 @@ func (t *CheckPackageRedundancy) noneToEC(obj stgmod.ObjectDetail, red *cdssdk.E | |||
| planBlder := plans.NewPlanBuilder() | |||
| inputStrs := planBlder.AtAgent(getNodes.Nodes[0]).IPFSRead(obj.Object.FileHash).ChunkedSplit(red.ChunkSize, red.K, true) | |||
| outputStrs := planBlder.AtAgent(getNodes.Nodes[0]).ECReconstructAny(*red, lo.Range(red.K), lo.Range(red.N), inputStrs.Streams...) | |||
| outputStrs := planBlder.AtAgent(getNodes.Nodes[0]).ECReconstructAny(*red, lo.Range(red.K), lo.Range(red.N), inputStrs) | |||
| for i := 0; i < red.N; i++ { | |||
| outputStrs.Stream(i).GRPCSend(uploadNodes[i].Node).IPFSWrite(fmt.Sprintf("%d", i)) | |||
| } | |||
| plan, err := planBlder.Build() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("building io plan: %w", err) | |||
| } | |||
| exec, err := plans.Execute(*plan) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("executing io plan: %w", err) | |||
| outputStrs[i].To(uploadNodes[i].Node).IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d", i)) | |||
| } | |||
| ioRet, err := exec.Wait() | |||
| ioRet, err := planBlder.Execute().Wait(context.TODO()) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("executing io plan: %w", err) | |||
| } | |||
| @@ -439,7 +431,7 @@ func (t *CheckPackageRedundancy) noneToEC(obj stgmod.ObjectDetail, red *cdssdk.E | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: i, | |||
| NodeID: uploadNodes[i].Node.NodeID, | |||
| FileHash: ioRet.ResultValues[fmt.Sprintf("%d", i)].(string), | |||
| FileHash: ioRet[fmt.Sprintf("%d", i)].(string), | |||
| }) | |||
| } | |||
| @@ -522,26 +514,16 @@ func (t *CheckPackageRedundancy) ecToRep(obj stgmod.ObjectDetail, srcRed *cdssdk | |||
| for i := range uploadNodes { | |||
| tarNode := planBlder.AtAgent(uploadNodes[i].Node) | |||
| var inputs []*plans.AgentStream | |||
| var inputs []*plans.AgentStreamVar | |||
| for _, block := range chosenBlocks { | |||
| inputs = append(inputs, tarNode.IPFSRead(block.FileHash)) | |||
| } | |||
| outputs := tarNode.ECReconstruct(*srcRed, chosenBlockIndexes, inputs...) | |||
| tarNode.ChunkedJoin(srcRed.ChunkSize, outputs.Streams...).Length(obj.Object.Size).IPFSWrite(fmt.Sprintf("%d", i)) | |||
| } | |||
| plan, err := planBlder.Build() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("building io plan: %w", err) | |||
| } | |||
| exec, err := plans.Execute(*plan) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("executing io plan: %w", err) | |||
| outputs := tarNode.ECReconstruct(*srcRed, chosenBlockIndexes, inputs) | |||
| tarNode.ChunkedJoin(srcRed.ChunkSize, outputs).Length(obj.Object.Size).IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d", i)) | |||
| } | |||
| ioRet, err := exec.Wait() | |||
| ioRet, err := planBlder.Execute().Wait(context.TODO()) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("executing io plan: %w", err) | |||
| } | |||
| @@ -552,7 +534,7 @@ func (t *CheckPackageRedundancy) ecToRep(obj stgmod.ObjectDetail, srcRed *cdssdk | |||
| ObjectID: obj.Object.ObjectID, | |||
| Index: 0, | |||
| NodeID: uploadNodes[i].Node.NodeID, | |||
| FileHash: ioRet.ResultValues[fmt.Sprintf("%d", i)].(string), | |||
| FileHash: ioRet[fmt.Sprintf("%d", i)].(string), | |||
| }) | |||
| } | |||
| @@ -615,28 +597,18 @@ func (t *CheckPackageRedundancy) ecToEC(obj stgmod.ObjectDetail, srcRed *cdssdk. | |||
| // 否则就要重建出这个节点需要的块 | |||
| tarNode := planBlder.AtAgent(node.Node) | |||
| var inputs []*plans.AgentStream | |||
| var inputs []*plans.AgentStreamVar | |||
| for _, block := range chosenBlocks { | |||
| inputs = append(inputs, tarNode.IPFSRead(block.FileHash)) | |||
| } | |||
| // 输出只需要自己要保存的那一块 | |||
| tarNode.ECReconstructAny(*srcRed, chosenBlockIndexes, []int{i}, inputs...).Stream(0).IPFSWrite(fmt.Sprintf("%d", i)) | |||
| tarNode.ECReconstructAny(*srcRed, chosenBlockIndexes, []int{i}, inputs)[0].IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d", i)) | |||
| newBlocks = append(newBlocks, newBlock) | |||
| } | |||
| plan, err := planBlder.Build() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("building io plan: %w", err) | |||
| } | |||
| exec, err := plans.Execute(*plan) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("executing io plan: %w", err) | |||
| } | |||
| // 如果没有任何Plan,Wait会直接返回成功 | |||
| ret, err := exec.Wait() | |||
| ret, err := planBlder.Execute().Wait(context.TODO()) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("executing io plan: %w", err) | |||
| } | |||
| @@ -645,7 +617,7 @@ func (t *CheckPackageRedundancy) ecToEC(obj stgmod.ObjectDetail, srcRed *cdssdk. | |||
| return nil, nil | |||
| } | |||
| for k, v := range ret.ResultValues { | |||
| for k, v := range ret { | |||
| idx, err := strconv.ParseInt(k, 10, 64) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("parsing result key %s as index: %w", k, err) | |||
| @@ -1,6 +1,7 @@ | |||
| package event | |||
| import ( | |||
| "context" | |||
| "fmt" | |||
| "math" | |||
| "math/rand" | |||
| @@ -107,6 +108,7 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) { | |||
| planBld := plans.NewPlanBuilder() | |||
| pinPlans := make(map[cdssdk.NodeID]*[]string) | |||
| plnningNodeIDs := make(map[cdssdk.NodeID]bool) | |||
| // 对于rep对象,统计出所有对象块分布最多的两个节点,用这两个节点代表所有rep对象块的分布,去进行退火算法 | |||
| var repObjectsUpdating []coormq.UpdatingObjectRedundancy | |||
| @@ -131,10 +133,10 @@ func (t *CleanPinned) Execute(execCtx ExecuteContext) { | |||
| pinnedAt: obj.PinnedAt, | |||
| blocks: obj.Blocks, | |||
| }) | |||
| ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allNodeInfos, solu, obj, &planBld)) | |||
| ecObjectsUpdating = append(ecObjectsUpdating, t.makePlansForECObject(allNodeInfos, solu, obj, planBld, plnningNodeIDs)) | |||
| } | |||
| ioSwRets, err := t.executePlans(execCtx, pinPlans, &planBld) | |||
| ioSwRets, err := t.executePlans(execCtx, pinPlans, planBld, plnningNodeIDs) | |||
| if err != nil { | |||
| log.Warn(err.Error()) | |||
| return | |||
| @@ -748,7 +750,7 @@ func (t *CleanPinned) makePlansForRepObject(solu annealingSolution, obj stgmod.O | |||
| return entry | |||
| } | |||
| func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node, solu annealingSolution, obj stgmod.ObjectDetail, planBld *plans.PlanBuilder) coormq.UpdatingObjectRedundancy { | |||
| func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssdk.Node, solu annealingSolution, obj stgmod.ObjectDetail, planBld *plans.PlanBuilder, planningNodeIDs map[cdssdk.NodeID]bool) coormq.UpdatingObjectRedundancy { | |||
| entry := coormq.UpdatingObjectRedundancy{ | |||
| ObjectID: obj.Object.ObjectID, | |||
| Redundancy: obj.Object.Redundancy, | |||
| @@ -784,29 +786,26 @@ func (t *CleanPinned) makePlansForECObject(allNodeInfos map[cdssdk.NodeID]*cdssd | |||
| agt := planBld.AtAgent(*allNodeInfos[id]) | |||
| strs := agt.IPFSRead(obj.Object.FileHash).ChunkedSplit(ecRed.ChunkSize, ecRed.K, true) | |||
| ss := agt.ECReconstructAny(*ecRed, lo.Range(ecRed.K), *idxs, strs.Streams...) | |||
| for i, s := range ss.Streams { | |||
| s.IPFSWrite(fmt.Sprintf("%d.%d", obj.Object.ObjectID, (*idxs)[i])) | |||
| ss := agt.ECReconstructAny(*ecRed, lo.Range(ecRed.K), *idxs, strs) | |||
| for i, s := range ss { | |||
| s.IPFSWrite().ToExecutor().Store(fmt.Sprintf("%d.%d", obj.Object.ObjectID, (*idxs)[i])) | |||
| } | |||
| planningNodeIDs[id] = true | |||
| } | |||
| return entry | |||
| } | |||
| func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.NodeID]*[]string, planBld *plans.PlanBuilder) (map[string]any, error) { | |||
| func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.NodeID]*[]string, planBld *plans.PlanBuilder, plnningNodeIDs map[cdssdk.NodeID]bool) (map[string]any, error) { | |||
| log := logger.WithType[CleanPinned]("Event") | |||
| ioPlan, err := planBld.Build() | |||
| if err != nil { | |||
| return nil, fmt.Errorf("building io switch plan: %w", err) | |||
| } | |||
| // 统一加锁,有重复也没关系 | |||
| lockBld := reqbuilder.NewBuilder() | |||
| for nodeID := range pinPlans { | |||
| lockBld.IPFS().Buzy(nodeID) | |||
| } | |||
| for _, plan := range ioPlan.AgentPlans { | |||
| lockBld.IPFS().Buzy(plan.Node.NodeID) | |||
| for id := range plnningNodeIDs { | |||
| lockBld.IPFS().Buzy(id) | |||
| } | |||
| lock, err := lockBld.MutexLock(execCtx.Args.DistLock) | |||
| if err != nil { | |||
| @@ -845,17 +844,12 @@ func (t *CleanPinned) executePlans(execCtx ExecuteContext, pinPlans map[cdssdk.N | |||
| go func() { | |||
| defer wg.Done() | |||
| exec, err := plans.Execute(*ioPlan) | |||
| ret, err := planBld.Execute().Wait(context.TODO()) | |||
| if err != nil { | |||
| ioSwErr = fmt.Errorf("executing io switch plan: %w", err) | |||
| return | |||
| } | |||
| ret, err := exec.Wait() | |||
| if err != nil { | |||
| ioSwErr = fmt.Errorf("waiting io switch plan: %w", err) | |||
| return | |||
| } | |||
| ioSwRets = ret.ResultValues | |||
| ioSwRets = ret | |||
| }() | |||
| wg.Wait() | |||
| @@ -8,6 +8,7 @@ import ( | |||
| stgglb "gitlink.org.cn/cloudream/storage/common/globals" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/db" | |||
| "gitlink.org.cn/cloudream/storage/common/pkgs/distlock" | |||
| agtrpc "gitlink.org.cn/cloudream/storage/common/pkgs/grpc/agent" | |||
| scmq "gitlink.org.cn/cloudream/storage/common/pkgs/mq/scanner" | |||
| "gitlink.org.cn/cloudream/storage/scanner/internal/config" | |||
| "gitlink.org.cn/cloudream/storage/scanner/internal/event" | |||
| @@ -35,6 +36,8 @@ func main() { | |||
| stgglb.InitMQPool(&config.Cfg().RabbitMQ) | |||
| stgglb.InitAgentRPCPool(&agtrpc.PoolConfig{}) | |||
| distlockSvc, err := distlock.NewService(&config.Cfg().DistLock) | |||
| if err != nil { | |||
| logger.Warnf("new distlock service failed, err: %s", err.Error()) | |||