You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

parser.go 19 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770
  1. package plans
  2. import (
  3. "fmt"
  4. "math"
  5. "gitlink.org.cn/cloudream/common/pkgs/ipfs"
  6. cdssdk "gitlink.org.cn/cloudream/common/sdks/storage"
  7. "gitlink.org.cn/cloudream/common/utils/lo2"
  8. "gitlink.org.cn/cloudream/common/utils/math2"
  9. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch"
  10. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/dag"
  11. "gitlink.org.cn/cloudream/storage/common/pkgs/ioswitch/exec"
  12. )
// NodeProps carries the From/To association of a DAG node. Only nodes
// created directly from a From (buildFromNode) or a To (buildToNode) set
// these; every other node leaves both nil.
type NodeProps struct {
	From From // the From this node was generated from; nil otherwise
	To   To   // the To this node was generated from; nil otherwise
}
// ValueVarType distinguishes the kinds of value variables in a plan; it
// selects which builder variable is created in buildPlan.
type ValueVarType int

const (
	// StringValueVar is a string-valued variable.
	StringValueVar ValueVarType = iota
	// SignalValueVar is a signal variable used for synchronization.
	SignalValueVar
)
// VarProps carries per-variable metadata attached to DAG stream/value vars.
type VarProps struct {
	StreamIndex int          // index of the stream; only meaningful on a StreamVar
	ValueType   ValueVarType // type of the value; only meaningful on a ValueVar
	Var         ioswitch.Var // the concrete Var created while generating the Plan
}
// Aliases that fix the generic DAG type parameters to this package's
// NodeProps/VarProps, keeping the rest of the file readable.
type Graph = dag.Graph[NodeProps, VarProps]
type Node = dag.Node[NodeProps, VarProps]
type StreamVar = dag.StreamVar[NodeProps, VarProps]
type ValueVar = dag.ValueVar[NodeProps, VarProps]
// AgentWorker is an exec.Worker implementation backed by a storage node
// (an agent).
type AgentWorker struct {
	Node cdssdk.Node
}
// GetAddress returns the address used to reach this worker.
// TODO: choose the address properly; currently always uses the external
// IP and external GRPC port.
func (w *AgentWorker) GetAddress() string {
	return fmt.Sprintf("%v:%v", w.Node.ExternalIP, w.Node.ExternalGRPCPort)
}
  38. func (w *AgentWorker) Equals(worker exec.Worker) bool {
  39. aw, ok := worker.(*AgentWorker)
  40. if !ok {
  41. return false
  42. }
  43. return w.Node.NodeID == aw.Node.NodeID
  44. }
// FromToParser parses a FromTo description into a concrete execution plan.
// NOTE(review): builder.PlanBuilder is referenced here but no builder
// package appears in the visible import block — confirm it is imported
// elsewhere in the file.
type FromToParser interface {
	Parse(ft FromTo, blder *builder.PlanBuilder) error
}
// DefaultParser is the default FromToParser implementation, parameterized
// by the erasure-coding redundancy configuration.
type DefaultParser struct {
	EC cdssdk.ECRedundancy
}
  51. func NewParser(ec cdssdk.ECRedundancy) *DefaultParser {
  52. return &DefaultParser{
  53. EC: ec,
  54. }
  55. }
// ParseContext holds the state shared across the parsing passes.
type ParseContext struct {
	Ft  FromTo
	DAG *Graph
	// StreamRange is the range the From side must open in order to produce
	// the data required by every To. It is expressed relative to the whole
	// file, and both bounds are rounded to multiples of the stripe size, so
	// the upper bound may exceed the file size.
	StreamRange Range
}
// Parse builds an execution plan for the given FromTo into blder.
// NOTE(review): ctx.DAG is left nil here; confirm dag.NewNode / Walk
// handle a nil *Graph, otherwise the DAG must be initialized before use.
func (p *DefaultParser) Parse(ft FromTo, blder *builder.PlanBuilder) error {
	ctx := ParseContext{Ft: ft}
	// Phase 1: generate instructions from the Froms and Toes to roughly
	// satisfy each To's requirements.
	// First compute the range of the stream that must be opened.
	p.calcStreamRange(&ctx)
	err := p.extend(&ctx, ft)
	if err != nil {
		return err
	}
	// Phase 2: optimize the instructions generated above.
	// Deletion-style optimizations must be repeated until nothing changes;
	// with the current implementation this cannot loop forever.
	for {
		opted := false
		if p.removeUnusedJoin(&ctx) {
			opted = true
		}
		if p.removeUnusedMultiplyOutput(&ctx) {
			opted = true
		}
		if p.removeUnusedSplit(&ctx) {
			opted = true
		}
		if p.omitSplitJoin(&ctx) {
			opted = true
		}
		if !opted {
			break
		}
	}
	// Determining each instruction's execution location must also be
	// repeated until it reaches a fixed point.
	for p.pin(&ctx) {
	}
	// The following passes run only once, but must run in this order.
	p.dropUnused(&ctx)
	p.storeIPFSWriteResult(&ctx)
	p.generateClone(&ctx)
	p.generateRange(&ctx)
	p.generateSend(&ctx)
	return p.buildPlan(&ctx, blder)
}
  105. func (p *DefaultParser) findOutputStream(ctx *ParseContext, streamIndex int) *StreamVar {
  106. var ret *StreamVar
  107. ctx.DAG.Walk(func(n *dag.Node[NodeProps, VarProps]) bool {
  108. for _, o := range n.OutputStreams {
  109. if o != nil && o.Props.StreamIndex == streamIndex {
  110. ret = o
  111. return false
  112. }
  113. }
  114. return true
  115. })
  116. return ret
  117. }
// calcStreamRange computes the open range of the input stream required to
// satisfy every To. The range is aligned (rounded outward) to the stripe
// size (ChunkSize * K).
func (p *DefaultParser) calcStreamRange(ctx *ParseContext) {
	stripSize := int64(p.EC.ChunkSize * p.EC.K)
	// Start with the offset at +inf so the first ExtendStart pulls it down
	// to the true minimum.
	rng := Range{
		Offset: math.MaxInt64,
	}
	for _, to := range ctx.Ft.Toes {
		if to.GetDataIndex() == -1 {
			// To wants the complete file: align its range directly to the
			// stripe size.
			toRng := to.GetRange()
			rng.ExtendStart(math2.Floor(toRng.Offset, stripSize))
			if toRng.Length != nil {
				rng.ExtendEnd(math2.Ceil(toRng.Offset+*toRng.Length, stripSize))
			} else {
				// Unbounded To: the whole stream must stay unbounded.
				rng.Length = nil
			}
		} else {
			// To wants a single EC block: translate the block-local range
			// into whole-file stripe offsets.
			toRng := to.GetRange()
			blkStartIndex := math2.FloorDiv(toRng.Offset, int64(p.EC.ChunkSize))
			rng.ExtendStart(blkStartIndex * stripSize)
			if toRng.Length != nil {
				blkEndIndex := math2.CeilDiv(toRng.Offset+*toRng.Length, int64(p.EC.ChunkSize))
				rng.ExtendEnd(blkEndIndex * stripSize)
			} else {
				rng.Length = nil
			}
		}
	}
	ctx.StreamRange = rng
}
// extend populates the DAG from every From and To in ft: source nodes for
// the Froms, an optional EC multiply/join pipeline when K distinct block
// streams exist, and one destination node per To wired to a matching
// output stream.
func (p *DefaultParser) extend(ctx *ParseContext, ft FromTo) error {
	for _, f := range ft.Froms {
		_, err := p.buildFromNode(ctx, &ft, f)
		if err != nil {
			return err
		}
		// For a From that provides the complete file, generate a Split node.
		if f.GetDataIndex() == -1 {
			// NOTE(review): the Split node's input is never connected to the
			// From node's output stream here — confirm that connection is
			// made elsewhere, otherwise InputStreams[0] accesses on this
			// node (e.g. in removeUnusedSplit) look unsafe.
			n, _ := dag.NewNode(ctx.DAG, &ChunkedSplitType{ChunkSize: p.EC.ChunkSize, OutputCount: p.EC.K}, NodeProps{})
			for i := 0; i < p.EC.K; i++ {
				n.OutputStreams[i].Props.StreamIndex = i
			}
		}
	}
	// If K distinct block streams exist, generate a Multiply node and a
	// Join node over the streams it produces.
	ecInputStrs := make(map[int]*StreamVar)
loop:
	for _, o := range ctx.DAG.Nodes {
		for _, s := range o.OutputStreams {
			if s.Props.StreamIndex >= 0 && ecInputStrs[s.Props.StreamIndex] == nil {
				ecInputStrs[s.Props.StreamIndex] = s
				if len(ecInputStrs) == p.EC.K {
					break loop
				}
			}
		}
	}
	if len(ecInputStrs) == p.EC.K {
		mulNode, mulType := dag.NewNode(ctx.DAG, &MultiplyType{
			EC: p.EC,
		}, NodeProps{})
		for _, s := range ecInputStrs {
			mulType.AddInput(mulNode, s)
		}
		for i := 0; i < p.EC.N; i++ {
			mulType.NewOutput(mulNode, i)
		}
		joinNode, _ := dag.NewNode(ctx.DAG, &ChunkedJoinType{
			InputCount: p.EC.K,
			ChunkSize:  p.EC.ChunkSize,
		}, NodeProps{})
		for i := 0; i < p.EC.K; i++ {
			// The stream cannot be missing: Multiply produced every index.
			p.findOutputStream(ctx, i).To(joinNode, i)
		}
		// The joined stream represents the complete file.
		joinNode.OutputStreams[0].Props.StreamIndex = -1
	}
	// Find an input stream for every To.
	for _, t := range ft.Toes {
		n, err := p.buildToNode(ctx, &ft, t)
		if err != nil {
			return err
		}
		str := p.findOutputStream(ctx, t.GetDataIndex())
		if str == nil {
			return fmt.Errorf("no output stream found for data index %d", t.GetDataIndex())
		}
		str.To(n, 0)
	}
	return nil
}
// buildFromNode creates the DAG node that produces the source stream for f.
// The opened range derives from ctx.StreamRange: repRange applies when the
// From provides the complete file, blkRange (scaled down to one EC block)
// when it provides a single block.
func (p *DefaultParser) buildFromNode(ctx *ParseContext, ft *FromTo, f From) (*Node, error) {
	var repRange Range
	var blkRange Range
	repRange.Offset = ctx.StreamRange.Offset
	// Whole-file offset -> block offset: stripe index * chunk size.
	blkRange.Offset = ctx.StreamRange.Offset / int64(p.EC.ChunkSize*p.EC.K) * int64(p.EC.ChunkSize)
	if ctx.StreamRange.Length != nil {
		repRngLen := *ctx.StreamRange.Length
		repRange.Length = &repRngLen
		blkRngLen := *ctx.StreamRange.Length / int64(p.EC.ChunkSize*p.EC.K) * int64(p.EC.ChunkSize)
		blkRange.Length = &blkRngLen
	}
	switch f := f.(type) {
	case *FromWorker:
		// Read from IPFS; Length of -1 means "until end of file".
		n, t := dag.NewNode(ctx.DAG, &IPFSReadType{
			FileHash: f.FileHash,
			Option: ipfs.ReadOption{
				Offset: 0,
				Length: -1,
			},
		}, NodeProps{
			From: f,
		})
		n.OutputStreams[0].Props.StreamIndex = f.DataIndex
		if f.DataIndex == -1 {
			t.Option.Offset = repRange.Offset
			if repRange.Length != nil {
				t.Option.Length = *repRange.Length
			}
		} else {
			t.Option.Offset = blkRange.Offset
			if blkRange.Length != nil {
				t.Option.Length = *blkRange.Length
			}
		}
		// Pin the read to a concrete agent when one is specified.
		if f.Node != nil {
			n.Env.ToEnvWorker(&AgentWorker{*f.Node})
		}
		return n, nil
	case *FromExecutor:
		n, _ := dag.NewNode(ctx.DAG, &FromExecutorType{Handle: f.Handle}, NodeProps{From: f})
		n.Env.ToEnvExecutor()
		n.OutputStreams[0].Props.StreamIndex = f.DataIndex
		// Pass the wanted range to the executor side as a hint.
		if f.DataIndex == -1 {
			f.Handle.RangeHint.Offset = repRange.Offset
			f.Handle.RangeHint.Length = repRange.Length
		} else {
			f.Handle.RangeHint.Offset = blkRange.Offset
			f.Handle.RangeHint.Length = blkRange.Length
		}
		return n, nil
	default:
		return nil, fmt.Errorf("unsupported from type %T", f)
	}
}
// buildToNode creates the DAG node that consumes the stream destined for t:
// an IPFS write for *ToNode, or a hand-off to the executor for *ToExecutor.
func (p *DefaultParser) buildToNode(ctx *ParseContext, ft *FromTo, t To) (*Node, error) {
	switch t := t.(type) {
	case *ToNode:
		n, _ := dag.NewNode(ctx.DAG, &IPFSWriteType{
			FileHashStoreKey: t.FileHashStoreKey,
			Range:            t.Range,
		}, NodeProps{
			To: t,
		})
		return n, nil
	case *ToExecutor:
		n, _ := dag.NewNode(ctx.DAG, &ToExecutorType{Handle: t.Handle, Range: t.Range}, NodeProps{To: t})
		n.Env.ToEnvExecutor()
		return n, nil
	default:
		return nil, fmt.Errorf("unsupported to type %T", t)
	}
}
  280. // 删除输出流未被使用的Join指令
  281. func (p *DefaultParser) removeUnusedJoin(ctx *ParseContext) bool {
  282. changed := false
  283. dag.WalkOnlyType[*ChunkedJoinType](ctx.DAG, func(node *Node, typ *ChunkedJoinType) bool {
  284. if len(node.OutputStreams[0].Toes) > 0 {
  285. return true
  286. }
  287. for _, in := range node.InputStreams {
  288. in.NotTo(node)
  289. }
  290. ctx.DAG.RemoveNode(node)
  291. return true
  292. })
  293. return changed
  294. }
// removeUnusedMultiplyOutput drops every Multiply output stream that has
// no consumer; if no outputs remain, the Multiply node itself is removed.
// Returns whether anything changed.
func (p *DefaultParser) removeUnusedMultiplyOutput(ctx *ParseContext) bool {
	changed := false
	dag.WalkOnlyType[*MultiplyType](ctx.DAG, func(node *Node, typ *MultiplyType) bool {
		for i2, out := range node.OutputStreams {
			if len(out.Toes) > 0 {
				continue
			}
			// Mark the slot for removal; the slice is compacted below.
			node.OutputStreams[i2] = nil
			changed = true
		}
		node.OutputStreams = lo2.RemoveAllDefault(node.OutputStreams)
		// If every output stream was removed, remove the node as well.
		if len(node.OutputStreams) == 0 {
			for _, in := range node.InputStreams {
				in.NotTo(node)
			}
			ctx.DAG.RemoveNode(node)
			changed = true
		}
		return true
	})
	return changed
}
  319. // 删除未使用的Split指令
  320. func (p *DefaultParser) removeUnusedSplit(ctx *ParseContext) bool {
  321. changed := false
  322. dag.WalkOnlyType[*ChunkedSplitType](ctx.DAG, func(node *Node, typ *ChunkedSplitType) bool {
  323. // Split出来的每一个流都没有被使用,才能删除这个指令
  324. for _, out := range node.OutputStreams {
  325. if len(out.Toes) > 0 {
  326. return true
  327. }
  328. }
  329. node.InputStreams[0].NotTo(node)
  330. ctx.DAG.RemoveNode(node)
  331. changed = true
  332. return true
  333. })
  334. return changed
  335. }
// omitSplitJoin elides a Split whose outputs are all immediately re-joined
// by a single Join node: F->Split->Join->T is rewritten to F->T.
func (p *DefaultParser) omitSplitJoin(ctx *ParseContext) bool {
	changed := false
	dag.WalkOnlyType[*ChunkedSplitType](ctx.DAG, func(splitNode *Node, typ *ChunkedSplitType) bool {
		// Every Split output must have exactly one destination, and all
		// destinations must be the same node.
		// NOTE(review): outputs with zero or multiple destinations are
		// skipped (continue) rather than disqualifying the node — confirm
		// the input/output count check below is sufficient to keep this
		// transformation safe in that case.
		var joinNode *Node
		for _, out := range splitNode.OutputStreams {
			if len(out.Toes) != 1 {
				continue
			}
			if joinNode == nil {
				joinNode = out.Toes[0].Node
			} else if joinNode != out.Toes[0].Node {
				return true
			}
		}
		if joinNode == nil {
			return true
		}
		// The common destination must be a Join node.
		_, ok := joinNode.Type.(*ChunkedJoinType)
		if !ok {
			return true
		}
		// All of the Join's inputs must also come from this Split. Since
		// every Split output was shown to target the same node, comparing
		// the counts is sufficient.
		if len(joinNode.InputStreams) != len(splitNode.OutputStreams) {
			return true
		}
		// All conditions hold: rewire the Join's consumers to read the
		// Split's input directly (F->Split->Join->T becomes F->T)...
		splitNode.InputStreams[0].NotTo(splitNode)
		for _, out := range joinNode.OutputStreams[0].Toes {
			splitNode.InputStreams[0].To(out.Node, out.SlotIndex)
		}
		// ...and delete both instructions.
		ctx.DAG.RemoveNode(joinNode)
		ctx.DAG.RemoveNode(splitNode)
		changed = true
		return true
	})
	return changed
}
// pin determines each instruction's execution location from where its
// streams go to / come from. To-style instructions have fixed locations,
// which spread step by step across the DAG as pin is re-run, so in theory
// no instruction's location stays forever undetermined. Returns whether
// any node's environment changed.
func (p *DefaultParser) pin(ctx *ParseContext) bool {
	changed := false
	ctx.DAG.Walk(func(node *Node) bool {
		// Prefer pinning to the single, agreed destination environment of
		// all output streams.
		var toEnv *dag.NodeEnv
		for _, out := range node.OutputStreams {
			for _, to := range out.Toes {
				if to.Node.Env.Type == dag.EnvUnknown {
					continue
				}
				if toEnv == nil {
					toEnv = &to.Node.Env
				} else if !toEnv.Equals(to.Node.Env) {
					// NOTE(review): this break exits only the inner loop; a
					// later output stream can re-set toEnv and mask the
					// disagreement — confirm that is intended.
					toEnv = nil
					break
				}
			}
		}
		if toEnv != nil {
			if !node.Env.Equals(*toEnv) {
				changed = true
			}
			node.Env = *toEnv
			return true
		}
		// Otherwise pin to the common origin environment of the inputs.
		var fromEnv *dag.NodeEnv
		for _, in := range node.InputStreams {
			if in.From.Node.Env.Type == dag.EnvUnknown {
				continue
			}
			if fromEnv == nil {
				fromEnv = &in.From.Node.Env
			} else if !fromEnv.Equals(in.From.Node.Env) {
				fromEnv = nil
				break
			}
		}
		if fromEnv != nil {
			if !node.Env.Equals(*fromEnv) {
				changed = true
			}
			node.Env = *fromEnv
		}
		return true
	})
	return changed
}
  429. // 对于所有未使用的流,增加Drop指令
  430. func (p *DefaultParser) dropUnused(ctx *ParseContext) {
  431. ctx.DAG.Walk(func(node *Node) bool {
  432. for _, out := range node.OutputStreams {
  433. if len(out.Toes) == 0 {
  434. n := ctx.DAG.NewNode(&DropType{}, NodeProps{})
  435. n.Env = node.Env
  436. out.To(n, 0)
  437. }
  438. }
  439. return true
  440. })
  441. }
  442. // 为IPFS写入指令存储结果
  443. func (p *DefaultParser) storeIPFSWriteResult(ctx *ParseContext) {
  444. dag.WalkOnlyType[*IPFSWriteType](ctx.DAG, func(node *Node, typ *IPFSWriteType) bool {
  445. if typ.FileHashStoreKey == "" {
  446. return true
  447. }
  448. n := ctx.DAG.NewNode(&StoreType{
  449. StoreKey: typ.FileHashStoreKey,
  450. }, NodeProps{})
  451. n.Env.ToEnvExecutor()
  452. node.OutputValues[0].To(n, 0)
  453. return true
  454. })
  455. }
  456. // 生成Range指令。StreamRange可能超过文件总大小,但Range指令会在数据量不够时不报错而是正常返回
  457. func (p *DefaultParser) generateRange(ctx *ParseContext) {
  458. ctx.DAG.Walk(func(node *dag.Node[NodeProps, VarProps]) bool {
  459. if node.Props.To == nil {
  460. return true
  461. }
  462. toDataIdx := node.Props.To.GetDataIndex()
  463. toRng := node.Props.To.GetRange()
  464. if toDataIdx == -1 {
  465. n := ctx.DAG.NewNode(&RangeType{
  466. Range: Range{
  467. Offset: toRng.Offset - ctx.StreamRange.Offset,
  468. Length: toRng.Length,
  469. },
  470. }, NodeProps{})
  471. n.Env = node.InputStreams[0].From.Node.Env
  472. node.InputStreams[0].To(n, 0)
  473. node.InputStreams[0].NotTo(node)
  474. n.OutputStreams[0].To(node, 0)
  475. } else {
  476. stripSize := int64(p.EC.ChunkSize * p.EC.K)
  477. blkStartIdx := ctx.StreamRange.Offset / stripSize
  478. blkStart := blkStartIdx * int64(p.EC.ChunkSize)
  479. n := ctx.DAG.NewNode(&RangeType{
  480. Range: Range{
  481. Offset: toRng.Offset - blkStart,
  482. Length: toRng.Length,
  483. },
  484. }, NodeProps{})
  485. n.Env = node.InputStreams[0].From.Node.Env
  486. node.InputStreams[0].To(n, 0)
  487. node.InputStreams[0].NotTo(node)
  488. n.OutputStreams[0].To(node, 0)
  489. }
  490. return true
  491. })
  492. }
  493. // 生成Clone指令
  494. func (p *DefaultParser) generateClone(ctx *ParseContext) {
  495. ctx.DAG.Walk(func(node *dag.Node[NodeProps, VarProps]) bool {
  496. for _, out := range node.OutputStreams {
  497. if len(out.Toes) <= 1 {
  498. continue
  499. }
  500. n, t := dag.NewNode(ctx.DAG, &CloneStreamType{}, NodeProps{})
  501. n.Env = node.Env
  502. for _, to := range out.Toes {
  503. t.NewOutput(node).To(to.Node, to.SlotIndex)
  504. }
  505. out.Toes = nil
  506. out.To(n, 0)
  507. }
  508. for _, out := range node.OutputValues {
  509. if len(out.Toes) <= 1 {
  510. continue
  511. }
  512. n, t := dag.NewNode(ctx.DAG, &CloneVarType{}, NodeProps{})
  513. n.Env = node.Env
  514. for _, to := range out.Toes {
  515. t.NewOutput(node).To(to.Node, to.SlotIndex)
  516. }
  517. out.Toes = nil
  518. out.To(n, 0)
  519. }
  520. return true
  521. })
  522. }
// generateSend inserts a transfer instruction wherever a stream or value
// crosses environments: an Executor destination must pull via a Get
// (guarded by HoldUntil so the plan does not finish before the Get
// arrives), while a Worker (agent) destination can be sent to directly.
// NOTE(review): Toes[0] assumes every output has at least one consumer by
// this point (dropUnused/generateClone ran earlier) — confirm.
func (p *DefaultParser) generateSend(ctx *ParseContext) {
	ctx.DAG.Walk(func(node *dag.Node[NodeProps, VarProps]) bool {
		for _, out := range node.OutputStreams {
			to := out.Toes[0]
			// Same environment: no transfer needed.
			if to.Node.Env.Equals(node.Env) {
				continue
			}
			switch to.Node.Env.Type {
			case dag.EnvExecutor:
				// Destined for the Executor: the Executor must pull it.
				getNode := ctx.DAG.NewNode(&GetStreamType{}, NodeProps{})
				getNode.Env.ToEnvExecutor()
				// Also guard the variable with a HoldUntil instruction, so
				// the plan cannot end before the Get instruction arrives.
				holdNode := ctx.DAG.NewNode(&HoldUntilType{}, NodeProps{})
				holdNode.Env = node.Env
				// Get's signal goes to the Hold instruction.
				getNode.OutputValues[0].To(holdNode, 0)
				// Get's output goes to the final destination.
				getNode.OutputStreams[0].To(to.Node, to.SlotIndex)
				out.Toes = nil
				// The source node's output goes to the Hold instruction.
				out.To(holdNode, 0)
				// The Hold instruction's output goes to the Get.
				holdNode.OutputStreams[0].To(getNode, 0)
			case dag.EnvWorker:
				// Destined for an agent: can be sent directly.
				n := ctx.DAG.NewNode(&SendStreamType{}, NodeProps{})
				n.Env = node.Env
				n.OutputStreams[0].To(to.Node, to.SlotIndex)
				out.Toes = nil
				out.To(n, 0)
			}
		}
		for _, out := range node.OutputValues {
			to := out.Toes[0]
			if to.Node.Env.Equals(node.Env) {
				continue
			}
			switch to.Node.Env.Type {
			case dag.EnvExecutor:
				// Destined for the Executor: pull via Get + HoldUntil, as
				// for streams above.
				// NOTE(review): "GetVaType" looks like a typo for something
				// like "GetVarType" — confirm against its declaration.
				getNode := ctx.DAG.NewNode(&GetVaType{}, NodeProps{})
				getNode.Env.ToEnvExecutor()
				holdNode := ctx.DAG.NewNode(&HoldUntilType{}, NodeProps{})
				holdNode.Env = node.Env
				// Get's signal goes to the Hold instruction.
				getNode.OutputValues[0].To(holdNode, 0)
				// Get's fetched value goes to the final destination.
				getNode.OutputValues[1].To(to.Node, to.SlotIndex)
				out.Toes = nil
				// The source node's output goes to the Hold instruction.
				out.To(holdNode, 0)
				// The Hold instruction's output goes to the Get.
				holdNode.OutputValues[0].To(getNode, 0)
			case dag.EnvWorker:
				// Destined for an agent: can be sent directly.
				n := ctx.DAG.NewNode(&SendVarType{}, NodeProps{})
				n.Env = node.Env
				n.OutputValues[0].To(to.Node, to.SlotIndex)
				out.Toes = nil
				out.To(n, 0)
			}
		}
		return true
	})
}
// buildPlan walks the final DAG, materializes an ioswitch.Var for every
// stream/value variable that does not yet have one, and asks each node's
// Type to generate its Op into blder. The walk stops at the first error,
// which is then returned.
func (p *DefaultParser) buildPlan(ctx *ParseContext, blder *builder.PlanBuilder) error {
	var retErr error
	ctx.DAG.Walk(func(node *dag.Node[NodeProps, VarProps]) bool {
		for _, out := range node.OutputStreams {
			if out.Props.Var != nil {
				continue
			}
			out.Props.Var = blder.NewStreamVar()
		}
		// Inputs are usually shared with the upstream output and therefore
		// already assigned; this covers any that are still unset.
		for _, in := range node.InputStreams {
			if in.Props.Var != nil {
				continue
			}
			in.Props.Var = blder.NewStreamVar()
		}
		for _, out := range node.OutputValues {
			if out.Props.Var != nil {
				continue
			}
			switch out.Props.ValueType {
			case StringValueVar:
				out.Props.Var = blder.NewStringVar()
			case SignalValueVar:
				out.Props.Var = blder.NewSignalVar()
			}
		}
		for _, in := range node.InputValues {
			if in.Props.Var != nil {
				continue
			}
			switch in.Props.ValueType {
			case StringValueVar:
				in.Props.Var = blder.NewStringVar()
			case SignalValueVar:
				in.Props.Var = blder.NewSignalVar()
			}
		}
		if err := node.Type.GenerateOp(node, blder); err != nil {
			retErr = err
			return false
		}
		return true
	})
	return retErr
}

本项目旨在将云际存储公共基础设施化,使个人及企业可低门槛使用高效的云际存储服务(安装开箱即用云际存储客户端即可,无需关注其他组件的部署),同时支持用户灵活便捷定制云际存储的功能细节。