You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_graphdata_distributed.py 5.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. import random
  16. import time
  17. from multiprocessing import Process
  18. import numpy as np
  19. import mindspore.dataset as ds
  20. from mindspore import log as logger
  21. DATASET_FILE = "../data/mindrecord/testGraphData/testdata"
def graphdata_startserver():
    """
    Start a GraphData server process serving DATASET_FILE.

    Blocks inside ds.GraphData while acting as the server; intended to be
    run in a child process (see test_graphdata_distributed).
    """
    logger.info('test start server.\n')
    # working_mode='server' turns this call into the serving endpoint that
    # a later 'client' GraphData instance connects to.
    ds.GraphData(DATASET_FILE, 1, 'server')
  28. class RandomBatchedSampler(ds.Sampler):
  29. # RandomBatchedSampler generate random sequence without replacement in a batched manner
  30. def __init__(self, index_range, num_edges_per_sample):
  31. super().__init__()
  32. self.index_range = index_range
  33. self.num_edges_per_sample = num_edges_per_sample
  34. def __iter__(self):
  35. indices = [i+1 for i in range(self.index_range)]
  36. # Reset random seed here if necessary
  37. # random.seed(0)
  38. random.shuffle(indices)
  39. for i in range(0, self.index_range, self.num_edges_per_sample):
  40. # Drop reminder
  41. if i + self.num_edges_per_sample <= self.index_range:
  42. yield indices[i: i + self.num_edges_per_sample]
  43. class GNNGraphDataset():
  44. def __init__(self, g, batch_num):
  45. self.g = g
  46. self.batch_num = batch_num
  47. def __len__(self):
  48. # Total sample size of GNN dataset
  49. # In this case, the size should be total_num_edges/num_edges_per_sample
  50. return self.g.graph_info()['edge_num'][0] // self.batch_num
  51. def __getitem__(self, index):
  52. # index will be a list of indices yielded from RandomBatchedSampler
  53. # Fetch edges/nodes/samples/features based on indices
  54. nodes = self.g.get_nodes_from_edges(index.astype(np.int32))
  55. nodes = nodes[:, 0]
  56. neg_nodes = self.g.get_neg_sampled_neighbors(
  57. node_list=nodes, neg_neighbor_num=3, neg_neighbor_type=1)
  58. nodes_neighbors = self.g.get_sampled_neighbors(node_list=nodes, neighbor_nums=[
  59. 2, 2], neighbor_types=[2, 1])
  60. neg_nodes_neighbors = self.g.get_sampled_neighbors(
  61. node_list=neg_nodes[:, 1:].reshape(-1), neighbor_nums=[2, 2], neighbor_types=[2, 2])
  62. nodes_neighbors_features = self.g.get_node_feature(
  63. node_list=nodes_neighbors, feature_types=[2, 3])
  64. neg_neighbors_features = self.g.get_node_feature(
  65. node_list=neg_nodes_neighbors, feature_types=[2, 3])
  66. return nodes_neighbors, neg_nodes_neighbors, nodes_neighbors_features[0], neg_neighbors_features[1]
  67. def test_graphdata_distributed():
  68. """
  69. Test distributed
  70. """
  71. logger.info('test distributed.\n')
  72. p1 = Process(target=graphdata_startserver)
  73. p1.start()
  74. time.sleep(2)
  75. g = ds.GraphData(DATASET_FILE, 1, 'client')
  76. nodes = g.get_all_nodes(1)
  77. assert nodes.tolist() == [101, 102, 103, 104, 105, 106, 107, 108, 109, 110]
  78. row_tensor = g.get_node_feature(nodes.tolist(), [1, 2, 3])
  79. assert row_tensor[0].tolist() == [[0, 1, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 1, 1, 0], [0, 0, 0, 0, 0],
  80. [1, 1, 0, 1, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0], [0, 0, 0, 1, 1],
  81. [0, 1, 1, 0, 0], [0, 1, 0, 1, 0]]
  82. assert row_tensor[2].tolist() == [1, 2, 3, 1, 4, 3, 5, 3, 5, 4]
  83. edges = g.get_all_edges(0)
  84. assert edges.tolist() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
  85. 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]
  86. features = g.get_edge_feature(edges, [1, 2])
  87. assert features[0].tolist() == [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0,
  88. 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0]
  89. batch_num = 2
  90. edge_num = g.graph_info()['edge_num'][0]
  91. out_column_names = ["neighbors", "neg_neighbors", "neighbors_features", "neg_neighbors_features"]
  92. dataset = ds.GeneratorDataset(source=GNNGraphDataset(g, batch_num), column_names=out_column_names,
  93. sampler=RandomBatchedSampler(edge_num, batch_num), num_parallel_workers=4,
  94. python_multiprocessing=False)
  95. dataset = dataset.repeat(2)
  96. itr = dataset.create_dict_iterator()
  97. i = 0
  98. for data in itr:
  99. assert data['neighbors'].shape == (2, 7)
  100. assert data['neg_neighbors'].shape == (6, 7)
  101. assert data['neighbors_features'].shape == (2, 7)
  102. assert data['neg_neighbors_features'].shape == (6, 7)
  103. i += 1
  104. assert i == 40
if __name__ == '__main__':
    # Allow running this test standalone, without a pytest runner.
    test_graphdata_distributed()