
!6018 Set the number of epochs in model.train() non-sink mode

Merge pull request !6018 from h.farahat/ms_nonsink_epochs
Tag: v1.1.0
Merged by mindspore-ci-bot, 5 years ago
Commit: 2a799fe90e
40 changed files with 50 additions and 49 deletions
1. mindspore/nn/probability/infer/variational/svi.py (+1 -1)
2. mindspore/train/dataset_helper.py (+4 -3)
3. mindspore/train/model.py (+2 -1)
4. model_zoo/official/cv/faster_rcnn/eval.py (+1 -1)
5. model_zoo/official/cv/maskrcnn/eval.py (+1 -1)
6. model_zoo/official/cv/resnext50/eval.py (+1 -1)
7. model_zoo/official/cv/ssd/eval.py (+1 -1)
8. model_zoo/official/cv/vgg16/eval.py (+1 -1)
9. model_zoo/official/cv/yolov3_darknet53/eval.py (+1 -1)
10. model_zoo/official/cv/yolov3_darknet53/train.py (+1 -1)
11. model_zoo/official/cv/yolov3_darknet53_quant/eval.py (+1 -1)
12. model_zoo/official/cv/yolov3_darknet53_quant/train.py (+1 -1)
13. model_zoo/official/cv/yolov3_resnet18/eval.py (+1 -1)
14. model_zoo/official/nlp/bert/run_classifier.py (+1 -1)
15. model_zoo/official/nlp/bert/run_ner.py (+1 -1)
16. model_zoo/official/nlp/bert/run_squad.py (+1 -1)
17. model_zoo/official/nlp/mass/src/transformer/infer_mass.py (+2 -2)
18. model_zoo/official/nlp/tinybert/run_task_distill.py (+1 -1)
19. model_zoo/official/nlp/tinybert/src/utils.py (+1 -1)
20. model_zoo/official/nlp/transformer/eval.py (+1 -1)
21. model_zoo/utils/cv_to_mindrecord/Caltech-UCSD-Birds-200-2011/create_dataset.py (+1 -1)
22. model_zoo/utils/graph_to_mindrecord/reader.py (+1 -1)
23. model_zoo/utils/nlp_to_mindrecord/aclImdb/create_dataset.py (+1 -1)
24. model_zoo/utils/nlp_to_mindrecord/aclImdb_preprocess/create_dataset.py (+1 -1)
25. tests/dataset_mock.py (+1 -1)
26. tests/perf_test/mindrecord/imagenet/perf_read_imagenet.py (+2 -2)
27. tests/st/ops/ascend/test_tdt_data_ms.py (+1 -1)
28. tests/st/probability/bnn_layers/test_bnn_layer.py (+2 -2)
29. tests/st/probability/dpn/test_gpu_svi_cvae.py (+1 -1)
30. tests/st/probability/dpn/test_gpu_svi_vae.py (+1 -1)
31. tests/st/probability/toolbox/test_uncertainty.py (+1 -1)
32. tests/st/probability/transforms/test_transform_bnn_layer.py (+2 -2)
33. tests/st/probability/transforms/test_transform_bnn_model.py (+2 -2)
34. tests/st/pynative/loss_scale/test_loss_scale.py (+1 -1)
35. tests/st/pynative/test_pynative_resnet50.py (+1 -2)
36. tests/ut/python/dataset/test_random_posterize.py (+2 -2)
37. tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py (+1 -1)
38. tests/ut/python/parallel/test_auto_parallel_resnet.py (+1 -1)
39. tests/ut/python/parallel/test_bias_add.py (+1 -1)
40. tests/ut/python/parallel/test_gather_v2_primitive.py (+1 -1)
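
Every call site in the list follows the same pattern: an iterator that used to be created with the default (unbounded) epoch count now receives an explicit num_epochs, so the dataset pipeline can tear down cleanly once iteration finishes. A minimal sketch of the pattern, assuming a toy GeneratorDataset (the source and column name are illustrative, not from this PR):

# Illustrative pipeline -- not code from this PR.
import numpy as np
import mindspore.dataset as ds

def gen():
    for i in range(4):
        yield (np.array([i], dtype=np.int32),)

data = ds.GeneratorDataset(gen, column_names=["x"])

# The default is num_epochs=-1 (iterate indefinitely), which keeps the
# pipeline alive after a single pass; num_epochs=1 bounds it to one
# epoch so resources are released when the loop ends.
for item in data.create_dict_iterator(num_epochs=1):
    print(item["x"])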

mindspore/nn/probability/infer/variational/svi.py (+1 -1)

@@ -63,7 +63,7 @@ class SVI:
         for _ in range(1, epochs+1):
             train_loss = 0
             dataset_size = 0
-            for data in train_dataset.create_dict_iterator():
+            for data in train_dataset.create_dict_iterator(num_epochs=1):
                 x = Tensor(data['image'], dtype=mstype.float32)
                 y = Tensor(data['label'], dtype=mstype.int32)
                 dataset_size += len(x)


mindspore/train/dataset_helper.py (+4 -3)

@@ -145,7 +145,7 @@ class DatasetHelper:
             self.iter = iterclass(dataset, sink_size, epoch_num)
         else:
             iterclass = _DatasetIterNormal
-            self.iter = iterclass(dataset)
+            self.iter = iterclass(dataset, epoch_num=epoch_num)

     def __iter__(self):
         return self.iter.__iter__()

@@ -290,11 +290,12 @@ class _DatasetIterPSLite(_DatasetIter):

 class _DatasetIterNormal:
     """Iter for normal(non sink) mode, feed the data from host."""
-    def __init__(self, dataset):
+
+    def __init__(self, dataset, epoch_num=-1):
         self.dataset = dataset
         self.device_num = _get_device_num()
         self.global_rank = _get_global_rank()
-        self.iter = self.dataset.create_tuple_iterator()
+        self.iter = self.dataset.create_tuple_iterator(num_epochs=epoch_num)

     def __iter__(self):
         return self
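
These two hunks are the functional core of the PR: DatasetHelper now forwards epoch_num to _DatasetIterNormal instead of dropping it, and the iterator passes it through as num_epochs. A simplified sketch of the forwarding chain (signatures trimmed; the real classes in mindspore/train/dataset_helper.py carry more logic):

# Simplified sketch -- not the full MindSpore implementation.
class _DatasetIterNormal:
    def __init__(self, dataset, epoch_num=-1):
        self.dataset = dataset
        # epoch_num bounds the host-side iterator; -1 keeps the old
        # "iterate indefinitely" behavior.
        self.iter = self.dataset.create_tuple_iterator(num_epochs=epoch_num)

    def __iter__(self):
        return self.iter.__iter__()

class DatasetHelper:
    def __init__(self, dataset, dataset_sink_mode=True, sink_size=-1, epoch_num=1):
        if dataset_sink_mode:
            ...  # sink-mode iterators already received epoch_num
        else:
            self.iter = _DatasetIterNormal(dataset, epoch_num=epoch_num)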


mindspore/train/model.py (+2 -1)

@@ -460,7 +460,8 @@ class Model:
                                                  is_train=True,
                                                  phase='train',
                                                  dataset=train_dataset,
-                                                 dataset_sink_mode=False)
+                                                 dataset_sink_mode=False,
+                                                 epoch_num=epoch)
         cb_params.cur_step_num = 0
         run_context = RunContext(cb_params)
         list_callback.begin(run_context)
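
With Model.train() now passing epoch_num=epoch into DatasetHelper, the epoch count reaches the dataset pipeline in non-sink mode. A hypothetical end-to-end usage, assuming a toy network and generator dataset (none of this is from the PR):

# Hypothetical usage -- the network and dataset here are placeholders.
import numpy as np
import mindspore.dataset as ds
import mindspore.nn as nn
from mindspore.train.model import Model

def gen():
    for _ in range(8):
        yield (np.random.rand(32).astype(np.float32),
               np.array(np.random.randint(0, 2), dtype=np.int32))

train_ds = ds.GeneratorDataset(gen, column_names=["data", "label"]).batch(4)

net = nn.Dense(32, 2)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
model = Model(net, loss_fn=loss, optimizer=opt)

# Non-sink mode: the epoch count (3) is forwarded to the dataset
# iterator as num_epochs, so the pipeline stops cleanly after epoch 3.
model.train(3, train_ds, dataset_sink_mode=False)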


model_zoo/official/cv/faster_rcnn/eval.py (+1 -1)

@@ -57,7 +57,7 @@ def FasterRcnn_eval(dataset_path, ckpt_path, ann_file):
     print("total images num: ", total)
     print("Processing, please wait a moment.")
     max_num = 128
-    for data in ds.create_dict_iterator():
+    for data in ds.create_dict_iterator(num_epochs=1):
         eval_iter = eval_iter + 1

         img_data = data['image']


model_zoo/official/cv/maskrcnn/eval.py (+1 -1)

@@ -57,7 +57,7 @@ def MaskRcnn_eval(dataset_path, ckpt_path, ann_file):
     print("total images num: ", total)
     print("Processing, please wait a moment.")
     max_num = 128
-    for data in ds.create_dict_iterator(output_numpy=True):
+    for data in ds.create_dict_iterator(output_numpy=True, num_epochs=1):
         eval_iter = eval_iter + 1

         img_data = data['image']


model_zoo/official/cv/resnext50/eval.py (+1 -1)

@@ -200,7 +200,7 @@ def test(cloud_args=None):
                                              per_batch_size=args.per_batch_size,
                                              max_epoch=1, rank=args.rank, group_size=args.group_size,
                                              mode='eval')
-        eval_dataloader = de_dataset.create_tuple_iterator(output_numpy=True)
+        eval_dataloader = de_dataset.create_tuple_iterator(output_numpy=True, num_epochs=1)
         network = get_network(args.backbone, num_classes=args.num_classes, platform=args.platform)
         if network is None:
             raise NotImplementedError('not implement {}'.format(args.backbone))


model_zoo/official/cv/ssd/eval.py (+1 -1)

@@ -44,7 +44,7 @@ def ssd_eval(dataset_path, ckpt_path):
     print("\n========================================\n")
     print("total images num: ", total)
     print("Processing, please wait a moment.")
-    for data in ds.create_dict_iterator(output_numpy=True):
+    for data in ds.create_dict_iterator(output_numpy=True, num_epochs=1):
         img_id = data['img_id']
         img_np = data['image']
         image_shape = data['image_shape']


model_zoo/official/cv/vgg16/eval.py (+1 -1)

@@ -159,7 +159,7 @@ def test(cloud_args=None):

     for model in args.models:
         dataset = classification_dataset(args.data_path, args.image_size, args.per_batch_size, mode='eval')
-        eval_dataloader = dataset.create_tuple_iterator(output_numpy=True)
+        eval_dataloader = dataset.create_tuple_iterator(output_numpy=True, num_epochs=1)
         network = vgg16(args.num_classes, args, phase="test")

         # pre_trained


model_zoo/official/cv/yolov3_darknet53/eval.py (+1 -1)

@@ -299,7 +299,7 @@ def test():

     input_shape = Tensor(tuple(config.test_img_shape), ms.float32)
     args.logger.info('Start inference....')
-    for i, data in enumerate(ds.create_dict_iterator()):
+    for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):
         image = data["image"]

         image_shape = data["image_shape"]


model_zoo/official/cv/yolov3_darknet53/train.py (+1 -1)

@@ -239,7 +239,7 @@ def train():

     old_progress = -1
     t_end = time.time()
-    data_loader = ds.create_dict_iterator(output_numpy=True)
+    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)

     for i, data in enumerate(data_loader):
         images = data["image"]


model_zoo/official/cv/yolov3_darknet53_quant/eval.py (+1 -1)

@@ -305,7 +305,7 @@ def test():

     input_shape = Tensor(tuple(config.test_img_shape), ms.float32)
     args.logger.info('Start inference....')
-    for i, data in enumerate(ds.create_dict_iterator()):
+    for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):
         image = data["image"]

         image_shape = data["image_shape"]


model_zoo/official/cv/yolov3_darknet53_quant/train.py (+1 -1)

@@ -224,7 +224,7 @@ def train():

     old_progress = -1
     t_end = time.time()
-    data_loader = ds.create_dict_iterator(output_numpy=True)
+    data_loader = ds.create_dict_iterator(output_numpy=True, num_epochs=1)

     shape_record = ShapeRecord()
     for i, data in enumerate(data_loader):


model_zoo/official/cv/yolov3_resnet18/eval.py (+1 -1)

@@ -44,7 +44,7 @@ def yolo_eval(dataset_path, ckpt_path):
     print("\n========================================\n")
     print("total images num: ", total)
     print("Processing, please wait a moment.")
-    for data in ds.create_dict_iterator(output_numpy=True):
+    for data in ds.create_dict_iterator(output_numpy=True, num_epochs=1):
         img_np = data['image']
         image_shape = data['image_shape']
         annotation = data['annotation']


model_zoo/official/nlp/bert/run_classifier.py (+1 -1)

@@ -119,7 +119,7 @@ def do_eval(dataset=None, network=None, num_class=2, assessment_method="accuracy
         raise ValueError("Assessment method not supported, support: [accuracy, f1, mcc, spearman_correlation]")

     columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
-    for data in dataset.create_dict_iterator():
+    for data in dataset.create_dict_iterator(num_epochs=1):
         input_data = []
         for i in columns_list:
             input_data.append(data[i])


model_zoo/official/nlp/bert/run_ner.py (+1 -1)

@@ -128,7 +128,7 @@ def do_eval(dataset=None, network=None, use_crf="", num_class=2, assessment_meth
         raise ValueError("Assessment method not supported, support: [accuracy, f1, mcc, spearman_correlation]")

     columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
-    for data in dataset.create_dict_iterator():
+    for data in dataset.create_dict_iterator(num_epochs=1):
         input_data = []
         for i in columns_list:
             input_data.append(data[i])


model_zoo/official/nlp/bert/run_squad.py (+1 -1)

@@ -109,7 +109,7 @@ def do_eval(dataset=None, vocab_file="", eval_json="", load_checkpoint_path="",
     output = []
     RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
     columns_list = ["input_ids", "input_mask", "segment_ids", "unique_ids"]
-    for data in dataset.create_dict_iterator():
+    for data in dataset.create_dict_iterator(num_epochs=1):
         input_data = []
         for i in columns_list:
             input_data.append(data[i])


model_zoo/official/nlp/mass/src/transformer/infer_mass.py (+2 -2)

@@ -107,7 +107,7 @@ def transformer_infer(config, dataset):
     probs = []
     source_sentences = []
     target_sentences = []
-    for batch in dataset.create_dict_iterator(output_numpy=True):
+    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
         source_sentences.append(batch["source_eos_ids"])
         target_sentences.append(batch["target_eos_ids"])

@@ -232,7 +232,7 @@ def transformer_infer_ppl(config, dataset):
     lengths = []
     source_sentences = []
     target_sentences = []
-    for batch in dataset.create_dict_iterator(output_numpy=True):
+    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
         source_sentences.append(batch["source_eos_ids"])
         target_sentences.append(batch["target_eos_ids"])


model_zoo/official/nlp/tinybert/run_task_distill.py (+1 -1)

@@ -278,7 +278,7 @@ def do_eval_standalone():

     callback = Accuracy()
     columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
-    for data in eval_dataset.create_dict_iterator():
+    for data in eval_dataset.create_dict_iterator(num_epochs=1):
         input_data = []
         for i in columns_list:
             input_data.append(data[i])


model_zoo/official/nlp/tinybert/src/utils.py (+1 -1)

@@ -93,7 +93,7 @@ class EvalCallBack(Callback):
         if cb_params.cur_step_num % 100 == 0:
             callback = Accuracy()
             columns_list = ["input_ids", "input_mask", "segment_ids", "label_ids"]
-            for data in self.dataset.create_dict_iterator():
+            for data in self.dataset.create_dict_iterator(num_epochs=1):
                 input_data = []
                 for i in columns_list:
                     input_data.append(data[i])


model_zoo/official/nlp/transformer/eval.py (+1 -1)

@@ -113,7 +113,7 @@ def run_transformer_eval():
     predictions = []
     source_sents = []
     target_sents = []
-    for batch in dataset.create_dict_iterator(output_numpy=True):
+    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
         source_sents.append(batch["source_eos_ids"])
         target_sents.append(batch["target_eos_ids"])
         source_ids = Tensor(batch["source_eos_ids"], mstype.int32)


model_zoo/utils/cv_to_mindrecord/Caltech-UCSD-Birds-200-2011/create_dataset.py (+1 -1)

@@ -22,7 +22,7 @@ def create_dataset(data_file):
                               num_parallel_workers=num_readers,
                               shuffle=True)
     index = 0
-    for item in data_set.create_dict_iterator(output_numpy=True):
+    for item in data_set.create_dict_iterator(output_numpy=True, num_epochs=1):
         print("example {}: {}".format(index, item))
         index += 1
         if index % 1000 == 0:


model_zoo/utils/graph_to_mindrecord/reader.py (+1 -1)

@@ -28,7 +28,7 @@ args = parser.parse_args()

 data_set = ds.MindDataset(args.path)
 num_iter = 0
-for item in data_set.create_dict_iterator(output_numpy=True):
+for item in data_set.create_dict_iterator(output_numpy=True, num_epochs=1):
     print(item)
     num_iter += 1
 print("Total items # is {}".format(num_iter))

model_zoo/utils/nlp_to_mindrecord/aclImdb/create_dataset.py (+1 -1)

@@ -22,7 +22,7 @@ def create_dataset(data_file):
                               num_parallel_workers=num_readers,
                               shuffle=True)
     index = 0
-    for item in data_set.create_dict_iterator(output_numpy=True):
+    for item in data_set.create_dict_iterator(output_numpy=True, num_epochs=1):
         print("example {}: {}".format(index, item))
         index += 1
         if index % 1000 == 0:


model_zoo/utils/nlp_to_mindrecord/aclImdb_preprocess/create_dataset.py (+1 -1)

@@ -22,7 +22,7 @@ def create_dataset(data_file):
                               num_parallel_workers=num_readers,
                               shuffle=True)
     index = 0
-    for item in data_set.create_dict_iterator(output_numpy=True):
+    for item in data_set.create_dict_iterator(output_numpy=True, num_epochs=1):
         print("example {}: {}".format(index, item))
         index += 1
         if index % 1000 == 0:


tests/dataset_mock.py (+1 -1)

@@ -55,7 +55,7 @@ class MindData:
         self.send_epoch_end = send_epoch_end
         return self

-    def create_tuple_iterator(self):
+    def create_tuple_iterator(self, num_epochs=-1):
         return self.__iter__()

     def send(self, num_epochs=-1):


tests/perf_test/mindrecord/imagenet/perf_read_imagenet.py (+2 -2)

@@ -48,7 +48,7 @@ def use_minddataset(mindrecord):
                               columns_list=columns_list,
                               num_parallel_workers=4)
     num_iter = 0
-    for _ in data_set.create_dict_iterator():
+    for _ in data_set.create_dict_iterator(num_epochs=1):
         num_iter += 1
         print_log(num_iter)
     end = time.time()

@@ -64,7 +64,7 @@ def use_tfrecorddataset(tfrecord):
                               shuffle=ds.Shuffle.GLOBAL)
     data_set = data_set.shuffle(10000)
     num_iter = 0
-    for _ in data_set.create_dict_iterator():
+    for _ in data_set.create_dict_iterator(num_epochs=1):
         num_iter += 1
         print_log(num_iter)
     end = time.time()


tests/st/ops/ascend/test_tdt_data_ms.py (+1 -1)

@@ -96,7 +96,7 @@ if __name__ == '__main__':
         dataset_types, dataset_shapes, (), 'dataset')
     ds1.send()

-    for data in data_set.create_tuple_iterator(output_numpy=True):
+    for data in data_set.create_tuple_iterator(output_numpy=True, num_epochs=1):
         output = net()
         print(data[0].any())
         print(


tests/st/probability/bnn_layers/test_bnn_layer.py (+2 -2)

@@ -92,7 +92,7 @@ class BNNLeNet5(nn.Cell):
 def train_model(train_net, net, dataset):
     accs = []
     loss_sum = 0
-    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         loss = train_net(train_x, label)

@@ -109,7 +109,7 @@ def train_model(train_net, net, dataset):

 def validate_model(net, dataset):
     accs = []
-    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         output = net(train_x)


tests/st/probability/dpn/test_gpu_svi_cvae.py (+1 -1)

@@ -122,7 +122,7 @@ def test_svi_cvae():
     sample_label = Tensor([i for i in range(0, 8)] * 8, dtype=mstype.int32)
     generated_sample = cvae.generate_sample(sample_label, 64, IMAGE_SHAPE)
     # test function: reconstruct_sample
-    for sample in ds_train.create_dict_iterator(output_numpy=True):
+    for sample in ds_train.create_dict_iterator(output_numpy=True, num_epochs=1):
         sample_x = Tensor(sample['image'], dtype=mstype.float32)
         sample_y = Tensor(sample['label'], dtype=mstype.int32)
         reconstructed_sample = cvae.reconstruct_sample(sample_x, sample_y)


tests/st/probability/dpn/test_gpu_svi_vae.py (+1 -1)

@@ -110,7 +110,7 @@ def test_svi_vae():
     # test function: generate_sample
     generated_sample = vae.generate_sample(64, IMAGE_SHAPE)
     # test function: reconstruct_sample
-    for sample in ds_train.create_dict_iterator(output_numpy=True):
+    for sample in ds_train.create_dict_iterator(output_numpy=True, num_epochs=1):
         sample_x = Tensor(sample['image'], dtype=mstype.float32)
         reconstructed_sample = vae.reconstruct_sample(sample_x)
     print('The loss of the trained network is ', trained_loss)


tests/st/probability/toolbox/test_uncertainty.py (+1 -1)

@@ -129,7 +129,7 @@ if __name__ == '__main__':
                                            epi_uncer_model_path=None,
                                            ale_uncer_model_path=None,
                                            save_model=False)
-    for eval_data in ds_eval.create_dict_iterator(output_numpy=True):
+    for eval_data in ds_eval.create_dict_iterator(output_numpy=True, num_epochs=1):
         eval_data = Tensor(eval_data['image'], mstype.float32)
         epistemic_uncertainty = evaluation.eval_epistemic_uncertainty(eval_data)
         aleatoric_uncertainty = evaluation.eval_aleatoric_uncertainty(eval_data)

tests/st/probability/transforms/test_transform_bnn_layer.py (+2 -2)

@@ -93,7 +93,7 @@ class LeNet5(nn.Cell):
 def train_model(train_net, net, dataset):
     accs = []
     loss_sum = 0
-    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         loss = train_net(train_x, label)

@@ -110,7 +110,7 @@ def train_model(train_net, net, dataset):

 def validate_model(net, dataset):
     accs = []
-    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         output = net(train_x)


tests/st/probability/transforms/test_transform_bnn_model.py (+2 -2)

@@ -92,7 +92,7 @@ class LeNet5(nn.Cell):
 def train_model(train_net, net, dataset):
     accs = []
     loss_sum = 0
-    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         loss = train_net(train_x, label)

@@ -109,7 +109,7 @@ def train_model(train_net, net, dataset):

 def validate_model(net, dataset):
     accs = []
-    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True)):
+    for _, data in enumerate(dataset.create_dict_iterator(output_numpy=True, num_epochs=1)):
         train_x = Tensor(data['image'].astype(np.float32))
         label = Tensor(data['label'].astype(np.int32))
         output = net(train_x)


tests/st/pynative/loss_scale/test_loss_scale.py (+1 -1)

@@ -60,7 +60,7 @@ class MindData:
     def output_shapes(self):
         return self._output_shapes

-    def create_tuple_iterator(self):
+    def create_tuple_iterator(self, num_epochs=-1):
         return self

     @property


tests/st/pynative/test_pynative_resnet50.py (+1 -2)

@@ -418,7 +418,7 @@ def test_pynative_resnet50():
     max_step = 21
     exceed_num = 0
     data_set = create_dataset(repeat_num=1, training=True, batch_size=batch_size)
-    for element in data_set.create_dict_iterator():
+    for element in data_set.create_dict_iterator(num_epochs=1):
        step = step + 1
        if step > max_step:
            break

@@ -434,4 +434,3 @@ def test_pynative_resnet50():
         if step > 1 and cost_time > 0.25:
             exceed_num = exceed_num + 1
     assert exceed_num < 20
-

tests/ut/python/dataset/test_random_posterize.py (+2 -2)

@@ -138,8 +138,8 @@ def test_random_posterize_default_c_md5(plot=False, run_golden=True):

     image_posterize = []
     image_original = []
-    for item1, item2 in zip(data1.create_dict_iterator(output_numpy=True),
-                            data2.create_dict_iterator(output_numpy=True)):
+    for item1, item2 in zip(data1.create_dict_iterator(output_numpy=True, num_epochs=1),
+                            data2.create_dict_iterator(output_numpy=True, num_epochs=1)):
         image1 = item1["image"]
         image2 = item2["image"]
         image_posterize.append(image1)


tests/ut/python/parallel/test_auto_parallel_double_subgraphs.py (+1 -1)

@@ -146,7 +146,7 @@ class DatasetLenet():
     def get_repeat_count(self):
         return 1

-    def create_tuple_iterator(self):
+    def create_tuple_iterator(self, num_epochs=-1):
         return self

 def test_double_subgraphs_train():


tests/ut/python/parallel/test_auto_parallel_resnet.py (+1 -1)

@@ -275,7 +275,7 @@ class DatasetLenet():
     def get_repeat_count(self):
         return 1

-    def create_tuple_iterator(self):
+    def create_tuple_iterator(self, num_epochs=-1):
         return self




tests/ut/python/parallel/test_bias_add.py (+1 -1)

@@ -61,7 +61,7 @@ class DatasetLenet():
     def get_repeat_count(self):
         return 1

-    def create_tuple_iterator(self):
+    def create_tuple_iterator(self, num_epochs=-1):
         return self




tests/ut/python/parallel/test_gather_v2_primitive.py (+1 -1)

@@ -59,7 +59,7 @@ class Dataset():
     def get_repeat_count(self):
         return 1

-    def create_tuple_iterator(self):
+    def create_tuple_iterator(self, num_epochs=-1):
         return self



