You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

filereader.py 3.0 kB

4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879
  1. # Copyright 2019-2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ==============================================================================
  15. """
  16. This module is to read data from MindRecord.
  17. """
  18. from .shardreader import ShardReader
  19. from .shardheader import ShardHeader
  20. from .shardutils import populate_data
  21. from .shardutils import check_parameter
  22. from .common.exceptions import ParamTypeError
  23. __all__ = ['FileReader']
  24. class FileReader:
  25. """
  26. Class to read MindRecord files.
  27. Note:
  28. If `file_name` is a filename string, it tries to load all MindRecord files generated \
  29. in a conversion, and throws an exceptions if a MindRecord file is missing.
  30. If `file_name` is a filename list, only the MindRecord files in the list are loaded.
  31. Args:
  32. file_name (str, list[str]): One of MindRecord file or a file list.
  33. num_consumer(int, optional): Number of reader workers which load data. Default: 4.
  34. It should not be smaller than 1 or larger than the number of processor cores.
  35. columns (list[str], optional): A list of fields where corresponding data would be read. Default: None.
  36. operator(int, optional): Reserved parameter for operators. Default: None.
  37. Raises:
  38. ParamValueError: If `file_name`, `num_consumer` or `columns` is invalid.
  39. """
  40. @check_parameter
  41. def __init__(self, file_name, num_consumer=4, columns=None, operator=None):
  42. if columns:
  43. if isinstance(columns, list):
  44. self._columns = columns
  45. else:
  46. raise ParamTypeError('columns', 'list')
  47. else:
  48. self._columns = None
  49. self._reader = ShardReader()
  50. self._reader.open(file_name, num_consumer, columns, operator)
  51. self._header = ShardHeader(self._reader.get_header())
  52. self._reader.launch()
  53. def get_next(self):
  54. """
  55. Yield a batch of data according to columns at a time.
  56. Yields:
  57. Dict: a batch whose keys are the same as columns.
  58. Raises:
  59. MRMUnsupportedSchemaError: If schema is invalid.
  60. """
  61. iterator = self._reader.get_next()
  62. while iterator:
  63. for blob, raw in iterator:
  64. yield populate_data(raw, blob, self._columns, self._header.blob_fields, self._header.schema)
  65. iterator = self._reader.get_next()
  66. def close(self):
  67. """Stop reader worker and close File."""
  68. self._reader.close()