Browse Source

!469 add comment for export lite model

Merge pull request !469 from yangjie159/add-comment-of-export-lite-model
tags/v0.2.0-alpha
mindspore-ci-bot Gitee 6 years ago
parent
commit
b554a86832
2 changed files with 3 additions and 3 deletions
  1. +2
    -2
      mindspore/context.py
  2. +1
    -1
      mindspore/train/serialization.py

+ 2
- 2
mindspore/context.py View File

@@ -487,8 +487,8 @@ def set_context(**kwargs):
enable_loop_sink (bool): Whether to enable loop sink. Default: False.
enable_task_sink (bool): Whether to enable task sink. Default: True.
enable_mem_reuse (bool): Whether to enable memory reuse. Default: True.
save_ms_model (bool): Whether to save model converted by graph. Default: False.
save_ms_model_path (str): Path to save converted model. Default: "."
save_ms_model (bool): Whether to save lite model converted by graph. Default: False.
save_ms_model_path (str): Path to save converted lite model. Default: "."
enable_gpu_summary (bool): Whether to enable gpu summary. Default: True.
save_graphs_path (str): Path to save graphs. Default: "."
enable_auto_mixed_precision (bool): Whether to enable auto mixed precision. Default: True.


+ 1
- 1
mindspore/train/serialization.py View File

@@ -426,7 +426,7 @@ def export(net, *inputs, file_name, file_format='GEIR'):
- GEIR: Graph Engine Intermediate Representation. An intermediate representation format of
Ascend model.
- ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
- LITE: Huawei model format for mobile.
- LITE: Huawei model format for mobile. A lite model used only by MindSpore Lite.
"""
logger.info("exporting model file:%s format:%s.", file_name, file_format)
check_input_data(*inputs, data_class=Tensor)


Loading…
Cancel
Save