# Builtin Configurations (DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing)
enable_modelarts: False
# Url for modelarts
data_url: ""
train_url: ""
checkpoint_url: ""
# Path for local
data_path: "/cache/data"
output_path: "/cache/train"
load_path: "/cache/checkpoint_path/"
device_target: 'Ascend'
enable_profiling: False

# ==============================================================================
# Training options
dataset: 'SUBJ'
pre_trained: False
num_classes: 2
batch_size: 64
epoch_size: 4
weight_decay: 3e-5
keep_checkpoint_max: 1
checkpoint_path: './checkpoint/'
checkpoint_file_path: 'train_textcnn-4_149.ckpt'
word_len: 51
vec_length: 40
base_lr: 1e-3

# Export options
device_id: 0
ckpt_file: ""
file_name: ""
file_format: ""

---

# Help description for each configuration
enable_modelarts: 'Whether training on ModelArts, default: False'
data_url: 'Dataset url for obs'
train_url: 'Training output url for obs'
checkpoint_url: 'The location of checkpoint for obs'
data_path: 'Dataset path for local'
output_path: 'Training output path for local'
load_path: 'The location of checkpoint for local'
device_target: 'Target device type, available: [Ascend, GPU, CPU]'
enable_profiling: 'Whether to enable profiling while training, default: False'
dataset: "Dataset to train and evaluate on, choice: ['MR', 'SUBJ', 'SST2']"
pre_trained: 'Whether to load a pre-trained checkpoint, default: False'
num_classes: 'Number of classes in the dataset'
batch_size: 'Batch size for training and evaluation'
epoch_size: 'Total training epochs.'
weight_decay: 'Weight decay.'
keep_checkpoint_max: 'Keep the last keep_checkpoint_max checkpoints.'
checkpoint_path: 'The directory in which checkpoints are saved.'
checkpoint_file_path: 'The location of the checkpoint file.'
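For reference, the file holds two YAML documents separated by `---`: the configuration values and their help strings. A minimal loading sketch is shown below, assuming the file is saved as `default_config.yaml` (the filename and the `load_config` helper are illustrative assumptions, not the repo's own parser) and that PyYAML is installed.

```python
# Minimal sketch: load the two-document config above with PyYAML.
# Assumptions: the file is named "default_config.yaml" (not stated in the
# config itself); this is NOT the project's own argument-parsing code.
import yaml
from types import SimpleNamespace


def load_config(path="default_config.yaml"):
    with open(path, "r") as f:
        # The file contains two YAML documents split by "---":
        # the first is the config values, the second the help text.
        config, helper = list(yaml.safe_load_all(f))
    return SimpleNamespace(**config), helper


if __name__ == "__main__":
    cfg, helper = load_config()
    print(cfg.dataset, cfg.batch_size)      # e.g. SUBJ 64
    print(helper.get("dataset"))            # help string for 'dataset'
    # Note: PyYAML reads exponent-only literals such as 1e-3 and 3e-5 as
    # strings, so cast numeric fields explicitly before use.
    lr = float(cfg.base_lr)
    wd = float(cfg.weight_decay)
```

Keeping values and help strings as separate YAML documents lets a loader expose the values as attributes while still printing per-key help, which is how such configs are typically surfaced on the command line.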