diff --git a/models/cloudbrain.go b/models/cloudbrain.go index 42b695ba8..b5e1c8627 100755 --- a/models/cloudbrain.go +++ b/models/cloudbrain.go @@ -72,6 +72,7 @@ const ( ModelArtsStopping ModelArtsJobStatus = "STOPPING" //停止中 ModelArtsStopped ModelArtsJobStatus = "STOPPED" //停止 ModelArtsUnavailable ModelArtsJobStatus = "UNAVAILABLE" //故障 + ModelArtsDeleting ModelArtsJobStatus = "DELETING" //删除中 ModelArtsDeleted ModelArtsJobStatus = "DELETED" //已删除 ModelArtsResizing ModelArtsJobStatus = "RESIZING" //规格变更中 ModelArtsResizFailed ModelArtsJobStatus = "RESIZE_FAILED" //规格变更失败 @@ -1085,6 +1086,7 @@ type DatasetDownload struct { DatasetName string `json:"dataset_name"` DatasetDownloadLink string `json:"dataset_download_link"` RepositoryLink string `json:"repository_link"` + IsDelete bool `json:"is_delete"` } type DataSource struct { @@ -1920,9 +1922,9 @@ func GetCloudbrainCountByUserID(userID int64, jobType string) (int, error) { func GetCloudbrainRunCountByRepoID(repoID int64) (int, error) { count, err := x.In("status", JobWaiting, JobRunning, ModelArtsCreateQueue, ModelArtsCreating, ModelArtsStarting, - ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsRestarting, ModelArtsTrainJobInit, + ModelArtsReadyToStart, ModelArtsResizing, ModelArtsStartQueuing, ModelArtsRunning, ModelArtsDeleting, ModelArtsRestarting, ModelArtsTrainJobInit, ModelArtsTrainJobImageCreating, ModelArtsTrainJobSubmitTrying, ModelArtsTrainJobWaiting, ModelArtsTrainJobRunning, ModelArtsStopping, ModelArtsResizing, - ModelArtsTrainJobScaling, ModelArtsTrainJobCheckInit, ModelArtsTrainJobCheckRunning, ModelArtsTrainJobCheckRunningCompleted).And("repo_id = ?", repoID).Count(new(Cloudbrain)) + ModelArtsTrainJobScaling, ModelArtsTrainJobCheckInit, ModelArtsTrainJobCheckRunning, ModelArtsTrainJobKilling, ModelArtsTrainJobCheckRunningCompleted).And("repo_id = ?", repoID).Count(new(Cloudbrain)) return int(count), err } diff --git a/models/dataset.go b/models/dataset.go 
index e91adb7d2..22a20e328 100755 --- a/models/dataset.go +++ b/models/dataset.go @@ -121,8 +121,10 @@ func (datasets DatasetList) loadAttachmentAttributes(opts *SearchDatasetOptions) for i := range datasets { if attachment.DatasetID == datasets[i].ID { - if opts.StarByMe { + if !attachment.IsPrivate{ + datasets[i].Attachments = append(datasets[i].Attachments, attachment) + }else{ permission, ok := permissionMap[datasets[i].ID] if !ok { @@ -136,7 +138,7 @@ func (datasets DatasetList) loadAttachmentAttributes(opts *SearchDatasetOptions) } if !permission { isCollaborator, _ := datasets[i].Repo.IsCollaborator(opts.User.ID) - if isCollaborator { + if isCollaborator ||datasets[i].Repo.IsOwnedBy(opts.User.ID){ log.Info("Collaborator user may visit the attach.") permission = true } @@ -147,11 +149,7 @@ func (datasets DatasetList) loadAttachmentAttributes(opts *SearchDatasetOptions) if permission { datasets[i].Attachments = append(datasets[i].Attachments, attachment) - } else if !attachment.IsPrivate { - datasets[i].Attachments = append(datasets[i].Attachments, attachment) } - } else { - datasets[i].Attachments = append(datasets[i].Attachments, attachment) } } @@ -171,16 +169,17 @@ func (datasets DatasetList) loadAttachmentAttributes(opts *SearchDatasetOptions) } type SearchDatasetOptions struct { - Keyword string - OwnerID int64 - User *User - RepoID int64 - IncludePublic bool - RecommendOnly bool - Category string - Task string - License string - DatasetIDs []int64 + Keyword string + OwnerID int64 + User *User + RepoID int64 + IncludePublic bool + RecommendOnly bool + Category string + Task string + License string + DatasetIDs []int64 + ExcludeDatasetId int64 ListOptions SearchOrderBy IsOwner bool @@ -240,6 +239,10 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond { cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID}) } + if opts.ExcludeDatasetId > 0 { + cond = cond.And(builder.Neq{"dataset.id": opts.ExcludeDatasetId}) + } + if 
opts.PublicOnly { cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic}) cond = cond.And(builder.Eq{"attachment.is_private": false}) diff --git a/modules/cloudbrain/resty.go b/modules/cloudbrain/resty.go index 8387d481a..170154643 100755 --- a/modules/cloudbrain/resty.go +++ b/modules/cloudbrain/resty.go @@ -93,7 +93,7 @@ sendjob: return nil, fmt.Errorf("resty get queues detail failed: %s", err) } - if jobResult.Code == errInvalidToken && retry < 1 { + if (res.StatusCode() == http.StatusUnauthorized || jobResult.Code == errInvalidToken) && retry < 1 { retry++ _ = loginCloudbrain() goto sendjob diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index a2e714465..5eac4cf2e 100755 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -924,7 +924,7 @@ dataset_name_tooltips = Please enter letters, numbers, _ and - up to 100 charact dataset_no_create = No dataset has been created yet dataset_explain = Dataset: CloudBrain I provides CPU/GPU resources, Cloudbrain II provides Ascend NPU resources, and the data set used for debugging also needs to be uploaded to the corresponding environment; dataset_instructions_for_use = Instructions for use: You can refer to Openi AI Collaboration Platform -dataset_camp_course = Newcomer Training Camp Course; +dataset_camp_course = OpenI_Learning; dataset_upload = Upload dataset_upload_status= Upload Status dataset_file_name = File Name @@ -959,6 +959,7 @@ unfavorite=Unlike favorite=Like disassociate=Disassociate benchmark_dataset_tip=Note: first use the dataset function to upload the model, and then select the model from the dataset list. 
+file_deleted=The file has been deleted [repo] owner = Owner @@ -1140,7 +1141,7 @@ modelarts.train_job.compute_node=Compute Node modelarts.create_model = Create Model modelarts.model_label=Model Label modelarts.infer_dataset = Inference Dataset - +modelarts.train_job.label_place=Input labels, multiple labels are separated by spaces modelarts.train_job.basic_info=Basic Info modelarts.train_job.job_status=Job Status @@ -1221,7 +1222,10 @@ model_Evaluation_not_created = Model evaluation has not been created repo_not_initialized = Code version: You have not initialized the code repository, please initialized first ; debug_task_running_limit =Running time: no more than 4 hours, it will automatically stop if it exceeds 4 hours; dataset_desc = Dataset: Cloud Brain 1 provides CPU/GPU,Cloud Brain 2 provides Ascend NPU.And dataset also needs to be uploaded to the corresponding environment; -platform_instructions = Instructions for use: You can refer to the Xiaobai training camp course of Openi AI collaboration platform. +platform_instructions = Instructions for use: You can refer to the OpenI_Learning course of OpenI AI collaboration platform. +platform_instructions1 = Instructions for use: You can refer to the +platform_instructions2 = OpenI_Learning +platform_instructions3 = course of OpenI AI collaboration platform. model_not_exist = Model file: You do not have a model file yet, please generate and export the model through the training task first ; benchmark_leaderboards = Benchmark leaderboards @@ -1244,11 +1248,11 @@ model.convert=Model Transformation model.list=Model List model.manage.create_new_convert_task=Create Model Transformation Task -model.manage.notcreatemodel=No model has been created. 
+model.manage.notcreatemodel=No model has been created model.manage.init1=Code version: You have not initialized the code repository, please model.manage.init2=initialized first ; model.manage.createtrainjob_tip=Training task: you haven't created a training task, please create it first -model.manage.createtrainjob=Training task +model.manage.createtrainjob=Training task. model.manage.delete=Delete Model model.manage.delete_confirm=Are you sure to delete this model? Once this model is deleted, it cannot be restored. model.manage.select.trainjob=Select train task @@ -1260,9 +1264,9 @@ model.manage.modellabel=Model label model.manage.modeldesc=Model description model.manage.baseinfo=Base Information modelconvert.notcreate=No model conversion task has been created. -modelconvert.importfirst1=Please import first -modelconvert.importfirst2=download model -modelconvert.importfirst3=, then converts it. +modelconvert.importfirst1=Please import the +modelconvert.importfirst2=model +modelconvert.importfirst3=first, then convert it. 
modelconvert.download=Download modelconvert.taskname=Task name modelconvert.modelname=Model name diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini index c6cb6a576..2fbd3ab52 100755 --- a/options/locale/locale_zh-CN.ini +++ b/options/locale/locale_zh-CN.ini @@ -965,6 +965,7 @@ unfavorite=取消收藏 favorite=收藏 disassociate=取消关联 benchmark_dataset_tip=说明:先使用数据集功能上传模型,然后从数据集列表选模型。 +file_deleted=文件已经被删除 [repo] owner=拥有者 @@ -1235,6 +1236,10 @@ repo_not_initialized = 代码版本:您还没有初始化代码仓库,请先 debug_task_running_limit = 运行时长:最长不超过4个小时,超过4个小时将自动停止; dataset_desc = 数据集:云脑1提供 CPU / GPU 资源,云脑2提供 Ascend NPU 资源,调试使用的数据集也需要上传到对应的环境; platform_instructions = 使用说明:可以参考启智AI协作平台小白训练营课程。 +platform_instructions1 = 使用说明:可以参考启智AI协作平台 +platform_instructions2 = 小白训练营课程 +platform_instructions3 = 。 + model_not_exist = 模型文件:您还没有模型文件,请先通过训练任务产生并 导出模型 ; benchmark_leaderboards = 基准测试排行榜 @@ -1261,7 +1266,7 @@ model.manage.notcreatemodel=未创建过模型 model.manage.init1=代码版本:您还没有初始化代码仓库,请先 model.manage.init2=创建代码版本; model.manage.createtrainjob_tip=训练任务:您还没创建过训练任务,请先创建 -model.manage.createtrainjob=训练任务 +model.manage.createtrainjob=训练任务。 model.manage.delete=删除模型 model.manage.delete_confirm=你确认删除该模型么?此模型一旦删除不可恢复。 model.manage.select.trainjob=选择训练任务 @@ -1274,7 +1279,7 @@ model.manage.modeldesc=模型描述 model.manage.baseinfo=基本信息 modelconvert.notcreate=未创建过模型转换任务 modelconvert.importfirst1=请您先导入 -modelconvert.importfirst2=模型下载 +modelconvert.importfirst2=模型 modelconvert.importfirst3=,然后再对其进行转换。 modelconvert.download=下载 modelconvert.taskname=任务名称 diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go index c1e89dde5..2d8bebf4b 100755 --- a/routers/repo/cloudbrain.go +++ b/routers/repo/cloudbrain.go @@ -928,7 +928,7 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo } } - ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false) + ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false) ctx.Data["task"] = 
task labelName := strings.Fields(task.LabelName) ctx.Data["LabelName"] = labelName diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go index f047cdaa9..d65a9f2aa 100755 --- a/routers/repo/dataset.go +++ b/routers/repo/dataset.go @@ -529,6 +529,10 @@ func ReferenceDatasetAvailable(ctx *context.Context) { NeedAttachment: false, CloudBrainType: models.TypeCloudBrainAll, } + dataset, _ := models.GetDatasetByRepo(&models.Repository{ID: ctx.Repo.Repository.ID}) + if dataset != nil { + opts.ExcludeDatasetId = dataset.ID + } datasetMultiple(ctx, opts) } diff --git a/routers/repo/grampus.go b/routers/repo/grampus.go index 33e111df2..6fc77a454 100755 --- a/routers/repo/grampus.go +++ b/routers/repo/grampus.go @@ -713,7 +713,7 @@ func GrampusTrainJobShow(ctx *context.Context) { taskList := make([]*models.Cloudbrain, 0) taskList = append(taskList, task) ctx.Data["version_list_task"] = taskList - ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false) + ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false) ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) ctx.Data["displayJobName"] = task.DisplayJobName diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go index e230c57c2..bff00f0c5 100755 --- a/routers/repo/modelarts.go +++ b/routers/repo/modelarts.go @@ -238,7 +238,7 @@ func NotebookShow(ctx *context.Context) { datasetDownload := make([]models.DatasetDownload, 0) if ctx.IsSigned { if task.Uuid != "" && task.UserID == ctx.User.ID { - datasetDownload = GetCloudBrainDataSetInfo(task.Uuid, true) + datasetDownload = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, true) } } user, err := models.GetUserByID(task.UserID) @@ -281,36 +281,52 @@ func NotebookShow(ctx *context.Context) { ctx.HTML(200, tplModelArtsNotebookShow) } -func GetCloudBrainDataSetInfo(uuid string, isNeedDown bool) []models.DatasetDownload { +func GetCloudBrainDataSetInfo(uuid string, datasetname string, isNeedDown bool) 
[]models.DatasetDownload { datasetDownload := make([]models.DatasetDownload, 0) - + if len(uuid) == 0 { + return datasetDownload + } uuidList := strings.Split(uuid, ";") - for _, uuidStr := range uuidList { + datasetnameList := strings.Split(datasetname, ";") + for i, uuidStr := range uuidList { + name := "" + link := "" + url := "" + isDelete := false attachment, err := models.GetAttachmentByUUID(uuidStr) if err != nil { log.Error("GetAttachmentByUUID failed:%v", err.Error()) - return datasetDownload - } - dataset, err := models.GetDatasetByID(attachment.DatasetID) - if err != nil { - log.Error("GetDatasetByID failed:%v", err.Error()) - return datasetDownload - } - repo, err := models.GetRepositoryByID(dataset.RepoID) - if err != nil { - log.Error("GetRepositoryByID failed:%v", err.Error()) - return datasetDownload - } - url := "" - if isNeedDown { - url = attachment.S3DownloadURL() + if len(datasetnameList) <= i || len(datasetname) == 0 { + continue + } + name = datasetnameList[i] + isDelete = true + } else { + name = attachment.Name + dataset, err := models.GetDatasetByID(attachment.DatasetID) + if err != nil { + log.Error("GetDatasetByID failed:%v", err.Error()) + } else { + repo, err := models.GetRepositoryByID(dataset.RepoID) + if err != nil { + log.Error("GetRepositoryByID failed:%v", err.Error()) + } else { + link = repo.Link() + "/datasets" + } + } + if isNeedDown { + url = attachment.S3DownloadURL() + } } + datasetDownload = append(datasetDownload, models.DatasetDownload{ - DatasetName: attachment.Name, + DatasetName: name, DatasetDownloadLink: url, - RepositoryLink: repo.Link() + "/datasets", + RepositoryLink: link, + IsDelete: isDelete, }) } + log.Info("dataset length=" + fmt.Sprint(len(datasetDownload))) return datasetDownload } @@ -898,14 +914,17 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error { } _, _, datasetNames, _, err := getDatasUrlListByUUIDS(task.Uuid) if err != nil { - ctx.ServerError("GetAllUserAttachments failed:", err) - 
return err + log.Info("query dataset error," + err.Error()) + //ctx.ServerError("GetAllUserAttachments failed:", err) + //return err + } else { + ctx.Data["dataset_name"] = datasetNames } ctx.Data["branches"] = branches ctx.Data["branch_name"] = task.BranchName ctx.Data["description"] = task.Description ctx.Data["boot_file"] = task.BootFile - ctx.Data["dataset_name"] = datasetNames + ctx.Data["work_server_number"] = task.WorkServerNumber ctx.Data["flavor_name"] = task.FlavorName ctx.Data["engine_name"] = task.EngineName @@ -1759,7 +1778,7 @@ func TrainJobShow(ctx *context.Context) { } else { VersionListTasks[i].Parameters = "" } - datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, false)) + datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false)) VersionListTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain) VersionListTasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain) } @@ -2475,7 +2494,7 @@ func InferenceJobShow(ctx *context.Context) { ctx.Data["displayJobName"] = task.DisplayJobName ctx.Data["task"] = task ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task) - ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false) + ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, task.DatasetName, false) tempUids := []int64{} tempUids = append(tempUids, task.UserID) JobCreater, err := models.GetUserNamesByIDs(tempUids) diff --git a/templates/custom/wait_count_train.tmpl b/templates/custom/wait_count_train.tmpl index 4b7e2dac3..9249b5676 100644 --- a/templates/custom/wait_count_train.tmpl +++ b/templates/custom/wait_count_train.tmpl @@ -18,7 +18,7 @@ > {{.ctx.i18n.Tr "repo.wait_count_start"}} - {{if .type}} + {{if .ctx.QueuesDetail}} {{ $gpuQueue }} {{else}} {{.ctx.WaitCount}} diff --git a/templates/repo/cloudbrain/inference/show.tmpl b/templates/repo/cloudbrain/inference/show.tmpl index 848ccfc20..3ec01417e 100644 --- 
a/templates/repo/cloudbrain/inference/show.tmpl +++ b/templates/repo/cloudbrain/inference/show.tmpl @@ -500,7 +500,13 @@ {{range $m ,$n := $.datasetDownload}} - {{.DatasetName}} + + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + {{end}} diff --git a/templates/repo/cloudbrain/show.tmpl b/templates/repo/cloudbrain/show.tmpl index bdde80772..76363314d 100755 --- a/templates/repo/cloudbrain/show.tmpl +++ b/templates/repo/cloudbrain/show.tmpl @@ -498,7 +498,13 @@ {{range $m ,$n := $.datasetDownload}} - {{.DatasetName}} + + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + {{end}} diff --git a/templates/repo/cloudbrain/trainjob/show.tmpl b/templates/repo/cloudbrain/trainjob/show.tmpl index 14967c900..ba886cd2f 100644 --- a/templates/repo/cloudbrain/trainjob/show.tmpl +++ b/templates/repo/cloudbrain/trainjob/show.tmpl @@ -464,7 +464,13 @@ {{range $m ,$n := $.datasetDownload}} - {{.DatasetName}} + + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + {{end}} @@ -594,7 +600,7 @@
- + diff --git a/templates/repo/datasets/index.tmpl b/templates/repo/datasets/index.tmpl index 405758f50..eb7bcec21 100755 --- a/templates/repo/datasets/index.tmpl +++ b/templates/repo/datasets/index.tmpl @@ -417,7 +417,7 @@
{{.i18n.Tr "dataset.dataset_explain"}}
{{.i18n.Tr "dataset.dataset_instructions_for_use"}}{{.i18n.Tr "dataset.dataset_camp_course"}}
+ href="https://git.openi.org.cn/zeizei/OpenI_Learning"> {{.i18n.Tr "dataset.dataset_camp_course"}}
diff --git a/templates/repo/grampus/trainjob/show.tmpl b/templates/repo/grampus/trainjob/show.tmpl index 148de401c..1b6dfc901 100755 --- a/templates/repo/grampus/trainjob/show.tmpl +++ b/templates/repo/grampus/trainjob/show.tmpl @@ -419,7 +419,12 @@
{{range $m ,$n := $.datasetDownload}} - {{.DatasetName}} + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + {{end}}
@@ -551,7 +556,7 @@
- + diff --git a/templates/repo/modelarts/inferencejob/show.tmpl b/templates/repo/modelarts/inferencejob/show.tmpl index e578e139c..c3855cafd 100644 --- a/templates/repo/modelarts/inferencejob/show.tmpl +++ b/templates/repo/modelarts/inferencejob/show.tmpl @@ -441,7 +441,14 @@ td, th { {{range $m ,$n := $.datasetDownload}} - {{.DatasetName}} + + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + + {{end}} diff --git a/templates/repo/modelarts/notebook/show.tmpl b/templates/repo/modelarts/notebook/show.tmpl index a75fb6f89..a86724380 100755 --- a/templates/repo/modelarts/notebook/show.tmpl +++ b/templates/repo/modelarts/notebook/show.tmpl @@ -439,7 +439,14 @@ {{range $.datasetDownload}} - {{.DatasetName}} + + + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + {{.DatasetDownloadLink}} {{$.i18n.Tr "dataset.download_copy"}} diff --git a/templates/repo/modelarts/trainjob/show.tmpl b/templates/repo/modelarts/trainjob/show.tmpl index 9cc7332d6..73b8ac61a 100755 --- a/templates/repo/modelarts/trainjob/show.tmpl +++ b/templates/repo/modelarts/trainjob/show.tmpl @@ -489,7 +489,13 @@ {{if eq $k $m}} {{range $f ,$g := $n}} - {{.DatasetName}} + + {{if eq .IsDelete true}} + {{.DatasetName}}({{$.i18n.Tr "dataset.file_deleted"}}) + {{else}} + {{.DatasetName}} + {{end}} + {{end}} {{end}} @@ -604,7 +610,7 @@
- + diff --git a/templates/repo/modelarts/trainjob/version_new.tmpl b/templates/repo/modelarts/trainjob/version_new.tmpl index c135bcd44..7343245df 100644 --- a/templates/repo/modelarts/trainjob/version_new.tmpl +++ b/templates/repo/modelarts/trainjob/version_new.tmpl @@ -73,12 +73,50 @@ - {{template "custom/wait_count_train" Dict "ctx" $}} -
+ +

{{.i18n.Tr "repo.modelarts.train_job.basic_info"}}:

+ +
+ + +
+
+ {{template "custom/wait_count_train" Dict "ctx" $}} +
+
{{.i18n.Tr "cloudbrain.train_dataset_path_rule" | Safe}}
-

{{.i18n.Tr "repo.modelarts.train_job.basic_info"}}:

diff --git a/templates/repo/modelmanage/convertIndex.tmpl b/templates/repo/modelmanage/convertIndex.tmpl index 78806dff7..2f5ee6c35 100644 --- a/templates/repo/modelmanage/convertIndex.tmpl +++ b/templates/repo/modelmanage/convertIndex.tmpl @@ -50,9 +50,9 @@
{{$.i18n.Tr "repo.modelconvert.notcreate"}}
{{if eq .MODEL_COUNT 0}} -
{{$.i18n.Tr "repo.modelconvert.importfirst1"}}{{$.i18n.Tr "repo.modelconvert.importfirst2"}}{{$.i18n.Tr "repo.modelconvert.importfirst3"}}
+
{{$.i18n.Tr "repo.modelconvert.importfirst1"}} {{$.i18n.Tr "repo.modelconvert.importfirst2"}} {{$.i18n.Tr "repo.modelconvert.importfirst3"}}
{{end}} -
{{$.i18n.Tr "repo.platform_instructions"}}
+
{{$.i18n.Tr "repo.platform_instructions1"}} {{$.i18n.Tr "repo.platform_instructions2"}} {{$.i18n.Tr "repo.platform_instructions3"}}
diff --git a/templates/repo/modelmanage/index.tmpl b/templates/repo/modelmanage/index.tmpl index 76739d80f..3a5240768 100644 --- a/templates/repo/modelmanage/index.tmpl +++ b/templates/repo/modelmanage/index.tmpl @@ -71,9 +71,9 @@ {{end}} {{if eq $.TRAIN_COUNT 0}}
{{$.i18n.Tr "repo.model.manage.createtrainjob_tip"}}{{$.i18n.Tr "repo.model.manage.createtrainjob"}}
+ href="{{.RepoLink}}/modelarts/train-job"> {{$.i18n.Tr "repo.model.manage.createtrainjob"}}
{{end}} -
{{$.i18n.Tr "repo.platform_instructions"}}
+
{{$.i18n.Tr "repo.platform_instructions1"}} {{$.i18n.Tr "repo.platform_instructions2"}} {{$.i18n.Tr "repo.platform_instructions3"}}
@@ -330,7 +330,7 @@ .modal({ centered: false, onShow: function () { - $('#model_header').text("导入新模型") + $('#model_header').text({{.i18n.Tr "repo.model.manage.import_new_model"}}) $('input[name="Version"]').addClass('model_disabled') $('.ui.dimmer').css({ "background-color": "rgb(136, 136, 136,0.7)" }) $("#job-name").empty() diff --git a/templates/repo/modelmanage/showinfo.tmpl b/templates/repo/modelmanage/showinfo.tmpl index 4acf394f2..e18c6bd99 100644 --- a/templates/repo/modelmanage/showinfo.tmpl +++ b/templates/repo/modelmanage/showinfo.tmpl @@ -125,7 +125,7 @@ - {{$.i18n.Tr "repo.model.manage.createtrainjob"}} + {{$.i18n.Tr "repo.modelarts.train_job"}} diff --git a/web_src/js/components/Model.vue b/web_src/js/components/Model.vue index a76fe324b..0a555588a 100644 --- a/web_src/js/components/Model.vue +++ b/web_src/js/components/Model.vue @@ -197,11 +197,12 @@ export default { this.getModelList() }, showcreateVue(name,version,label){ + let title= this.i18n.model_create_version_title; $('.ui.modal.second') .modal({ centered: false, onShow:function(){ - $('#model_header').text("创建模型新版本") + $('#model_header').text(title) $('input[name="Name"]').addClass('model_disabled') $('input[name="Name"]').attr('readonly','readonly') $('input[name="modelSelectedFile"]').attr('readonly','readonly') diff --git a/web_src/js/components/dataset/referenceDataset.vue b/web_src/js/components/dataset/referenceDataset.vue index e2c7d5b9c..b01c3dfe5 100644 --- a/web_src/js/components/dataset/referenceDataset.vue +++ b/web_src/js/components/dataset/referenceDataset.vue @@ -248,7 +248,7 @@ > - + {{i18n.dataset_label}}{{ i18n.dataset_label }} - + {{i18n.dataset_select}} + >{{ i18n.dataset_select }} - + - {{i18n.dataset_unziping}} + {{ i18n.dataset_unziping }} - {{i18n.dataset_unzip_failed}} + {{ i18n.dataset_unzip_failed }} @@ -201,7 +205,11 @@
- + - {{i18n.dataset_unziping}} + {{ i18n.dataset_unziping }} - {{i18n.dataset_unzip_failed}} + {{ i18n.dataset_unzip_failed }} @@ -380,13 +388,13 @@ class="zip-loading" v-if="data.DecompressState === 2" > - {{i18n.dataset_unziping}} + {{ i18n.dataset_unziping }} - {{i18n.dataset_unzip_failed}} + {{ i18n.dataset_unzip_failed }} @@ -486,13 +494,13 @@ class="zip-loading" v-if="data.DecompressState === 2" > - {{i18n.dataset_unziping}} + {{ i18n.dataset_unziping }} - {{i18n.dataset_unzip_failed}} + {{ i18n.dataset_unzip_failed }} @@ -536,9 +544,9 @@ line-height: 40px; " > - {{i18n.dataset_selected}} + {{ i18n.dataset_selected }}
-
+
{{i18n.dataset_ok}}{{ i18n.dataset_ok }}
@@ -732,7 +740,6 @@ export default { .then((res) => { this.loadingCurrent = false; let data = JSON.parse(res.data.data); - console.log(data); this.currentDatasetList = this.transformeTreeData( data, "currentTree", @@ -978,7 +985,10 @@ export default { let hasSelectDatasetName = $(".cloudbrain-type") .data("dataset-name") .split(";"); - if (this.hasSelectDatasetList.length !== 0) { + if ( + this.hasSelectDatasetList.length !== 0 && + hasSelectDatasetName[0] !== "" + ) { this.saveStatusList = this.hasSelectDatasetList; this.checkList = hasSelectDatasetName; this.hasSelectDatasetList.forEach((item, index) => { @@ -996,7 +1006,6 @@ export default { location.href.indexOf("train-job") !== -1 || location.href.indexOf("inference") !== -1 ) { - console.log("this.benchmarkNew"); this.benchmarkNew = true; } if ( diff --git a/web_src/js/features/i18nVue.js b/web_src/js/features/i18nVue.js index 4740f01cb..88afaf8ff 100644 --- a/web_src/js/features/i18nVue.js +++ b/web_src/js/features/i18nVue.js @@ -95,6 +95,8 @@ export const i18nVue = { model_create_new_ver: "创建新版本", model_download: "下载", model_delete: "删除", + model_create_title: "导入新模型", + model_create_version_title: "创建模型新版本", }, US: { computer_vision: "computer vision", @@ -148,11 +150,11 @@ export const i18nVue = { downloads: "Downloads", not_link_dataset: "No datasets have been linked yet", no_link_dataset_tips1: - "You can display public datasets on the platform here by clicking the New Linked Dataset button.", + "You can display public datasets on the platform here by clicking the Linked Datasets button.", dataset_instructions_for_use: - "Instructions for use: You can refer to Openi AI Collaboration Platform ", - dataset_camp_course: " Newcomer Training Camp Course", + "Instructions for use: You can refer to OpenI AI Collaboration Platform ", + dataset_camp_course: " OpenI_Learning", dataset_link_success: "Linked dataset succeeded!", dataset_link_failed: "Linked dataset Failed!", dataset_over_nums: "Linked over ? 
datasets!", @@ -196,5 +198,7 @@ export const i18nVue = { model_create_new_ver: "New Version", model_download: "Download", model_delete: "Delete", + model_create_title: "Import new model", + model_create_version_title: "Create a new version of the model", }, };