diff --git a/README.md b/README.md
index 99f6a6e8c..1d9ab8d06 100644
--- a/README.md
+++ b/README.md
@@ -54,4 +54,7 @@
 ## 平台引用
 如果本平台对您的科研工作提供了帮助,可在论文致谢中加入:
 英文版:```Thanks for the support provided by OpenI Community (https://git.openi.org.cn).```
-中文版:```感谢启智社区提供的技术支持(https://git.openi.org.cn)。```
\ No newline at end of file
+中文版:```感谢启智社区提供的技术支持(https://git.openi.org.cn)。```
+
+如果您的成果中引用了本平台,也欢迎在下述开源项目中提交您的成果信息:
+https://git.openi.org.cn/OpenIOSSG/references
diff --git a/go.mod b/go.mod
index 387a34520..3b83aced9 100755
--- a/go.mod
+++ b/go.mod
@@ -22,6 +22,7 @@ require (
 github.com/PuerkitoBio/goquery v1.5.0
 github.com/RichardKnop/machinery v1.6.9
 github.com/RoaringBitmap/roaring v0.4.23 // indirect
+ github.com/alecthomas/chroma v0.10.0
 github.com/alibabacloud-go/darabonba-openapi v0.1.18
 github.com/alibabacloud-go/dysmsapi-20170525/v2 v2.0.9
 github.com/alibabacloud-go/tea v1.1.17
@@ -120,8 +121,9 @@ require (
 github.com/urfave/cli v1.22.1
 github.com/xanzy/go-gitlab v0.31.0
 github.com/yohcop/openid-go v1.0.0
- github.com/yuin/goldmark v1.1.30
- github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60
+ github.com/yuin/goldmark v1.4.13
+ github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594
+ github.com/yuin/goldmark-meta v1.1.0
 golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37
 golang.org/x/mod v0.3.0 // indirect
 golang.org/x/net v0.0.0-20200513185701-a91f0712d120
@@ -138,7 +140,7 @@ require (
 gopkg.in/ldap.v3 v3.0.2
 gopkg.in/macaron.v1 v1.3.9 // indirect
 gopkg.in/testfixtures.v2 v2.5.0
- gopkg.in/yaml.v2 v2.2.8
+ gopkg.in/yaml.v2 v2.3.0
 mvdan.cc/xurls/v2 v2.1.0
 strk.kbt.io/projects/go/libravatar v0.0.0-20191008002943-06d1c002b251
 xorm.io/builder v0.3.7
diff --git a/go.sum b/go.sum
index d55d7af48..6735a1938 100755
--- a/go.sum
+++ b/go.sum
@@ -76,6 +76,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
 github.com/Unknwon/com v0.0.0-20190321035513-0fed4efef755/go.mod h1:voKvFVpXBJxdIPeqjoJuLK+UVcRlo/JLjeToGxPYu68=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
 github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
+github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc=
@@ -203,6 +205,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
 github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
+github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
+github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
@@ -804,8 +808,16 @@ github.com/yuin/goldmark v1.1.27 h1:nqDD4MMMQA0lmWq03Z2/myGPYLQoXtmi0rGVs95ntbo=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.30 h1:j4d4Lw3zqZelDhBksEo3BnWg9xhXRQGJPPSL6OApZjI=
 github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.5/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
+github.com/yuin/goldmark v1.4.6/go.mod h1:rmuwmfZ0+bvzB24eSC//bk1R1Zp3hM0OXYv/G2LIilg=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594 h1:yHfZyN55+5dp1wG7wDKv8HQ044moxkyGq12KFFMFDxg=
+github.com/yuin/goldmark-highlighting v0.0.0-20220208100518-594be1970594/go.mod h1:U9ihbh+1ZN7fR5Se3daSPoz1CGF9IYtSvWwVQtnzGHU=
 github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60 h1:gZucqLjL1eDzVWrXj4uiWeMbAopJlBR2mKQAsTGdPwo=
 github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60/go.mod h1:i9VhcIHN2PxXMbQrKqXNueok6QNONoPjNMoj9MygVL0=
+github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc=
+github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0=
 github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
 github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@@ -1086,6 +1098,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
diff --git a/models/cloudbrain.go b/models/cloudbrain.go
index 333989b76..62bae29e2 100755
--- a/models/cloudbrain.go
+++ b/models/cloudbrain.go
@@ -371,6 +371,9 @@ type CloudbrainsOptions struct {
 ComputeResource string
 BeginTimeUnix int64
 EndTimeUnix int64
+ AiCenter string
+ NeedDeleteInfo string
+ Cluster string
 }
 
 type TaskPod struct {
@@ -1448,6 +1451,23 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) {
 )
 }
 }
+ if (opts.AiCenter) != "" {
+ cond = cond.And(
+ builder.Like{"cloudbrain.ai_center", opts.AiCenter},
+ )
+ }
+ if (opts.Cluster) != "" {
+ if opts.Cluster == "resource_cluster_openi" {
+ cond = cond.And(
+ builder.Or(builder.Eq{"cloudbrain.type": TypeCloudBrainOne}, builder.Eq{"cloudbrain.type": TypeCloudBrainTwo}),
+ )
+ }
+ if opts.Cluster == "resource_cluster_c2net" {
+ cond = cond.And(
+ builder.Eq{"cloudbrain.type": TypeC2Net},
+ )
+ }
+ }
 if (opts.IsLatestVersion) != "" {
 cond = cond.And(builder.Or(builder.And(builder.Eq{"cloudbrain.is_latest_version": opts.IsLatestVersion},
 builder.Eq{"cloudbrain.job_type": "TRAIN"}), builder.Neq{"cloudbrain.job_type": "TRAIN"}))
@@ -1725,21 +1745,6 @@ func GetCloudbrainsNeededStopByUserID(userID int64) ([]*Cloudbrain, error) {
 return cloudBrains, err
 }
 
-func GetWaittingTop() ([]*CloudbrainInfo, error) {
- sess := x.NewSession()
- defer sess.Close()
- var cond = builder.NewCond()
- cond = cond.And(
- builder.Eq{"cloudbrain.status": string(JobWaiting)},
- )
- sess.OrderBy("cloudbrain.created_unix ASC limit 1")
- cloudbrains := make([]*CloudbrainInfo, 0, 1)
- if err := sess.Table(&Cloudbrain{}).Where(cond).
- Find(&cloudbrains); err != nil {
- log.Info("find error.")
- }
- return cloudbrains, nil
-}
 func GetModelartsReDebugTaskByJobId(jobID string) ([]*Cloudbrain, error) {
 sess := x.NewSession()
 defer sess.Close()
@@ -2004,6 +2009,24 @@ func CloudbrainAll(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) {
 }
 }
 
+ if (opts.AiCenter) != "" {
+ cond = cond.And(
+ builder.Like{"cloudbrain.ai_center", opts.AiCenter},
+ )
+ }
+ if (opts.NeedDeleteInfo) != "" {
+ if opts.NeedDeleteInfo == "yes" {
+ cond = cond.And(
+ builder.And(builder.NotNull{"cloudbrain.deleted_at"}),
+ )
+ }
+ if opts.NeedDeleteInfo == "no" {
+ cond = cond.And(
+ builder.And(builder.IsNull{"cloudbrain.deleted_at"}),
+ )
+ }
+ }
+
 if (opts.IsLatestVersion) != "" {
 cond = cond.And(builder.Or(builder.And(builder.Eq{"cloudbrain.is_latest_version": opts.IsLatestVersion},
 builder.Eq{"cloudbrain.job_type": "TRAIN"}), builder.Neq{"cloudbrain.job_type": "TRAIN"}))
diff --git a/models/cloudbrain_static.go b/models/cloudbrain_static.go
index e3ac5e963..371b30f66 100644
--- a/models/cloudbrain_static.go
+++ b/models/cloudbrain_static.go
@@ -36,133 +36,6 @@ type TaskDetail struct {
 FlavorName string `json:"FlavorName"`
 }
 
-func GetDebugOnePeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and job_type ='" + string(JobTypeDebug) + "'" +
- " and type='" + strconv.Itoa(TypeCloudBrainOne) + "'"
-
- return x.SQL(countSql).Count()
-}
-func GetDebugOnePeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And job_type = ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), JobTypeDebug, TypeCloudBrainOne).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
-
- return total, nil
-}
-
-func GetTrainOnePeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and job_type ='" + string(JobTypeTrain) + "'" +
- " and type='" + strconv.Itoa(TypeCloudBrainOne) + "'"
-
- return x.SQL(countSql).Count()
-}
-func GetTrainOnePeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And job_type = ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), JobTypeTrain, TypeCloudBrainOne).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
-
- return total, nil
-}
-
-func GetBenchmarkOnePeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and job_type ='" + string(JobTypeBenchmark) + "'" +
- " and type='" + strconv.Itoa(TypeCloudBrainOne) + "'"
- return x.SQL(countSql).Count()
-}
-func GetBenchmarkOnePeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And job_type = ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), JobTypeBenchmark, TypeCloudBrainOne).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
-
- return total, nil
-}
-func GetDebugTwoPeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and job_type ='" + string(JobTypeDebug) + "'" +
- " and type='" + strconv.Itoa(TypeCloudBrainTwo) + "'"
- return x.SQL(countSql).Count()
-}
-func GetDebugTwoPeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And job_type = ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), JobTypeDebug, TypeCloudBrainTwo).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
- return total, nil
-}
-func GetTrainTwoPeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and job_type ='" + string(JobTypeTrain) + "'" +
- " and type='" + strconv.Itoa(TypeCloudBrainTwo) + "'"
- return x.SQL(countSql).Count()
-}
-func GetTrainTwoPeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And job_type = ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), JobTypeTrain, TypeCloudBrainTwo).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
- return total, nil
-}
-func GetInferenceTwoPeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and job_type ='" + string(JobTypeInference) + "'" +
- " and type='" + strconv.Itoa(TypeCloudBrainTwo) + "'"
- return x.SQL(countSql).Count()
-}
-func GetInferenceTwoPeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And job_type = ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), JobTypeInference, TypeCloudBrainTwo).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
- return total, nil
-}
-
-func GetCloudBrainOnePeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and type='" + strconv.Itoa(TypeCloudBrainOne) + "'"
- return x.SQL(countSql).Count()
-}
-func GetCloudBrainOnePeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), TypeCloudBrainOne).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
- return total, nil
-}
-func GetCloudBrainTwoPeriodCount(beginTime time.Time, endTime time.Time) (int64, error) {
- countSql := "SELECT count(*) FROM " +
- "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
- " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10) +
- " and type='" + strconv.Itoa(TypeCloudBrainTwo) + "'"
- return x.SQL(countSql).Count()
-}
-func GetCloudBrainTwoPeriodDuration(beginTime time.Time, endTime time.Time) (int64, error) {
- total, err := x.Where("created_unix >= ? And created_unix < ? And type = ? ", strconv.FormatInt(beginTime.Unix(), 10), strconv.FormatInt(endTime.Unix(), 10), TypeCloudBrainTwo).SumInt(&Cloudbrain{}, "duration")
- if err != nil {
- return 0, err
- }
- return total, nil
-}
-
 func GetTodayCreatorCount(beginTime time.Time, endTime time.Time) (int64, error) {
 countSql := "SELECT count(distinct user_id) FROM " +
 "public.cloudbrain where created_unix >=" + strconv.FormatInt(beginTime.Unix(), 10) +
@@ -211,6 +84,22 @@ func GetAllStatusCloudBrain() map[string]int {
 return cloudBrainStatusResult
 }
 
+func GetWaittingTop() ([]*CloudbrainInfo, error) {
+ sess := x.NewSession()
+ defer sess.Close()
+ var cond = builder.NewCond()
+ cond = cond.And(
+ builder.Eq{"cloudbrain.status": string(JobWaiting)},
+ )
+ sess.OrderBy("cloudbrain.created_unix ASC limit 10")
+ cloudbrains := make([]*CloudbrainInfo, 0, 10)
+ if err := sess.Table(&Cloudbrain{}).Where(cond).
+ Find(&cloudbrains); err != nil {
+ log.Info("find error.")
+ }
+ return cloudbrains, nil
+}
+
 func GetRunningTop() ([]*CloudbrainInfo, error) {
 sess := x.NewSession()
 defer sess.Close()
diff --git a/models/repo.go b/models/repo.go
index feb6fd3ef..5e11df2b6 100755
--- a/models/repo.go
+++ b/models/repo.go
@@ -2250,6 +2250,18 @@ func CheckRepoStats(ctx context.Context) error {
 "UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?",
 "repository count 'num_stars'",
 },
+ //Repository.NumIssues
+ {
+ "SELECT repo.id FROM `repository` repo WHERE repo.num_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_pull=false)",
+ "UPDATE `repository` SET num_issues=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_pull=false) WHERE id=?",
+ "repository count 'num_issues'",
+ },
+ //Repository.NumPulls
+ {
+ "SELECT repo.id FROM `repository` repo WHERE repo.num_pulls!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_pull=true)",
+ "UPDATE `repository` SET num_pulls=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_pull=true) WHERE id=?",
+ "repository count 'num_pulls'",
+ },
 // Label.NumIssues
 {
 "SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)",
diff --git a/models/user.go b/models/user.go
index a423a843b..a308d9cba 100755
--- a/models/user.go
+++ b/models/user.go
@@ -1768,7 +1768,6 @@ func (opts *SearchUserOptions) toConds() builder.Cond {
 if !opts.IsActive.IsNone() {
 cond = cond.And(builder.Eq{"is_active": opts.IsActive.IsTrue()})
 }
-
 return cond
 }
 
@@ -1780,12 +1779,15 @@ func SearchUsers(opts *SearchUserOptions) (users []*User, _ int64, _ error) {
 if err != nil {
 return nil, 0, fmt.Errorf("Count: %v", err)
 }
-
+ orderby := opts.OrderBy.String()
 if len(opts.OrderBy) == 0 {
- opts.OrderBy = SearchOrderByAlphabetically
+ orderby = SearchOrderByAlphabetically.String()
+ lowerKeyword := strings.ToLower(opts.Keyword)
+ if len(opts.Keyword) > 0 {
+ orderby = "CASE when lower_name='" + lowerKeyword + "' then 0 when strpos(lower_name,'" + lowerKeyword + "')>0 then 1 else 2 END ASC,lower_name ASC"
+ }
 }
-
- sess := x.Where(cond).OrderBy(opts.OrderBy.String())
+ sess := x.Where(cond).OrderBy(orderby)
 if opts.Page != 0 {
 sess = opts.setSessionPagination(sess)
 }
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go
index a36bd4736..0c67a569a 100644
--- a/models/user_business_analysis.go
+++ b/models/user_business_analysis.go
@@ -412,7 +412,16 @@ func QueryUserStaticDataAll(opts *UserBusinessAnalysisQueryOptions) ([]*UserBusi
 func QueryDataForUserDefineFromDb(opts *UserBusinessAnalysisQueryOptions, key string) ([]*UserBusinessAnalysis, int64) {
 statictisSess := xStatistic.NewSession()
 defer statictisSess.Close()
- cond := "data_date='" + key + "'"
+
+ var cond = builder.NewCond()
+ cond = cond.And(
+ builder.Eq{"data_date": key},
+ )
+ if len(opts.UserName) > 0 {
+ cond = cond.And(
+ builder.Like{"name", opts.UserName},
+ )
+ }
 allCount, err := statictisSess.Where(cond).Count(new(UserBusinessAnalysis))
 if err == nil {
 if allCount > 0 {
diff --git a/modules/auth/cloudbrain.go b/modules/auth/cloudbrain.go
index 160328b5b..39685990d 100755
--- a/modules/auth/cloudbrain.go
+++ b/modules/auth/cloudbrain.go
@@ -23,6 +23,7 @@ type CreateCloudBrainForm struct {
 BootFile string `form:"boot_file"`
 Params string `form:"run_para_list"`
 BranchName string `form:"branch_name"`
+ DatasetName string `form:"dataset_name"`
 }
 
 type CommitImageCloudBrainForm struct {
@@ -70,6 +71,7 @@ type CreateCloudBrainInferencForm struct {
 ModelVersion string `form:"model_version" binding:"Required"`
 CkptName string `form:"ckpt_name" binding:"Required"`
 LabelName string `form:"label_names" binding:"Required"`
+ DatasetName string `form:"dataset_name"`
 }
 
 func (f *CreateCloudBrainForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
diff --git a/modules/auth/grampus.go b/modules/auth/grampus.go
index ebf0defde..0338d2ae7 100755
--- a/modules/auth/grampus.go
+++ b/modules/auth/grampus.go
@@ -19,6 +19,7 @@ type CreateGrampusTrainJobForm struct {
 EngineName string `form:"engine_name" binding:"Required"`
 WorkServerNumber int `form:"work_server_number" binding:"Required"`
 Image string `form:"image"`
+ DatasetName string `form:"dataset_name"`
 }
 
 func (f *CreateGrampusTrainJobForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
diff --git a/modules/context/context.go b/modules/context/context.go
index 2c935881c..8c7808466 100755
--- a/modules/context/context.go
+++ b/modules/context/context.go
@@ -6,7 +6,6 @@
 package context
 
 import (
- "code.gitea.io/gitea/routers/notice"
 "html"
 "html/template"
 "io"
@@ -16,6 +15,8 @@ import (
 "strings"
 "time"
 
+ "code.gitea.io/gitea/routers/notice"
+
 "code.gitea.io/gitea/models"
 "code.gitea.io/gitea/modules/auth"
 "code.gitea.io/gitea/modules/base"
diff --git a/modules/markup/markdown/markdown.go b/modules/markup/markdown/markdown.go
index e50301ffe..407339461 100644
--- a/modules/markup/markdown/markdown.go
+++ b/modules/markup/markdown/markdown.go
@@ -7,6 +7,7 @@ package markdown
 
 import (
 "bytes"
+ "strings"
 "sync"
 
 "code.gitea.io/gitea/modules/log"
@@ -14,6 +15,8 @@ import (
 "code.gitea.io/gitea/modules/markup/common"
 "code.gitea.io/gitea/modules/setting"
 giteautil "code.gitea.io/gitea/modules/util"
+ chromahtml "github.com/alecthomas/chroma/formatters/html"
+ highlighting "github.com/yuin/goldmark-highlighting"
 
 "github.com/yuin/goldmark"
 meta "github.com/yuin/goldmark-meta"
@@ -42,16 +45,48 @@ func NewGiteaParseContext(urlPrefix string, isWiki bool) parser.Context {
 func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
 once.Do(func() {
 converter = goldmark.New(
- goldmark.WithExtensions(extension.Table,
+ goldmark.WithExtensions(
+ extension.NewTable(
+ extension.WithTableCellAlignMethod(extension.TableCellAlignAttribute)),
 extension.Strikethrough,
 extension.TaskList,
 extension.DefinitionList,
 common.FootnoteExtension,
- extension.NewTypographer(
- extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
- extension.EnDash: nil,
- extension.EmDash: nil,
- extension.Ellipsis: nil,
+ highlighting.NewHighlighting(
+ highlighting.WithFormatOptions(
+ chromahtml.WithClasses(true),
+ chromahtml.PreventSurroundingPre(true),
+ ),
+ highlighting.WithWrapperRenderer(func(w util.BufWriter, c highlighting.CodeBlockContext, entering bool) {
+ if entering {
+ language, _ := c.Language()
+ if language == nil {
+ language = []byte("text")
+ }
+
+ languageStr := string(language)
+
+ preClasses := []string{"code-block"}
+ if languageStr == "mermaid" {
+ preClasses = append(preClasses, "is-loading")
+ }
+
+ _, err := w.WriteString(`<pre class="` + strings.Join(preClasses, " ") + `">`)
+ if err != nil {
+ return
+ }
+
+ // include language-x class as part of commonmark spec
+ _, err = w.WriteString(`<code class="chroma language-` + string(language) + `">`)
+ if err != nil {
+ return
+ }
+ } else {
+ _, err := w.WriteString("")
+ if err != nil {
+ return
+ }
+ }
}),
),
meta.Meta,
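For reference, a minimal standalone sketch of the goldmark + goldmark-highlighting configuration that the hunk above wires into RenderRaw (module versions as pinned in go.mod above; the sample markdown input is illustrative only, and this sketch omits the wrapper renderer):

// Hedged sketch: render a fenced code block with chroma CSS classes instead of
// inline styles, mirroring the WithFormatOptions used in the patch above.
package main

import (
    "bytes"
    "fmt"
    "log"

    chromahtml "github.com/alecthomas/chroma/formatters/html"
    "github.com/yuin/goldmark"
    highlighting "github.com/yuin/goldmark-highlighting"
)

func main() {
    md := goldmark.New(
        goldmark.WithExtensions(
            highlighting.NewHighlighting(
                highlighting.WithFormatOptions(
                    chromahtml.WithClasses(true),           // emit class-based spans ("chroma ...")
                    chromahtml.PreventSurroundingPre(true), // caller writes its own <pre>/<code> wrapper
                ),
            ),
        ),
    )

    source := []byte("```go\nfmt.Println(\"hello\")\n```\n")
    var buf bytes.Buffer
    if err := md.Convert(source, &buf); err != nil {
        log.Fatal(err)
    }
    fmt.Println(buf.String()) // highlighted spans carrying chroma classes
}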
diff --git a/modules/storage/minio_ext.go b/modules/storage/minio_ext.go
index 4c0cbac55..4b738c068 100755
--- a/modules/storage/minio_ext.go
+++ b/modules/storage/minio_ext.go
@@ -179,28 +179,39 @@ func GetOneLevelAllObjectUnderDirMinio(bucket string, prefixRootPath string, rel
output, err := core.ListObjects(bucket, Prefix, "", "", 1000)
fileInfos := make([]FileInfo, 0)
prefixLen := len(Prefix)
+ fileMap := make(map[string]bool, 0)
if err == nil {
for _, val := range output.Contents {
+
log.Info("val key=" + val.Key)
var isDir bool
var fileName string
if val.Key == Prefix {
continue
}
- if strings.HasSuffix(val.Key, "/") {
+ fileName = val.Key[prefixLen:]
+ log.Info("fileName =" + fileName)
+ files := strings.Split(fileName, "/")
+ if fileMap[files[0]] {
+ continue
+ } else {
+ fileMap[files[0]] = true
+ }
+ ParenDir := relativePath
+ fileName = files[0]
+ if len(files) > 1 {
isDir = true
- fileName = val.Key[prefixLen : len(val.Key)-1]
- relativePath += val.Key[prefixLen:]
+ ParenDir += fileName + "/"
} else {
isDir = false
- fileName = val.Key[prefixLen:]
}
+
fileInfo := FileInfo{
ModTime: val.LastModified.Local().Format("2006-01-02 15:04:05"),
FileName: fileName,
Size: val.Size,
IsDir: isDir,
- ParenDir: relativePath,
+ ParenDir: ParenDir,
}
fileInfos = append(fileInfos, fileInfo)
}
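The new MinIO listing logic above collapses every object key under the requested prefix to its first path segment, de-duplicates segments with a set, and marks a segment as a directory whenever anything deeper hangs off it. A self-contained sketch of that grouping rule follows (standard library only; the sample keys are hypothetical):

// Hedged sketch of the one-level grouping applied in GetOneLevelAllObjectUnderDirMinio.
package main

import (
    "fmt"
    "strings"
)

type entry struct {
    Name  string
    IsDir bool
}

// oneLevel keeps only the first path segment below prefix, skipping duplicate segments.
func oneLevel(keys []string, prefix string) []entry {
    seen := make(map[string]bool)
    var result []entry
    for _, key := range keys {
        if key == prefix || !strings.HasPrefix(key, prefix) {
            continue
        }
        parts := strings.Split(key[len(prefix):], "/")
        if seen[parts[0]] {
            continue
        }
        seen[parts[0]] = true
        result = append(result, entry{Name: parts[0], IsDir: len(parts) > 1})
    }
    return result
}

func main() {
    keys := []string{"job/code/", "job/code/train.py", "job/code/utils/io.py", "job/readme.md"}
    fmt.Println(oneLevel(keys, "job/")) // [{code true} {readme.md false}]
}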
diff --git a/modules/storage/obs.go b/modules/storage/obs.go
index 29b7998f7..2cb3af927 100755
--- a/modules/storage/obs.go
+++ b/modules/storage/obs.go
@@ -395,29 +395,6 @@ func GetOneLevelAllObjectUnderDir(bucket string, prefixRootPath string, relative
} else {
isDir = false
}
-
- // if strings.Contains(val.Key[prefixLen:len(val.Key)-1], "/") {
-
- // files := strings.Split(fileName, "/")
- // fileName = files[0]
- // isDir = true
- // if fileMap[files[0]] {
- // continue
- // } else {
- // fileMap[files[0]] = true
- // }
- // } else {
- // if strings.HasSuffix(val.Key, "/") {
- // isDir = true
- // fileName = val.Key[prefixLen : len(val.Key)-1]
- // relativePath += val.Key[prefixLen:]
- // } else {
- // isDir = false
- // fileName = val.Key[prefixLen:]
- // }
- // fileMap[fileName] = true
- // }
-
fileInfo := FileInfo{
ModTime: val.LastModified.Local().Format("2006-01-02 15:04:05"),
FileName: fileName,
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index cb0ef205f..8732ea889 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -252,10 +252,10 @@ page_dev_env_desc2_title=Model Management and Sharing
page_dev_env_desc2_desc=Associate the model with the code version, you can adjust the model in different ways based on the historical version of the code and save the results. The trained model can be open and shared, so that more people can use the model to test and give feedback.
page_dev_env_desc3_title=Once Configuration, Multiple Reuse
page_dev_env_desc3_desc=Provide execution environment sharing, Once Configuration, Multiple Reuse. Lower the threshold of model development, and avoid spending repetitive time configuring complex environments.
-page_dev_yunlao=PengCheng Cloudbrain Open Source Collaboration
-page_dev_yunlao_desc1=The platform has been connected with Pengcheng Cloudbrain and can use the rich computing resources of Pengcheng Cloudbrain to complete AI development tasks.
-page_dev_yunlao_desc2=Pengcheng Cloudbrain's existing AI computing power is 100p FLOPS@FP16 (billions of half precision floating-point calculations per second), the main hardware infrastructure is composed of GPU server equipped with NVIDIA Tesla V100 and Atlas 900 AI cluster equipped with Kunpeng and Ascend processors.
-page_dev_yunlao_desc3=Developers can freely choose the corresponding computing resources according to their needs, and can test the adaptability, performance, stability of the model in different hardware environments.
+page_dev_yunlao=OpenI AI Collaboration Platform
+page_dev_yunlao_desc1=The OpenI AI collaboration platform has been connected to Pengcheng Cloudbrain and Phase I of the China Computing NET (C2Net), and can use their rich computing resources to complete AI development tasks.
+page_dev_yunlao_desc2=Pengcheng Cloudbrain currently provides 100P FLOPS@FP16 of AI computing power (100 quadrillion half-precision floating-point operations per second); its main hardware infrastructure consists of GPU servers equipped with NVIDIA Tesla V100 and A100, and Atlas 900 AI clusters equipped with Kunpeng and Ascend processors. Phase I of the China Computing NET (C2Net) provides high-speed network interconnection between AI computing centers, enabling reasonable computing power scheduling and elastic resource allocation; 11 intelligent computing centers have been connected so far, with a total computing power of 1924P.
+page_dev_yunlao_desc3=The OpenI AI collaboration platform has been connected to nodes such as the Pengcheng Cloud Computing Institute, Chengdu Intelligent Computing Center, Zhongyuan Intelligent Computing Center and the Hefei Brain-inspired Intelligence node. Developers can freely choose the corresponding computing resources according to their needs, and can test the adaptability, performance, stability, etc. of the model in different hardware environments.
page_dev_yunlao_desc4=If your model requires more computing resources, you can also apply for it separately.
page_dev_yunlao_apply=Apply Separately
@@ -1073,6 +1073,7 @@ cloudbrain_operate = Operate
cloudbrain_status_createtime = Status/Createtime
cloudbrain_status_runtime = Running Time
cloudbrain_jobname_err=Name must start with a lowercase letter or number,can include lowercase letter,number,_ and -,can not end with _, and can be up to 36 characters long.
+cloudbrain_bootfile_err=The boot file does not exist in the repository.
cloudbrain_query_fail=Failed to query cloudbrain information.
cloudbrain.mirror_tag = Mirror Tag
cloudbrain.mirror_description = Mirror Description
@@ -3085,6 +3086,8 @@ Platform_Tutorial = Tutorial
foot.advice_feedback = Feedback
[cloudbrain]
+all_resource_cluster=All Clusters
+all_ai_center=All AI Centers
resource_cluster = Resource Cluster
resource_cluster_openi = OpenI Resource Cluster
resource_cluster_c2net = China Computing NET
@@ -3116,7 +3119,8 @@ dataset_path_rule = The dataset location is stored in the environment variable d
view_sample = View sample
inference_output_path_rule = The inference output path is stored in the environment variable result_url.
model_file_path_rule=The model file location is stored in the environment variable ckpt_url
-
+model_file_postfix_rule = The supported model file formats are [ckpt, pb, h5, json, pkl, pth, t7, pdparams, onnx, pbtxt, keras, mlmodel, cfg, pt]
+model_convert_postfix_rule = The supported model file formats are [.pth, .pkl, .onnx, .mindir, .ckpt, .pb]
delete_task = Delete task
task_delete_confirm = Are you sure you want to delete this task? Once this task is deleted, it cannot be recovered.
operate_confirm = confirm
@@ -3141,5 +3145,6 @@ Not_Stopped=The job is not stopped, can not be deleted.
Already_stopped=The job is already stopped.
Stopped_failed=Fail to stop the job, please try again later.
Stopped_success_update_status_fail=Succeed in stopping th job, but failed to update the job status and duration time.
+load_code_failed=Failed to load code, please check whether the correct branch is selected.
error.dataset_select = dataset select error:the count exceed the limit or has same name
diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini
index 652371690..032cb04d1 100755
--- a/options/locale/locale_zh-CN.ini
+++ b/options/locale/locale_zh-CN.ini
@@ -254,11 +254,11 @@ page_dev_env_desc2_title=模型管理与共享
page_dev_env_desc2_desc=将模型与代码版本建立关联,可以基于代码历史版本,使用不同的方式调整模型,并将结果保存下来;训练好的模型可以开放共享,让更多人的使用模型测试并提出反馈
page_dev_env_desc3_title=一次配置,多次使用
page_dev_env_desc3_desc=提供执行环境共享,一次配置,多次使用,降低模型开发门槛,避免花费重复的时间配置复杂的环境
-page_dev_yunlao=鹏城云脑开源协同
-page_dev_yunlao_desc1=平台已经与鹏城云脑打通,可以利用鹏城云脑的丰富算力资源,完成AI开发任务
-page_dev_yunlao_desc2=鹏城云脑现有AI算力100P FLOPS@FP16(每秒十亿亿次半精度浮点计算),主要硬件基础设施由搭载英伟达Tesla V100 的GPU服务器和搭载鲲鹏、昇腾处理器的Atlas 900 AI集群构成
-page_dev_yunlao_desc3=开发者可以根据使用需求,自由选择相应计算资源,可以测试模型在不同硬件环境下的适配能力、性能、稳定性等
-page_dev_yunlao_desc4=如果您的模型需要更多的计算资源,也可以单独申请
+page_dev_yunlao=启智AI协作平台
+page_dev_yunlao_desc1=启智AI协作平台已经与鹏城云脑、中国算力网(C2Net)一期打通,可以利用鹏城云脑和中国算力网的丰富算力资源,完成AI开发任务。
+page_dev_yunlao_desc2=鹏城云脑现有AI算力100P FLOPS@FP16(每秒十亿亿次半精度浮点计算),主要硬件基础设施由搭载英伟达Tesla V100 和A100 的GPU服务器,以及搭载鲲鹏、昇腾处理器的Atlas 900 AI集群构成;中国算力网(C2Net)一期可实现不同人工智能计算中心之间高速网络互联,实现算力合理调度和资源弹性分配,目前已接入11家智算中心,算力总规模1924P。
+page_dev_yunlao_desc3=启智AI协作平台已接入其中的鹏城云计算所、成都智算中心、中原智算中心、合肥类脑等节点,开发者可以根据使用需求,自由选择相应计算资源,可以测试模型在不同硬件环境下的适配能力、性能、稳定性等。
+page_dev_yunlao_desc4=如果您的模型需要更多的计算资源,也可以单独申请。
page_dev_yunlao_apply=单独申请
search=搜索
@@ -1076,6 +1076,7 @@ cloudbrain_operate=操作
cloudbrain_status_createtime=状态/创建时间
cloudbrain_status_runtime = 运行时长
cloudbrain_jobname_err=只能以小写字母或数字开头且只包含小写字母、数字、_和-,不能以_结尾,最长36个字符。
+cloudbrain_bootfile_err=仓库中不存在启动文件
cloudbrain_query_fail=查询云脑任务失败。
cloudbrain.mirror_tag = 镜像标签
cloudbrain.mirror_description = 镜像描述
@@ -3099,6 +3100,8 @@ Platform_Tutorial=新手指引
foot.advice_feedback = 意见反馈
[cloudbrain]
+all_resource_cluster=全部集群
+all_ai_center=全部智算中心
resource_cluster = 算力集群
resource_cluster_openi = 启智集群
resource_cluster_c2net = 智算网络集群
@@ -3131,7 +3134,8 @@ dataset_path_rule = 数据集位置存储在环境变量data_url中,训练输
view_sample = 查看样例
inference_output_path_rule = 推理输出路径存储在环境变量result_url中。
model_file_path_rule = 模型文件位置存储在环境变量ckpt_url中。
-
+model_file_postfix_rule = 模型文件支持的格式为 [ckpt, pb, h5, json, pkl, pth, t7, pdparams, onnx, pbtxt, keras, mlmodel, cfg, pt]
+model_convert_postfix_rule = 模型文件支持的格式为 [.pth, .pkl, .onnx, .mindir, .ckpt, .pb]
delete_task = 删除任务
task_delete_confirm = 你确认删除该任务么?此任务一旦删除不可恢复。
operate_confirm = 确定操作
@@ -3156,6 +3160,7 @@ Not_Stopped=任务还未终止,不能删除。
Already_stopped=任务已停止。
Stopped_failed=任务停止失败,请稍后再试。
Stopped_success_update_status_fail=任务停止成功,状态及运行时间更新失败。
+load_code_failed=代码加载失败,请确认选择了正确的分支。
error.dataset_select = 数据集选择错误:数量超过限制或者有同名数据集
diff --git a/public/self/dataset_preview.js b/public/self/dataset_preview.js
index e6b79dd7d..81620e1a0 100644
--- a/public/self/dataset_preview.js
+++ b/public/self/dataset_preview.js
@@ -123,13 +123,13 @@ function loadimg(uuid,filename){
function loadimg(){
var length = labeltastresult[fileindex].pic_image_field.length;
- if(labeltastresult[fileindex].pic_image_field.substring(length - 5) == ".json"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".xml"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".txt"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".csv"
- || labeltastresult[fileindex].pic_image_field.substring(length - 3) == ".md"
- || labeltastresult[fileindex].pic_image_field.substring(length - 3) == ".py"
- || labeltastresult[fileindex].pic_image_field.substring(length - 3) == ".sh"){
+ if(labeltastresult[fileindex].pic_image_field.substring(length - 5).toLowerCase() == ".json"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".xml"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".txt"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".csv"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 3).toLowerCase() == ".md"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 3).toLowerCase() == ".py"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 3).toLowerCase() == ".sh"){
//文本
canvas.style.display="none";
@@ -138,11 +138,11 @@ function loadimg(){
$('#textcontent').height(canvas.height-40)
$("#textcontent").text(textContent);
}else{
- if(labeltastresult[fileindex].pic_image_field.substring(length - 5) == ".jpeg"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".jpg"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".bmp"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".gif"
- || labeltastresult[fileindex].pic_image_field.substring(length - 4) == ".png"){
+ if(labeltastresult[fileindex].pic_image_field.substring(length - 5).toLowerCase() == ".jpeg"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".jpg"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".bmp"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".gif"
+ || labeltastresult[fileindex].pic_image_field.substring(length - 4).toLowerCase() == ".png"){
canvas.style.display="block";
document.getElementById("textcontent").style.display="none";
img.src = ip + "/getgiteaimage?uuid=" + dataset_id + "&filename=" + labeltastresult[fileindex].pic_image_field;
diff --git a/routers/admin/cloudbrains.go b/routers/admin/cloudbrains.go
index 5876baf18..ec0034f4f 100755
--- a/routers/admin/cloudbrains.go
+++ b/routers/admin/cloudbrains.go
@@ -35,10 +35,14 @@ func CloudBrains(ctx *context.Context) {
listType := ctx.Query("listType")
jobType := ctx.Query("jobType")
jobStatus := ctx.Query("jobStatus")
+ aiCenter := ctx.Query("aiCenter")
+ cluster := ctx.Query("cluster")
ctx.Data["ListType"] = listType
ctx.Data["JobType"] = jobType
ctx.Data["JobStatus"] = jobStatus
+ ctx.Data["aiCenter"] = aiCenter
+ ctx.Data["cluster"] = cluster
page := ctx.QueryInt("page")
if page <= 0 {
@@ -80,6 +84,8 @@ func CloudBrains(ctx *context.Context) {
IsLatestVersion: modelarts.IsLatestVersion,
ComputeResource: listType,
Type: models.TypeCloudBrainAll,
+ AiCenter: aiCenter,
+ Cluster: cluster,
})
if err != nil {
ctx.ServerError("Get job failed:", err)
diff --git a/routers/api/v1/repo/cloudbrain_dashboard.go b/routers/api/v1/repo/cloudbrain_dashboard.go
index c632f3c8b..52ee3ed2c 100755
--- a/routers/api/v1/repo/cloudbrain_dashboard.go
+++ b/routers/api/v1/repo/cloudbrain_dashboard.go
@@ -679,6 +679,8 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
jobType := ctx.Query("jobType")
jobStatus := ctx.Query("jobStatus")
cloudBrainType := ctx.QueryInt("Type")
+ aiCenter := ctx.Query("aiCenter")
+ needDeleteInfo := ctx.Query("needDeleteInfo")
page := ctx.QueryInt("page")
pageSize := ctx.QueryInt("pagesize")
@@ -724,6 +726,8 @@ func GetCloudbrainsDetailData(ctx *context.Context) {
NeedRepoInfo: true,
BeginTimeUnix: int64(recordBeginTime),
EndTimeUnix: endTime.Unix(),
+ AiCenter: aiCenter,
+ NeedDeleteInfo: needDeleteInfo,
})
if err != nil {
ctx.ServerError("Get job failed:", err)
diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go
index 0f852b3fc..c3c6c43cb 100755
--- a/routers/repo/cloudbrain.go
+++ b/routers/repo/cloudbrain.go
@@ -217,255 +217,6 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error {
return nil
}
-func cloudBrainTrainJobErrorPrepare(ctx *context.Context, form auth.CreateCloudBrainForm) error {
- ctx.Data["PageIsCloudBrain"] = true
-
- if categories == nil {
- json.Unmarshal([]byte(setting.BenchmarkCategory), &categories)
- }
- ctx.Data["benchmark_categories"] = categories.Category
-
- ctx.Data["benchmark_types"] = GetBenchmarkTypes(ctx).BenchmarkType
- queuesDetail, _ := cloudbrain.GetQueuesDetail()
- if queuesDetail != nil {
- ctx.Data["QueuesDetail"] = queuesDetail
- }
-
- cloudbrain.InitSpecialPool()
-
- if gpuInfos == nil {
- json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos)
- }
- ctx.Data["gpu_types"] = gpuInfos.GpuInfo
-
- if trainGpuInfos == nil {
- json.Unmarshal([]byte(setting.TrainGpuTypes), &trainGpuInfos)
- }
- ctx.Data["train_gpu_types"] = trainGpuInfos.GpuInfo
-
- if inferenceGpuInfos == nil && setting.InferenceGpuTypes != "" {
- json.Unmarshal([]byte(setting.InferenceGpuTypes), &inferenceGpuInfos)
- }
- if inferenceGpuInfos != nil {
- ctx.Data["inference_gpu_types"] = inferenceGpuInfos.GpuInfo
- }
-
- if benchmarkGpuInfos == nil {
- json.Unmarshal([]byte(setting.BenchmarkGpuTypes), &benchmarkGpuInfos)
- }
- ctx.Data["benchmark_gpu_types"] = benchmarkGpuInfos.GpuInfo
-
- if benchmarkResourceSpecs == nil {
- json.Unmarshal([]byte(setting.BenchmarkResourceSpecs), &benchmarkResourceSpecs)
- }
- ctx.Data["benchmark_resource_specs"] = benchmarkResourceSpecs.ResourceSpec
-
- if cloudbrain.ResourceSpecs == nil {
- json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs)
- }
- ctx.Data["resource_specs"] = cloudbrain.ResourceSpecs.ResourceSpec
-
- if cloudbrain.TrainResourceSpecs == nil {
- json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs)
- }
- ctx.Data["train_resource_specs"] = cloudbrain.TrainResourceSpecs.ResourceSpec
-
- if cloudbrain.InferenceResourceSpecs == nil && setting.InferenceResourceSpecs != "" {
- json.Unmarshal([]byte(setting.InferenceResourceSpecs), &cloudbrain.InferenceResourceSpecs)
- }
- if cloudbrain.InferenceResourceSpecs != nil {
- ctx.Data["inference_resource_specs"] = cloudbrain.InferenceResourceSpecs.ResourceSpec
- }
-
- if cloudbrain.SpecialPools != nil {
- var debugGpuTypes []*models.GpuInfo
- var trainGpuTypes []*models.GpuInfo
-
- for _, pool := range cloudbrain.SpecialPools.Pools {
- org, _ := models.GetOrgByName(pool.Org)
- if org != nil {
- isOrgMember, _ := models.IsOrganizationMember(org.ID, ctx.User.ID)
- if isOrgMember {
- for _, jobType := range pool.JobType {
- if jobType == string(models.JobTypeDebug) {
- debugGpuTypes = append(debugGpuTypes, pool.Pool...)
- if pool.ResourceSpec != nil {
- ctx.Data["resource_specs"] = pool.ResourceSpec
- }
- } else if jobType == string(models.JobTypeTrain) {
- trainGpuTypes = append(trainGpuTypes, pool.Pool...)
- if pool.ResourceSpec != nil {
- ctx.Data["train_resource_specs"] = pool.ResourceSpec
- }
- }
- }
- break
- }
- }
-
- }
-
- if len(debugGpuTypes) > 0 {
- ctx.Data["gpu_types"] = debugGpuTypes
- }
-
- if len(trainGpuTypes) > 0 {
- ctx.Data["train_gpu_types"] = trainGpuTypes
- }
-
- }
-
- var Parameters modelarts.Parameters
- if err := json.Unmarshal([]byte(form.Params), &Parameters); err != nil {
- ctx.ServerError("json.Unmarshal failed:", err)
- return err
- }
- ctx.Data["params"] = Parameters.Parameter
- ctx.Data["boot_file"] = form.BootFile
- ctx.Data["attachment"] = form.Attachment
- _, datasetNames, err := models.GetDatasetInfo(form.Attachment)
- if err != nil {
- log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
- return nil
- }
- ctx.Data["dataset_name"] = datasetNames
- ctx.Data["branch_name"] = form.BranchName
- ctx.Data["datasetType"] = models.TypeCloudBrainOne
-
- ctx.Data["display_job_name"] = form.DisplayJobName
- ctx.Data["image"] = form.Image
- ctx.Data["job_type"] = form.JobType
- ctx.Data["gpu_type"] = form.GpuType
- ctx.Data["resource_spec_id"] = form.ResourceSpecId
- return nil
-}
-
-func cloudBrainInferenceJobErrorPrepare(ctx *context.Context, form auth.CreateCloudBrainInferencForm) error {
- ctx.Data["PageIsCloudBrain"] = true
-
- if categories == nil {
- json.Unmarshal([]byte(setting.BenchmarkCategory), &categories)
- }
- ctx.Data["benchmark_categories"] = categories.Category
-
- ctx.Data["benchmark_types"] = GetBenchmarkTypes(ctx).BenchmarkType
- queuesDetail, _ := cloudbrain.GetQueuesDetail()
- if queuesDetail != nil {
- ctx.Data["QueuesDetail"] = queuesDetail
- }
-
- cloudbrain.InitSpecialPool()
-
- if gpuInfos == nil {
- json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos)
- }
- ctx.Data["gpu_types"] = gpuInfos.GpuInfo
-
- if trainGpuInfos == nil {
- json.Unmarshal([]byte(setting.TrainGpuTypes), &trainGpuInfos)
- }
- ctx.Data["train_gpu_types"] = trainGpuInfos.GpuInfo
-
- if inferenceGpuInfos == nil && setting.InferenceGpuTypes != "" {
- json.Unmarshal([]byte(setting.InferenceGpuTypes), &inferenceGpuInfos)
- }
- if inferenceGpuInfos != nil {
- ctx.Data["inference_gpu_types"] = inferenceGpuInfos.GpuInfo
- }
-
- if benchmarkGpuInfos == nil {
- json.Unmarshal([]byte(setting.BenchmarkGpuTypes), &benchmarkGpuInfos)
- }
- ctx.Data["benchmark_gpu_types"] = benchmarkGpuInfos.GpuInfo
-
- if benchmarkResourceSpecs == nil {
- json.Unmarshal([]byte(setting.BenchmarkResourceSpecs), &benchmarkResourceSpecs)
- }
- ctx.Data["benchmark_resource_specs"] = benchmarkResourceSpecs.ResourceSpec
-
- if cloudbrain.ResourceSpecs == nil {
- json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs)
- }
- ctx.Data["resource_specs"] = cloudbrain.ResourceSpecs.ResourceSpec
-
- if cloudbrain.TrainResourceSpecs == nil {
- json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs)
- }
- ctx.Data["train_resource_specs"] = cloudbrain.TrainResourceSpecs.ResourceSpec
-
- if cloudbrain.InferenceResourceSpecs == nil && setting.InferenceResourceSpecs != "" {
- json.Unmarshal([]byte(setting.InferenceResourceSpecs), &cloudbrain.InferenceResourceSpecs)
- }
- if cloudbrain.InferenceResourceSpecs != nil {
- ctx.Data["inference_resource_specs"] = cloudbrain.InferenceResourceSpecs.ResourceSpec
- }
-
- if cloudbrain.SpecialPools != nil {
- var debugGpuTypes []*models.GpuInfo
- var trainGpuTypes []*models.GpuInfo
-
- for _, pool := range cloudbrain.SpecialPools.Pools {
- org, _ := models.GetOrgByName(pool.Org)
- if org != nil {
- isOrgMember, _ := models.IsOrganizationMember(org.ID, ctx.User.ID)
- if isOrgMember {
- for _, jobType := range pool.JobType {
- if jobType == string(models.JobTypeDebug) {
- debugGpuTypes = append(debugGpuTypes, pool.Pool...)
- if pool.ResourceSpec != nil {
- ctx.Data["resource_specs"] = pool.ResourceSpec
- }
- } else if jobType == string(models.JobTypeTrain) {
- trainGpuTypes = append(trainGpuTypes, pool.Pool...)
- if pool.ResourceSpec != nil {
- ctx.Data["train_resource_specs"] = pool.ResourceSpec
- }
- }
- }
- break
- }
- }
-
- }
- if len(debugGpuTypes) > 0 {
- ctx.Data["gpu_types"] = debugGpuTypes
- }
-
- if len(trainGpuTypes) > 0 {
- ctx.Data["train_gpu_types"] = trainGpuTypes
- }
-
- }
- var Parameters modelarts.Parameters
- if err := json.Unmarshal([]byte(form.Params), &Parameters); err != nil {
- ctx.ServerError("json.Unmarshal failed:", err)
- return err
- }
- ctx.Data["params"] = Parameters.Parameter
- ctx.Data["boot_file"] = form.BootFile
- ctx.Data["attachment"] = form.Attachment
- _, datasetNames, err := models.GetDatasetInfo(form.Attachment)
- if err != nil {
- log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
- return nil
- }
- ctx.Data["dataset_name"] = datasetNames
- ctx.Data["branch_name"] = form.BranchName
- ctx.Data["datasetType"] = models.TypeCloudBrainOne
-
- ctx.Data["display_job_name"] = form.DisplayJobName
- ctx.Data["image"] = form.Image
- ctx.Data["job_type"] = form.JobType
- ctx.Data["gpu_type"] = form.GpuType
- ctx.Data["resource_spec_id"] = form.ResourceSpecId
- ctx.Data["label_names"] = form.LabelName
- ctx.Data["train_url"] = form.TrainUrl
- ctx.Data["ckpt_name"] = form.CkptName
- ctx.Data["model_name"] = form.ModelName
- ctx.Data["model_version"] = form.ModelVersion
- ctx.Data["description"] = form.Description
- return nil
-}
func CloudBrainNew(ctx *context.Context) {
err := cloudBrainNewDataPrepare(ctx)
@@ -488,6 +239,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath
resourceSpecId := form.ResourceSpecId
branchName := form.BranchName
+ bootFile := strings.TrimSpace(form.BootFile)
repo := ctx.Repo.Repository
tpl := tplCloudBrainNew
@@ -499,28 +251,28 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
if err == nil {
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("the job name did already exist", tpl, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("system error", tpl, &form)
return
}
}
if !jobNamePattern.MatchString(displayJobName) {
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form)
return
}
if jobType != string(models.JobTypeBenchmark) && jobType != string(models.JobTypeDebug) && jobType != string(models.JobTypeTrain) {
log.Error("jobtype error:", jobType, ctx.Data["MsgID"])
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("jobtype error", tpl, &form)
return
}
@@ -528,13 +280,13 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, jobType)
if err != nil {
log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("system error", tpl, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain.morethanonejob"), tpl, &form)
return
}
@@ -554,10 +306,18 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
command := cloudbrain.GetCloudbrainDebugCommand()
if jobType == string(models.JobTypeTrain) {
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
+ cloudBrainNewDataPrepare(ctx)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tpl, &form)
+ return
+ }
tpl = tplCloudBrainTrainJobNew
commandTrain, err := getTrainJobCommand(form)
if err != nil {
log.Error("getTrainJobCommand failed: %v", err)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}
@@ -568,7 +328,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
errStr := checkCloudBrainSpecialPool(ctx, jobType, gpuQueue, resourceSpecId)
if errStr != "" {
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(errStr, tpl, &form)
return
}
@@ -576,12 +336,12 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
if branchName == "" {
branchName = cloudbrain.DefaultBranchName
}
- downloadCode(repo, codePath, branchName)
- uploadCodeToMinio(codePath+"/", jobName, cloudbrain.CodeMountPath+"/")
-
- modelPath := setting.JobPath + jobName + cloudbrain.ModelMountPath + "/"
- mkModelPath(modelPath)
- uploadCodeToMinio(modelPath, jobName, cloudbrain.ModelMountPath+"/")
+ errStr = loadCodeAndMakeModelPath(repo, codePath, branchName, jobName, cloudbrain.ModelMountPath)
+ if errStr != "" {
+ cloudBrainNewDataPrepare(ctx)
+ ctx.RenderWithErr(ctx.Tr(errStr), tpl, &form)
+ return
+ }
commitID, _ := ctx.Repo.GitRepo.GetBranchCommitID(branchName)
@@ -614,7 +374,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
err = cloudbrain.GenerateTask(req)
if err != nil {
- cloudBrainTrainJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}
@@ -626,6 +386,30 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) {
}
}
+func loadCodeAndMakeModelPath(repo *models.Repository, codePath string, branchName string, jobName string, resultPath string) string {
+ err := downloadCode(repo, codePath, branchName)
+ if err != nil {
+ return "cloudbrain.load_code_failed"
+ }
+
+ err = uploadCodeToMinio(codePath+"/", jobName, cloudbrain.CodeMountPath+"/")
+ if err != nil {
+ return "cloudbrain.load_code_failed"
+ }
+
+ modelPath := setting.JobPath + jobName + resultPath + "/"
+ err = mkModelPath(modelPath)
+ if err != nil {
+ return "cloudbrain.load_code_failed"
+ }
+ err = uploadCodeToMinio(modelPath, jobName, resultPath+"/")
+ if err != nil {
+ return "cloudbrain.load_code_failed"
+ }
+
+ return ""
+}
+
func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBrainInferencForm) {
ctx.Data["PageIsCloudBrain"] = true
displayJobName := form.DisplayJobName
@@ -637,6 +421,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
codePath := setting.JobPath + jobName + cloudbrain.CodeMountPath
resourceSpecId := form.ResourceSpecId
branchName := form.BranchName
+ bootFile := strings.TrimSpace(form.BootFile)
labelName := form.LabelName
repo := ctx.Repo.Repository
@@ -646,6 +431,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
command, err := getInferenceJobCommand(form)
if err != nil {
log.Error("getTrainJobCommand failed: %v", err)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}
@@ -654,35 +440,43 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
if err == nil {
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("the job name did already exist", tpl, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("system error", tpl, &form)
return
}
}
if !jobNamePattern.MatchString(displayJobName) {
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tpl, &form)
return
}
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
+ cloudBrainNewDataPrepare(ctx)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tpl, &form)
+ return
+ }
+
count, err := models.GetCloudbrainCountByUserID(ctx.User.ID, jobType)
if err != nil {
log.Error("GetCloudbrainCountByUserID failed:%v", err, ctx.Data["MsgID"])
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr("system error", tpl, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain.morethanonejob"), tpl, &form)
return
}
@@ -691,18 +485,19 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
if branchName == "" {
branchName = cloudbrain.DefaultBranchName
}
- downloadCode(repo, codePath, branchName)
- uploadCodeToMinio(codePath+"/", jobName, cloudbrain.CodeMountPath+"/")
- resultPath := setting.JobPath + jobName + cloudbrain.ResultPath + "/"
- mkResultPath(resultPath)
- uploadCodeToMinio(resultPath, jobName, cloudbrain.ResultPath+"/")
+ errStr := loadCodeAndMakeModelPath(repo, codePath, branchName, jobName, cloudbrain.ResultPath)
+ if errStr != "" {
+ cloudBrainNewDataPrepare(ctx)
+ ctx.RenderWithErr(ctx.Tr(errStr), tpl, &form)
+ return
+ }
commitID, _ := ctx.Repo.GitRepo.GetBranchCommitID(branchName)
datasetInfos, datasetNames, err := models.GetDatasetInfo(uuid)
if err != nil {
log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(ctx.Tr("cloudbrain.error.dataset_select"), tpl, &form)
return
}
@@ -739,7 +534,7 @@ func CloudBrainInferenceJobCreate(ctx *context.Context, form auth.CreateCloudBra
err = cloudbrain.GenerateTask(req)
if err != nil {
- cloudBrainInferenceJobErrorPrepare(ctx, form)
+ cloudBrainNewDataPrepare(ctx)
ctx.RenderWithErr(err.Error(), tpl, &form)
return
}
@@ -1133,7 +928,7 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo
}
}
-
+ ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false)
ctx.Data["task"] = task
labelName := strings.Fields(task.LabelName)
ctx.Data["LabelName"] = labelName
@@ -1911,11 +1706,7 @@ func uploadCodeToMinio(codePath, jobName, parentDir string) error {
}
func mkModelPath(modelPath string) error {
- return mkPathAndReadMeFile(modelPath, "You can put the model file into this directory and download it by the web page.")
-}
-
-func mkResultPath(resultPath string) error {
- return mkPathAndReadMeFile(resultPath, "You can put the result file into this directory and download it by the web page.")
+ return mkPathAndReadMeFile(modelPath, "You can put the files into this directory and download the files by the web page.")
}
func mkPathAndReadMeFile(path string, text string) error {
diff --git a/routers/repo/grampus.go b/routers/repo/grampus.go
index be87ceb36..cdde7596c 100755
--- a/routers/repo/grampus.go
+++ b/routers/repo/grampus.go
@@ -3,6 +3,7 @@ package repo
import (
"encoding/json"
"errors"
+ "fmt"
"io/ioutil"
"net/http"
"os"
@@ -45,8 +46,7 @@ func GrampusTrainJobGPUNew(ctx *context.Context) {
ctx.ServerError("get new train-job info failed", err)
return
}
- waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeC2Net, models.GPUResource, models.JobTypeTrain)
- ctx.Data["WaitCount"] = waitCount
+
ctx.HTML(http.StatusOK, tplGrampusTrainJobGPUNew)
}
@@ -57,8 +57,6 @@ func GrampusTrainJobNPUNew(ctx *context.Context) {
ctx.ServerError("get new train-job info failed", err)
return
}
- waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeC2Net, models.NPUResource, models.JobTypeTrain)
- ctx.Data["WaitCount"] = waitCount
ctx.HTML(200, tplGrampusTrainJobNPUNew)
}
@@ -131,100 +129,17 @@ func grampusTrainJobNewDataPrepare(ctx *context.Context, processType string) err
if processType == grampus.ProcessorTypeGPU {
ctx.Data["datasetType"] = models.TypeCloudBrainOne
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeC2Net, models.GPUResource, models.JobTypeTrain)
+ ctx.Data["WaitCount"] = waitCount
} else if processType == grampus.ProcessorTypeNPU {
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeC2Net, models.NPUResource, models.JobTypeTrain)
+ ctx.Data["WaitCount"] = waitCount
}
return nil
}
-func grampusTrainJobErrorPrepare(ctx *context.Context, processType string, form auth.CreateGrampusTrainJobForm) error {
- ctx.Data["PageIsCloudBrain"] = true
-
- //get valid images
- images, err := grampus.GetImages(processType)
- if err != nil {
- log.Error("GetImages failed:", err.Error())
- } else {
- ctx.Data["images"] = images.Infos
- }
-
- grampus.InitSpecialPool()
-
- ctx.Data["GPUEnabled"] = true
- ctx.Data["NPUEnabled"] = true
- includeCenters := make(map[string]struct{})
- excludeCenters := make(map[string]struct{})
- if grampus.SpecialPools != nil {
- for _, pool := range grampus.SpecialPools.Pools {
- if pool.IsExclusive {
- if !IsUserInOrgPool(ctx.User.ID, pool) {
- ctx.Data[pool.Type+"Enabled"] = false
- }
- } else {
- if strings.Contains(strings.ToLower(processType), strings.ToLower(pool.Type)) {
- if IsUserInOrgPool(ctx.User.ID, pool) {
- for _, center := range pool.Pool {
- includeCenters[center.Queue] = struct{}{}
- }
- } else {
- for _, center := range pool.Pool {
- excludeCenters[center.Queue] = struct{}{}
- }
-
- }
-
- }
-
- }
- }
- }
-
- //get valid resource specs
- specs, err := grampus.GetResourceSpecs(processType)
-
- grampusSpecs := getFilterSpecBySpecialPool(specs, includeCenters, excludeCenters)
-
- if err != nil {
- log.Error("GetResourceSpecs failed:", err.Error())
- } else {
- ctx.Data["flavor_infos"] = grampusSpecs
- }
-
- if processType == grampus.ProcessorTypeGPU {
- ctx.Data["datasetType"] = models.TypeCloudBrainOne
- } else if processType == grampus.ProcessorTypeNPU {
- ctx.Data["datasetType"] = models.TypeCloudBrainTwo
- }
-
- var Parameters modelarts.Parameters
- if err := json.Unmarshal([]byte(form.Params), &Parameters); err != nil {
- ctx.ServerError("json.Unmarshal failed:", err)
- return err
- }
- ctx.Data["params"] = Parameters.Parameter
- ctx.Data["boot_file"] = form.BootFile
- ctx.Data["attachment"] = form.Attachment
- _, datasetNames, err := models.GetDatasetInfo(form.Attachment)
- if err != nil {
- log.Error("GetDatasetInfo failed: %v", err, ctx.Data["MsgID"])
- return nil
- }
- ctx.Data["dataset_name"] = datasetNames
- ctx.Data["branch_name"] = form.BranchName
- ctx.Data["image_id"] = form.ImageID
-
- ctx.Data["display_job_name"] = form.DisplayJobName
- ctx.Data["image"] = form.Image
- ctx.Data["flavor"] = form.FlavorID
- ctx.Data["flavor_name"] = form.FlavorName
- ctx.Data["description"] = form.Description
- ctx.Data["engine_name"] = form.EngineName
- ctx.Data["work_server_number"] = form.WorkServerNumber
-
- return nil
-}
-
func getFilterSpecBySpecialPool(specs *models.GetGrampusResourceSpecsResult, includeCenters map[string]struct{}, excludeCenters map[string]struct{}) []models.GrampusSpec {
if len(includeCenters) == 0 && len(excludeCenters) == 0 {
return specs.Infos
@@ -295,14 +210,22 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
image := strings.TrimSpace(form.Image)
if !jobNamePattern.MatchString(displayJobName) {
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tplGrampusTrainJobGPUNew, &form)
return
}
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplGrampusTrainJobGPUNew, &form)
+ return
+ }
+
errStr := checkSpecialPool(ctx, "GPU")
if errStr != "" {
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(errStr, tplGrampusTrainJobGPUNew, &form)
return
}
@@ -311,13 +234,13 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
count, err := models.GetGrampusCountByUserID(ctx.User.ID, string(models.JobTypeTrain), models.GPUResource)
if err != nil {
log.Error("GetGrampusCountByUserID failed:%v", err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobGPUNew, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplGrampusTrainJobGPUNew, &form)
return
}
@@ -326,7 +249,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
//check param
if err := grampusParamCheckCreateTrainJob(form); err != nil {
log.Error("paramCheckCreateTrainJob failed:(%v)", err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobGPUNew, &form)
return
}
@@ -336,14 +259,14 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err == nil {
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("the job name did already exist", tplGrampusTrainJobGPUNew, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobGPUNew, &form)
return
}
@@ -353,7 +276,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
attachment, err := models.GetAttachmentByUUID(uuid)
if err != nil {
log.Error("GetAttachmentByUUID failed:", err.Error(), ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("dataset is not exist", tplGrampusTrainJobGPUNew, &form)
return
}
@@ -366,8 +289,8 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := downloadZipCode(ctx, codeLocalPath, branchName); err != nil {
log.Error("downloadZipCode failed, server timed out: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
- ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobGPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
return
}
@@ -375,24 +298,24 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
//upload code
if err := uploadCodeToMinio(codeLocalPath+"/", jobName, cloudbrain.CodeMountPath+"/"); err != nil {
log.Error("Failed to uploadCodeToMinio: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
- ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobGPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
return
}
modelPath := setting.JobPath + jobName + cloudbrain.ModelMountPath + "/"
if err := mkModelPath(modelPath); err != nil {
log.Error("Failed to mkModelPath: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
- ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobGPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
return
}
//init model readme
if err := uploadCodeToMinio(modelPath, jobName, cloudbrain.ModelMountPath+"/"); err != nil {
log.Error("Failed to uploadCodeToMinio: %s (%v)", repo.FullName(), err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
- ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobGPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobGPUNew, &form)
return
}
@@ -400,7 +323,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
command, err := generateCommand(repo.Name, grampus.ProcessorTypeGPU, codeMinioPath+cloudbrain.DefaultBranchName+".zip", dataMinioPath, bootFile, params, setting.CBCodePathPrefix+jobName+cloudbrain.ModelMountPath+"/", attachment.Name)
if err != nil {
log.Error("Failed to generateCommand: %s (%v)", displayJobName, err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobGPUNew, &form)
return
}
@@ -432,7 +355,7 @@ func GrampusTrainJobGpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
err = grampus.GenerateTrainJob(ctx, req)
if err != nil {
log.Error("GenerateTrainJob failed:%v", err.Error(), ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeGPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeGPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobGPUNew, &form)
return
}
@@ -479,14 +402,22 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
engineName := form.EngineName
if !jobNamePattern.MatchString(displayJobName) {
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_jobname_err"), tplGrampusTrainJobNPUNew, &form)
return
}
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err, ctx.Data["MsgID"])
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplGrampusTrainJobNPUNew, &form)
+ return
+ }
+
errStr := checkSpecialPool(ctx, "NPU")
if errStr != "" {
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(errStr, tplGrampusTrainJobGPUNew, &form)
return
}
@@ -495,13 +426,13 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
count, err := models.GetGrampusCountByUserID(ctx.User.ID, string(models.JobTypeTrain), models.NPUResource)
if err != nil {
log.Error("GetGrampusCountByUserID failed:%v", err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobNPUNew, &form)
return
} else {
if count >= 1 {
log.Error("the user already has running or waiting task", ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("you have already a running or waiting task, can not create more", tplGrampusTrainJobNPUNew, &form)
return
}
@@ -510,7 +441,7 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
//check param
if err := grampusParamCheckCreateTrainJob(form); err != nil {
log.Error("paramCheckCreateTrainJob failed:(%v)", err)
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobNPUNew, &form)
return
}
@@ -520,14 +451,14 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err == nil {
if len(tasks) != 0 {
log.Error("the job name did already exist", ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("the job name did already exist", tplGrampusTrainJobNPUNew, &form)
return
}
} else {
if !models.IsErrJobNotExist(err) {
log.Error("system error, %v", err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("system error", tplGrampusTrainJobNPUNew, &form)
return
}
@@ -537,7 +468,7 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
attachment, err := models.GetAttachmentByUUID(uuid)
if err != nil {
log.Error("GetAttachmentByUUID failed:", err.Error(), ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("dataset is not exist", tplGrampusTrainJobNPUNew, &form)
return
}
@@ -550,23 +481,23 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
if err := downloadZipCode(ctx, codeLocalPath, branchName); err != nil {
log.Error("downloadZipCode failed, server timed out: %s (%v)", repo.FullName(), err)
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
- ctx.RenderWithErr("Create task failed, server timed out", tplGrampusTrainJobNPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobNPUNew, &form)
return
}
//todo: upload code (send to file_server todo this work?)
if err := obsMkdir(setting.CodePathPrefix + jobName + modelarts.OutputPath); err != nil {
log.Error("Failed to obsMkdir_output: %s (%v)", repo.FullName(), err)
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
- ctx.RenderWithErr("Failed to obsMkdir_output", tplGrampusTrainJobNPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobNPUNew, &form)
return
}
if err := uploadCodeToObs(codeLocalPath, jobName, ""); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
- ctx.RenderWithErr("Failed to uploadCodeToObs", tplGrampusTrainJobNPUNew, &form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplGrampusTrainJobNPUNew, &form)
return
}
@@ -574,7 +505,7 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
command, err := generateCommand(repo.Name, grampus.ProcessorTypeNPU, codeObsPath+cloudbrain.DefaultBranchName+".zip", dataObsPath+"'"+attachment.Name+"'", bootFile, params, setting.CodePathPrefix+jobName+modelarts.OutputPath, attachment.Name)
if err != nil {
log.Error("Failed to generateCommand: %s (%v)", displayJobName, err, ctx.Data["MsgID"])
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr("Create task failed, internal error", tplGrampusTrainJobNPUNew, &form)
return
}
@@ -610,7 +541,7 @@ func GrampusTrainJobNpuCreate(ctx *context.Context, form auth.CreateGrampusTrain
err = grampus.GenerateTrainJob(ctx, req)
if err != nil {
log.Error("GenerateTrainJob failed:%v", err.Error())
- grampusTrainJobErrorPrepare(ctx, grampus.ProcessorTypeNPU, form)
+ grampusTrainJobNewDataPrepare(ctx, grampus.ProcessorTypeNPU)
ctx.RenderWithErr(err.Error(), tplGrampusTrainJobNPUNew, &form)
return
}
@@ -782,7 +713,7 @@ func GrampusTrainJobShow(ctx *context.Context) {
taskList := make([]*models.Cloudbrain, 0)
taskList = append(taskList, task)
ctx.Data["version_list_task"] = taskList
-
+ ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false)
ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task)
ctx.Data["displayJobName"] = task.DisplayJobName
@@ -874,7 +805,13 @@ func generateCommand(repoName, processorType, codeRemotePath, dataRemotePath, bo
}
}
- commandCode := "cd " + workDir + "code/" + strings.ToLower(repoName) + ";python " + bootFile + paramCode + ";"
+ var commandCode string
+ if processorType == grampus.ProcessorTypeNPU {
+ commandCode = "/bin/bash /home/work/run_train_for_openi.sh " + workDir + "code/" + strings.ToLower(repoName) + "/" + bootFile + " /tmp/log/train.log" + paramCode + ";"
+ } else if processorType == grampus.ProcessorTypeGPU {
+ commandCode = "cd " + workDir + "code/" + strings.ToLower(repoName) + ";python " + bootFile + paramCode + ";"
+ }
+
command += commandCode
//get exec result
@@ -926,6 +863,9 @@ func downloadZipCode(ctx *context.Context, codePath, branchName string) error {
log.Error("GetBranchCommit failed:" + err.Error())
return err
}
+ } else {
+ log.Error("the branch is not exist: " + branchName)
+ return fmt.Errorf("The branch does not exist.")
}
archivePath = path.Join(archivePath, grampus.CodeArchiveName)
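
The most consequential grampus.go change is the processor-type branch in generateCommand: NPU jobs are now launched through the /home/work/run_train_for_openi.sh wrapper (writing logs to /tmp/log/train.log), while GPU jobs keep the plain cd-and-python invocation. A self-contained sketch of that branch follows; the processorType constants are placeholders since the grampus.ProcessorType* values are not shown in this section.

```go
package main

import (
	"fmt"
	"strings"
)

// Placeholder processor-type values for illustration only.
const (
	processorTypeNPU = "NPU"
	processorTypeGPU = "GPU"
)

// commandCode mirrors the NPU/GPU branch added to generateCommand.
func commandCode(processorType, workDir, repoName, bootFile, paramCode string) string {
	switch processorType {
	case processorTypeNPU:
		return "/bin/bash /home/work/run_train_for_openi.sh " + workDir + "code/" +
			strings.ToLower(repoName) + "/" + bootFile + " /tmp/log/train.log" + paramCode + ";"
	case processorTypeGPU:
		return "cd " + workDir + "code/" + strings.ToLower(repoName) + ";python " + bootFile + paramCode + ";"
	}
	return ""
}

func main() {
	fmt.Println(commandCode(processorTypeGPU, "/cache/", "MyRepo", "train.py", " --epochs=3"))
}
```
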
diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go
index 050dfd65b..cefa7806f 100755
--- a/routers/repo/modelarts.go
+++ b/routers/repo/modelarts.go
@@ -118,8 +118,7 @@ func MustEnableModelArts(ctx *context.Context) {
func NotebookNew(ctx *context.Context) {
notebookNewDataPrepare(ctx)
- waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
- ctx.Data["WaitCount"] = waitCount
+
ctx.HTML(200, tplModelArtsNotebookNew)
}
@@ -149,6 +148,9 @@ func notebookNewDataPrepare(ctx *context.Context) error {
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
+
return nil
}
@@ -283,34 +285,7 @@ func NotebookShow(ctx *context.Context) {
datasetDownload := make([]models.DatasetDownload, 0)
if ctx.IsSigned {
if task.Uuid != "" && task.UserID == ctx.User.ID {
- uuidList := strings.Split(task.Uuid, ";")
- for _, uuidStr := range uuidList {
- attachment, err := models.GetAttachmentByUUID(uuidStr)
- if err != nil {
- log.Error("GetAttachmentByUUID failed:%v", err.Error())
- return
- }
- dataset, err := models.GetDatasetByID(attachment.DatasetID)
- if err != nil {
- log.Error("GetDatasetByID failed:%v", err.Error())
- return
- }
- repo, err := models.GetRepositoryByID(dataset.RepoID)
- if err != nil {
- log.Error("GetRepositoryByID failed:%v", err.Error())
- return
- }
- datasetDownload = append(datasetDownload, models.DatasetDownload{
- DatasetName: attachment.Name,
- DatasetDownloadLink: attachment.S3DownloadURL(),
- RepositoryLink: repo.Link() + "/datasets",
- })
-
- }
- // datasetName, err := GetDatasetNameByUUID(task.Uuid)
- // if err == nil {
- // task.DatasetName = datasetName
- // }
+ datasetDownload = GetCloudBrainDataSetInfo(task.Uuid, true)
}
}
user, err := models.GetUserByID(task.UserID)
@@ -356,6 +331,39 @@ func NotebookShow(ctx *context.Context) {
ctx.HTML(200, tplModelArtsNotebookShow)
}
+func GetCloudBrainDataSetInfo(uuid string, isNeedDown bool) []models.DatasetDownload {
+ datasetDownload := make([]models.DatasetDownload, 0)
+
+ uuidList := strings.Split(uuid, ";")
+ for _, uuidStr := range uuidList {
+ attachment, err := models.GetAttachmentByUUID(uuidStr)
+ if err != nil {
+ log.Error("GetAttachmentByUUID failed:%v", err.Error())
+ return datasetDownload
+ }
+ dataset, err := models.GetDatasetByID(attachment.DatasetID)
+ if err != nil {
+ log.Error("GetDatasetByID failed:%v", err.Error())
+ return datasetDownload
+ }
+ repo, err := models.GetRepositoryByID(dataset.RepoID)
+ if err != nil {
+ log.Error("GetRepositoryByID failed:%v", err.Error())
+ return datasetDownload
+ }
+ url := ""
+ if isNeedDown {
+ url = attachment.S3DownloadURL()
+ }
+ datasetDownload = append(datasetDownload, models.DatasetDownload{
+ DatasetName: attachment.Name,
+ DatasetDownloadLink: url,
+ RepositoryLink: repo.Link() + "/datasets",
+ })
+ }
+ return datasetDownload
+}
+
func setShowSpecBySpecialPoolConfig(ctx *context.Context, findSpec bool, task *models.Cloudbrain) {
modelarts.InitSpecialPool()
if modelarts.SpecialPools != nil && !findSpec {
@@ -663,8 +671,6 @@ func TrainJobNew(ctx *context.Context) {
ctx.ServerError("get new train-job info failed", err)
return
}
- waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
- ctx.Data["WaitCount"] = waitCount
ctx.HTML(200, tplModelArtsTrainJobNew)
}
@@ -734,6 +740,8 @@ func trainJobNewDataPrepare(ctx *context.Context) error {
}
ctx.Data["config_list"] = configList.ParaConfigs
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
return nil
}
@@ -850,6 +858,8 @@ func trainJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModelArts
ctx.Data["dataset_name"] = datasetNames
ctx.Data["branch_name"] = form.BranchName
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
return nil
}
@@ -861,8 +871,6 @@ func TrainJobNewVersion(ctx *context.Context) {
ctx.ServerError("get new train-job info failed", err)
return
}
- waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
- ctx.Data["WaitCount"] = waitCount
ctx.HTML(200, tplModelArtsTrainJobVersionNew)
}
@@ -955,6 +963,8 @@ func trainJobNewVersionDataPrepare(ctx *context.Context) error {
return err
}
ctx.Data["config_list"] = configList.ParaConfigs
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
return nil
}
@@ -1046,6 +1056,8 @@ func versionErrorDataPrepare(ctx *context.Context, form auth.CreateModelArtsTrai
}
ctx.Data["config_list"] = configList.ParaConfigs
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
return nil
}
@@ -1070,7 +1082,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath + VersionOutputPath + "/"
logObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.LogPath + VersionOutputPath + "/"
// dataPath := "/" + setting.Bucket + "/" + setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + uuid + "/"
- branch_name := form.BranchName
+ branchName := form.BranchName
isLatestVersion := modelarts.IsLatestVersion
FlavorName := form.FlavorName
VersionCount := modelarts.VersionCountOne
@@ -1098,6 +1110,14 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
return
}
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err)
+ trainJobErrorNewDataPrepare(ctx, form)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplModelArtsTrainJobNew, &form)
+ return
+ }
+
errStr := checkModelArtsSpecialPool(ctx, flavorCode, string(models.JobTypeTrain))
if errStr != "" {
trainJobErrorNewDataPrepare(ctx, form)
@@ -1129,12 +1149,12 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
}
gitRepo, _ := git.OpenRepository(repo.RepoPath())
- commitID, _ := gitRepo.GetBranchCommitID(branch_name)
+ commitID, _ := gitRepo.GetBranchCommitID(branchName)
- if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
+ if err := downloadCode(repo, codeLocalPath, branchName); err != nil {
log.Error("downloadCode failed, server timed out: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
- ctx.RenderWithErr("Create task failed, server timed out", tplModelArtsTrainJobNew, &form)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobNew, &form)
return
}
@@ -1158,7 +1178,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
// if err := uploadCodeToObs(codeLocalPath, jobName, parentDir); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
- ctx.RenderWithErr("Failed to uploadCodeToObs", tplModelArtsTrainJobNew, &form)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobNew, &form)
return
}
@@ -1273,7 +1293,7 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
Parameters: param,
CommitID: commitID,
IsLatestVersion: isLatestVersion,
- BranchName: branch_name,
+ BranchName: branchName,
Params: form.Params,
FlavorName: FlavorName,
EngineName: EngineName,
@@ -1375,7 +1395,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
outputObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.OutputPath + VersionOutputPath + "/"
logObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.LogPath + VersionOutputPath + "/"
// dataPath := "/" + setting.Bucket + "/" + setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + uuid + "/"
- branch_name := form.BranchName
+ branchName := form.BranchName
PreVersionName := form.VersionName
FlavorName := form.FlavorName
EngineName := form.EngineName
@@ -1395,6 +1415,14 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
return
}
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err)
+ versionErrorDataPrepare(ctx, form)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplModelArtsTrainJobVersionNew, &form)
+ return
+ }
+
errStr := checkModelArtsSpecialPool(ctx, flavorCode, string(models.JobTypeTrain))
if errStr != "" {
versionErrorDataPrepare(ctx, form)
@@ -1409,11 +1437,11 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
}
gitRepo, _ := git.OpenRepository(repo.RepoPath())
- commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
+ commitID, _ := gitRepo.GetBranchCommitID(branchName)
+ if err := downloadCode(repo, codeLocalPath, branchName); err != nil {
log.Error("Failed git clone repo to local(!: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
- ctx.RenderWithErr("Failed git clone repo to local!", tplModelArtsTrainJobVersionNew, &form)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1438,7 +1466,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
if err := uploadCodeToObs(codeLocalPath, jobName, parentDir); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
- ctx.RenderWithErr("Failed to uploadCodeToObs", tplModelArtsTrainJobVersionNew, &form)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsTrainJobVersionNew, &form)
return
}
@@ -1563,7 +1591,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
Parameters: param,
PreVersionId: task.VersionID,
CommitID: commitID,
- BranchName: branch_name,
+ BranchName: branchName,
FlavorName: FlavorName,
EngineName: EngineName,
PreVersionName: PreVersionName,
@@ -1751,7 +1779,7 @@ func TrainJobShow(ctx *context.Context) {
return
}
ctx.Data["canNewJob"] = canNewJob
-
+ datasetList := make([][]models.DatasetDownload, 0)
//将运行参数转化为epoch_size = 3, device_target = Ascend的格式
for i, task := range VersionListTasks {
@@ -1774,7 +1802,7 @@ func TrainJobShow(ctx *context.Context) {
} else {
VersionListTasks[i].Parameters = ""
}
-
+ datasetList = append(datasetList, GetCloudBrainDataSetInfo(task.Uuid, false))
VersionListTasks[i].CanDel = cloudbrain.CanDeleteJob(ctx, &task.Cloudbrain)
VersionListTasks[i].CanModify = cloudbrain.CanModifyJob(ctx, &task.Cloudbrain)
}
@@ -1786,6 +1814,7 @@ func TrainJobShow(ctx *context.Context) {
ctx.Data["displayJobName"] = VersionListTasks[0].DisplayJobName
ctx.Data["version_list_task"] = VersionListTasks
ctx.Data["version_list_count"] = VersionListCount
+ ctx.Data["datasetList"] = datasetList
ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, &VersionListTasks[0].Cloudbrain)
ctx.HTML(http.StatusOK, tplModelArtsTrainJobShow)
}
@@ -1942,7 +1971,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
resultObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.ResultPath + VersionOutputPath + "/"
logObsPath := "/" + setting.Bucket + modelarts.JobPath + jobName + modelarts.LogPath + VersionOutputPath + "/"
dataPath := "/" + setting.Bucket + "/" + setting.BasePath + path.Join(uuid[0:1], uuid[1:2]) + "/" + uuid + uuid + "/"
- branch_name := form.BranchName
+ branchName := form.BranchName
FlavorName := form.FlavorName
EngineName := form.EngineName
LabelName := form.LabelName
@@ -1977,6 +2006,14 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
return
}
+ bootFileExist, err := ctx.Repo.FileExists(bootFile, branchName)
+ if err != nil || !bootFileExist {
+ log.Error("Get bootfile error:", err)
+ inferenceJobErrorNewDataPrepare(ctx, form)
+ ctx.RenderWithErr(ctx.Tr("repo.cloudbrain_bootfile_err"), tplModelArtsInferenceJobNew, &form)
+ return
+ }
+
//Determine whether the task name of the task in the project is duplicated
tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, string(models.JobTypeInference), displayJobName)
if err == nil {
@@ -2009,12 +2046,12 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
}
gitRepo, _ := git.OpenRepository(repo.RepoPath())
- commitID, _ := gitRepo.GetBranchCommitID(branch_name)
+ commitID, _ := gitRepo.GetBranchCommitID(branchName)
- if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
+ if err := downloadCode(repo, codeLocalPath, branchName); err != nil {
log.Error("Create task failed, server timed out: %s (%v)", repo.FullName(), err)
inferenceJobErrorNewDataPrepare(ctx, form)
- ctx.RenderWithErr("Create task failed, server timed out", tplModelArtsInferenceJobNew, &form)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsInferenceJobNew, &form)
return
}
@@ -2036,7 +2073,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
if err := uploadCodeToObs(codeLocalPath, jobName, ""); err != nil {
log.Error("Failed to uploadCodeToObs: %s (%v)", repo.FullName(), err)
inferenceJobErrorNewDataPrepare(ctx, form)
- ctx.RenderWithErr("Failed to uploadCodeToObs", tplModelArtsInferenceJobNew, &form)
+ ctx.RenderWithErr(ctx.Tr("cloudbrain.load_code_failed"), tplModelArtsInferenceJobNew, &form)
return
}
@@ -2095,7 +2132,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
Uuid: uuid,
Parameters: param, //modelarts train parameters
CommitID: commitID,
- BranchName: branch_name,
+ BranchName: branchName,
Params: form.Params,
FlavorName: FlavorName,
EngineName: EngineName,
@@ -2247,8 +2284,7 @@ func InferenceJobNew(ctx *context.Context) {
ctx.ServerError("get new inference-job info failed", err)
return
}
- waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
- ctx.Data["WaitCount"] = waitCount
+
ctx.HTML(200, tplModelArtsInferenceJobNew)
}
func inferenceJobNewDataPrepare(ctx *context.Context) error {
@@ -2319,6 +2355,8 @@ func inferenceJobNewDataPrepare(ctx *context.Context) error {
})
ctx.Data["MODEL_COUNT"] = model_count
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
return nil
}
@@ -2392,6 +2430,8 @@ func inferenceJobErrorNewDataPrepare(ctx *context.Context, form auth.CreateModel
ctx.Data["ckpt_name"] = form.CkptName
ctx.Data["train_url"] = form.TrainUrl
ctx.Data["datasetType"] = models.TypeCloudBrainTwo
+ waitCount := cloudbrain.GetWaitingCloudbrainCount(models.TypeCloudBrainTwo, "")
+ ctx.Data["WaitCount"] = waitCount
return nil
}
@@ -2445,7 +2485,7 @@ func InferenceJobShow(ctx *context.Context) {
ctx.Data["displayJobName"] = task.DisplayJobName
ctx.Data["task"] = task
ctx.Data["canDownload"] = cloudbrain.CanModifyJob(ctx, task)
-
+ ctx.Data["datasetDownload"] = GetCloudBrainDataSetInfo(task.Uuid, false)
tempUids := []int64{}
tempUids = append(tempUids, task.UserID)
JobCreater, err := models.GetUserNamesByIDs(tempUids)
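
GetCloudBrainDataSetInfo, introduced above, resolves a semicolon-joined UUID list into DatasetDownload entries and is now shared by the notebook, train-job, and inference-job show pages. The following self-contained illustration of that contract swaps the database lookups for an in-memory map; datasetDownload, fakeAttachments, and the sample UUIDs are invented stand-ins, and only the field names and the isNeedDown behaviour come from the diff.

```go
package main

import (
	"fmt"
	"strings"
)

type datasetDownload struct {
	DatasetName         string
	DatasetDownloadLink string
	RepositoryLink      string
}

// Stand-in for the attachment/dataset/repository lookups in models.
var fakeAttachments = map[string]datasetDownload{
	"uuid-a": {DatasetName: "mnist.zip", DatasetDownloadLink: "https://example.com/a", RepositoryLink: "/owner/repo/datasets"},
	"uuid-b": {DatasetName: "cifar.zip", DatasetDownloadLink: "https://example.com/b", RepositoryLink: "/owner/repo2/datasets"},
}

func datasetInfo(uuid string, isNeedDown bool) []datasetDownload {
	out := make([]datasetDownload, 0)
	for _, u := range strings.Split(uuid, ";") {
		d, ok := fakeAttachments[u]
		if !ok {
			return out // mirrors the real helper's early return on a failed lookup
		}
		if !isNeedDown {
			d.DatasetDownloadLink = "" // download links are only exposed when isNeedDown is true
		}
		out = append(out, d)
	}
	return out
}

func main() {
	fmt.Println(datasetInfo("uuid-a;uuid-b", false))
}
```
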
diff --git a/routers/search.go b/routers/search.go
index 628350424..8453d5c18 100644
--- a/routers/search.go
+++ b/routers/search.go
@@ -314,7 +314,7 @@ func searchRepo(ctx *context.Context, TableName string, Key string, Page int, Pa
res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "num_stars", false)...).From(from).Size(Size).Highlight(queryHighlight("alias", "description", "topics")).Do(ctx.Req.Context())
if err == nil {
esresult := makeRepoResult(res, Key, OnlyReturnNum, language)
- setForkRepoOrder(esresult)
+ setForkRepoOrder(esresult, SortBy)
resultObj.Total = resultObj.PrivateTotal + esresult.Total
isNeedSort := false
if len(resultObj.Result) > 0 {
@@ -347,24 +347,26 @@ func searchRepo(ctx *context.Context, TableName string, Key string, Page int, Pa
}
}
-func setForkRepoOrder(esresult *SearchRes) {
- forkidMap := make(map[string]int, 0)
- for index, re := range esresult.Result {
- if re["fork_id"] != nil {
- fork_id := re["fork_id"].(string)
- if _, ok := forkidMap[fork_id]; !ok {
- forkidMap[fork_id] = index
+func setForkRepoOrder(esresult *SearchRes, SortBy string) {
+ if SortBy == "default" || SortBy == "" {
+ forkidMap := make(map[string]int, 0)
+ for index, re := range esresult.Result {
+ if re["fork_id"] != nil {
+ fork_id := re["fork_id"].(string)
+ if _, ok := forkidMap[fork_id]; !ok {
+ forkidMap[fork_id] = index
+ }
}
}
- }
- for key, value := range forkidMap {
- for index, re := range esresult.Result {
- if re["id"].(string) == key {
- if value < index { //swap
- tmp := esresult.Result[index]
- esresult.Result[index] = esresult.Result[value]
- esresult.Result[value] = tmp
- break
+ for key, value := range forkidMap {
+ for index, re := range esresult.Result {
+ if re["id"].(string) == key {
+ if value < index { //swap
+ tmp := esresult.Result[index]
+ esresult.Result[index] = esresult.Result[value]
+ esresult.Result[value] = tmp
+ break
+ }
}
}
}
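
The setForkRepoOrder change above only applies the fork re-ordering when SortBy is "default" or empty, so explicit Elasticsearch sort orders are no longer overridden. A minimal standalone sketch of the re-ordering itself is shown below; repoHit is an invented stand-in for the map entries the real code iterates over.

```go
package main

import "fmt"

type repoHit struct {
	ID     string
	ForkID string // empty when the repository is not a fork
}

// orderForksAfterOrigin ensures an original repository never appears after
// the first of its forks in the result list, matching setForkRepoOrder.
func orderForksAfterOrigin(result []repoHit) {
	firstForkIndex := make(map[string]int) // fork_id -> index of the first fork seen
	for i, r := range result {
		if r.ForkID != "" {
			if _, ok := firstForkIndex[r.ForkID]; !ok {
				firstForkIndex[r.ForkID] = i
			}
		}
	}
	for originID, forkIdx := range firstForkIndex {
		for i, r := range result {
			if r.ID == originID && forkIdx < i {
				// the origin repo appears after one of its forks: swap them
				result[i], result[forkIdx] = result[forkIdx], result[i]
				break
			}
		}
	}
}

func main() {
	hits := []repoHit{{ID: "2", ForkID: "1"}, {ID: "1"}}
	orderForksAfterOrigin(hits)
	fmt.Println(hits) // [{1 } {2 1}]: the origin now precedes its fork
}
```
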
diff --git a/routers/user/home.go b/routers/user/home.go
index 25b1c518e..d8c2565c6 100755
--- a/routers/user/home.go
+++ b/routers/user/home.go
@@ -761,10 +761,14 @@ func Cloudbrains(ctx *context.Context) {
listType := ctx.Query("listType")
jobType := ctx.Query("jobType")
jobStatus := ctx.Query("jobStatus")
+ aiCenter := ctx.Query("aiCenter")
+ cluster := ctx.Query("cluster")
ctx.Data["ListType"] = listType
ctx.Data["JobType"] = jobType
ctx.Data["JobStatus"] = jobStatus
+ ctx.Data["aiCenter"] = aiCenter
+ ctx.Data["cluster"] = cluster
page := ctx.QueryInt("page")
if page <= 0 {
@@ -825,6 +829,8 @@ func Cloudbrains(ctx *context.Context) {
RepoIDList: repoIDList,
ComputeResource: listType,
Type: models.TypeCloudBrainAll,
+ AiCenter: aiCenter,
+ Cluster: cluster,
})
if err != nil {
ctx.ServerError("Get job failed:", err)
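
The Cloudbrains handler above now reads two extra query parameters, aiCenter and cluster, and forwards them into CloudbrainsOptions. The tiny example below only shows how a filtered list URL might be composed by a client; the path and all parameter values are hypothetical and the accepted cluster values are defined server-side.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("listType", "GPU")      // existing compute-resource filter
	q.Set("jobStatus", "RUNNING") // existing status filter; value is illustrative
	q.Set("cluster", "openi")     // hypothetical cluster value
	q.Set("aiCenter", "center-a") // hypothetical AI-center identifier
	fmt.Println("/cloudbrains?" + q.Encode())
}
```
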
diff --git a/templates/admin/cloudbrain/list.tmpl b/templates/admin/cloudbrain/list.tmpl
index cd5913c40..2f102f10a 100755
--- a/templates/admin/cloudbrain/list.tmpl
+++ b/templates/admin/cloudbrain/list.tmpl
@@ -14,6 +14,10 @@
人工智能算力网络推进联盟已接入10家智算中心,算力总规模1542P
+人工智能算力网络推进联盟已接入11家智算中心,算力总规模1924P