{{.i18n.Tr "repo.images.task_delete_confirm"}}
+diff --git a/models/attachment.go b/models/attachment.go
index 7c95a73dd..ea8f1645f 100755
--- a/models/attachment.go
+++ b/models/attachment.go
@@ -10,6 +10,7 @@ import (
"io"
"path"
"strings"
+ "time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/obs"
@@ -64,6 +65,7 @@ type AttachmentInfo struct {
Repo *Repository `xorm:"extends"`
RelAvatarLink string `xorm:"extends"`
UserName string `xorm:"extends"`
+ Recommend bool `xorm:"-"`
}
type AttachmentsOptions struct {
@@ -78,6 +80,7 @@ type AttachmentsOptions struct {
JustNeedZipFile bool
NeedRepoInfo bool
Keyword string
+ RecommendOnly bool
}
func (a *Attachment) AfterUpdate() {
@@ -104,6 +107,14 @@ func (a *Attachment) IncreaseDownloadCount() error {
return nil
}
+func (a *Attachment) UpdateDatasetUpdateUnix() error {
+ // Update download count.
+ if _, err := x.Exec("UPDATE `dataset` SET updated_unix="+fmt.Sprint(time.Now().Unix())+" WHERE id=?", a.DatasetID); err != nil {
+ return fmt.Errorf("UpdateDatasetUpdateUnix: %v", err)
+ }
+ return nil
+}
+
// APIFormat converts models.Attachment to api.Attachment
func (a *Attachment) APIFormat() *api.Attachment {
return &api.Attachment{
@@ -570,6 +581,11 @@ func Attachments(opts *AttachmentsOptions) ([]*AttachmentInfo, int64, error) {
builder.Eq{"attachment.is_private": opts.IsPrivate},
)
}
+ if opts.RecommendOnly {
+ cond = cond.And(builder.In("attachment.id", builder.Select("attachment.id").
+ From("attachment").
+ Join("INNER", "dataset", "attachment.dataset_id = dataset.id and dataset.recommend=true")))
+ }
if opts.JustNeedZipFile {
var DecompressState []int32
@@ -618,6 +634,7 @@ func Attachments(opts *AttachmentsOptions) ([]*AttachmentInfo, int64, error) {
if err != nil {
return nil, 0, fmt.Errorf("GetDatasetByID failed error: %v", err)
}
+ attachment.Recommend = dataset.Recommend
repo, err := GetRepositoryByID(dataset.RepoID)
if err == nil {
attachment.Repo = repo
diff --git a/models/cloudbrain.go b/models/cloudbrain.go
index 4a82a2031..44b177471 100755
--- a/models/cloudbrain.go
+++ b/models/cloudbrain.go
@@ -1,13 +1,14 @@
package models
import (
- "code.gitea.io/gitea/modules/util"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
+ "code.gitea.io/gitea/modules/util"
+
"xorm.io/builder"
"xorm.io/xorm"
@@ -111,7 +112,7 @@ type Cloudbrain struct {
SubTaskName string
ContainerID string
ContainerIp string
- CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
Duration int64 `xorm:"DEFAULT 0"` //运行时长 单位秒
TrainJobDuration string `xorm:"DEFAULT '00:00:00'"`
@@ -184,6 +185,12 @@ func (task *Cloudbrain) ComputeAndSetDuration() {
task.TrainJobDuration = ConvertDurationToStr(d)
}
+func (task *Cloudbrain) CorrectCreateUnix() {
+ if task.StartTime > 0 && task.CreatedUnix > task.StartTime {
+ task.CreatedUnix = task.StartTime
+ }
+}
+
func (task *Cloudbrain) IsTerminal() bool {
status := task.Status
return status == string(ModelArtsTrainJobCompleted) || status == string(ModelArtsTrainJobFailed) || status == string(ModelArtsTrainJobKilled) || status == string(ModelArtsStopped) || status == string(JobStopped) || status == string(JobFailed) || status == string(JobSucceeded)
@@ -218,9 +225,22 @@ func ParseAndSetDurationFromCloudBrainOne(result JobResultPayload, task *Cloudbr
task.EndTime = timeutil.TimeStamp(result.JobStatus.CompletedTime / 1000)
}
}
+ task.CorrectCreateUnix()
task.ComputeAndSetDuration()
}
+func ParseAndSetDurationFromModelArtsNotebook(result *GetNotebook2Result, job *Cloudbrain) {
+ if job.StartTime == 0 && result.Lease.UpdateTime > 0 {
+ job.StartTime = timeutil.TimeStamp(result.Lease.UpdateTime / 1000)
+ }
+ job.Status = result.Status
+ if job.EndTime == 0 && IsModelArtsDebugJobTerminal(job.Status) {
+ job.EndTime = timeutil.TimeStampNow()
+ }
+ job.CorrectCreateUnix()
+ job.ComputeAndSetDuration()
+}
+
type CloudbrainInfo struct {
Cloudbrain `xorm:"extends"`
User `xorm:"extends"`
@@ -306,6 +326,7 @@ type CloudbrainsOptions struct {
IsLatestVersion string
JobTypeNot bool
NeedRepoInfo bool
+ RepoIDList []int64
}
type TaskPod struct {
@@ -546,13 +567,23 @@ type PoolInfo struct {
PoolType string `json:"pool_type"`
}
-type CommitImageParams struct {
+type CommitImageCloudBrainParams struct {
Ip string `json:"ip"`
TaskContainerId string `json:"taskContainerId"`
ImageTag string `json:"imageTag"`
ImageDescription string `json:"imageDescription"`
}
+type CommitImageParams struct {
+ CommitImageCloudBrainParams
+ IsPrivate bool
+ Topics []string
+ CloudBrainType int
+ UID int64
+ Place string
+ Type int
+}
+
type CommitImageResult struct {
Code string `json:"code"`
Msg string `json:"msg"`
@@ -1178,6 +1209,12 @@ func Cloudbrains(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) {
)
}
}
+ if len(opts.RepoIDList) > 0 {
+ cond = cond.And(
+ builder.In("cloudbrain.repo_id", opts.RepoIDList),
+ )
+
+ }
var count int64
var err error
@@ -1354,7 +1391,7 @@ func CloudbrainsVersionList(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int, e
func CreateCloudbrain(cloudbrain *Cloudbrain) (err error) {
cloudbrain.TrainJobDuration = DURATION_STR_ZERO
- if _, err = x.Insert(cloudbrain); err != nil {
+ if _, err = x.NoAutoTime().Insert(cloudbrain); err != nil {
return err
}
return nil
@@ -1464,7 +1501,7 @@ func UpdateTrainJobVersion(job *Cloudbrain) error {
func updateJobTrainVersion(e Engine, job *Cloudbrain) error {
var sess *xorm.Session
sess = e.Where("job_id = ? AND version_name=?", job.JobID, job.VersionName)
- _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time").Update(job)
+ _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job)
return err
}
@@ -1553,7 +1590,7 @@ func UpdateInferenceJob(job *Cloudbrain) error {
func updateInferenceJob(e Engine, job *Cloudbrain) error {
var sess *xorm.Session
sess = e.Where("job_id = ?", job.JobID)
- _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time").Update(job)
+ _, err := sess.Cols("status", "train_job_duration", "duration", "start_time", "end_time", "created_unix").Update(job)
return err
}
func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) {
@@ -1569,7 +1606,7 @@ func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) {
return err
}
- if _, err = sess.Insert(new); err != nil {
+ if _, err = sess.NoAutoTime().Insert(new); err != nil {
sess.Rollback()
return err
}
@@ -1580,3 +1617,64 @@ func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) {
return nil
}
+func CloudbrainAll(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) {
+ sess := x.NewSession()
+ defer sess.Close()
+ var cond = builder.NewCond()
+ if (opts.Type) >= 0 {
+ cond = cond.And(
+ builder.Eq{"cloudbrain.type": opts.Type},
+ )
+ }
+
+ var count int64
+ var err error
+ condition := "cloudbrain.user_id = `user`.id"
+ if len(opts.Keyword) == 0 {
+ count, err = sess.Where(cond).Count(new(Cloudbrain))
+ } else {
+ lowerKeyWord := strings.ToLower(opts.Keyword)
+
+ cond = cond.And(builder.Or(builder.Like{"LOWER(cloudbrain.job_name)", lowerKeyWord}, builder.Like{"LOWER(cloudbrain.display_job_name)", lowerKeyWord}, builder.Like{"`user`.lower_name", lowerKeyWord}))
+ count, err = sess.Table(&Cloudbrain{}).Where(cond).
+ Join("left", "`user`", condition).Count(new(CloudbrainInfo))
+
+ }
+
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %v", err)
+ }
+
+ if opts.Page >= 0 && opts.PageSize > 0 {
+ var start int
+ if opts.Page == 0 {
+ start = 0
+ } else {
+ start = (opts.Page - 1) * opts.PageSize
+ }
+ sess.Limit(opts.PageSize, start)
+ }
+
+ sess.OrderBy("cloudbrain.created_unix DESC")
+ cloudbrains := make([]*CloudbrainInfo, 0, setting.UI.IssuePagingNum)
+ if err := sess.Table(&Cloudbrain{}).Unscoped().Where(cond).
+ Join("left", "`user`", condition).
+ Find(&cloudbrains); err != nil {
+ return nil, 0, fmt.Errorf("Find: %v", err)
+ }
+ if opts.NeedRepoInfo {
+ var ids []int64
+ for _, task := range cloudbrains {
+ ids = append(ids, task.RepoID)
+ }
+ repositoryMap, err := GetRepositoriesMapByIDs(ids)
+ if err == nil {
+ for _, task := range cloudbrains {
+ task.Repo = repositoryMap[task.RepoID]
+ }
+ }
+
+ }
+
+ return cloudbrains, count, nil
+}
diff --git a/models/cloudbrain_image.go b/models/cloudbrain_image.go
new file mode 100644
index 000000000..eb21e0d87
--- /dev/null
+++ b/models/cloudbrain_image.go
@@ -0,0 +1,583 @@
+package models
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "xorm.io/builder"
+
+ "code.gitea.io/gitea/modules/timeutil"
+)
+
+const RECOMMOND_TYPE = 5
+const NORMAL_TYPE = 0
+const IMAGE_STATUS_COMMIT = 0
+const IMAGE_STATUS_SUCCESS = 1
+const IMAGE_STATUS_Failed = 2
+
+type Image struct {
+ ID int64 `xorm:"pk autoincr" json:"id"`
+ Type int `xorm:"INDEX NOT NULL" json:"type"` //0 normal 5官方推荐,中间值保留为后续扩展
+ CloudbrainType int `xorm:"INDEX NOT NULL" json:"cloudbrainType"` //0 云脑一 1云脑二
+ UID int64 `xorm:"INDEX NOT NULL" json:"uid"`
+ IsPrivate bool `xorm:"INDEX NOT NULL" json:"isPrivate"`
+ Tag string `xorm:"varchar(100) UNIQUE" json:"tag"`
+ Description string `xorm:"varchar(765)" json:"description"`
+ Topics []string `xorm:"TEXT JSON" json:"topics"`
+ Place string `xorm:"varchar(300)" json:"place"`
+ NumStars int `xorm:"NOT NULL DEFAULT 0" json:"numStars"`
+ IsStar bool `xorm:"-" json:"isStar"`
+ UserName string `xorm:"-" json:"userName"`
+ RelAvatarLink string `xorm:"-" json:"relAvatarLink"`
+ Status int `xorm:"INDEX NOT NULL DEFAULT 0" json:"status"` //0代表正在提交,1提交完成,2提交失败
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created" json:"createdUnix"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated" json:"updatedUnix"`
+}
+
+type ImageList []*Image
+
+type ImageStar struct {
+ ID int64 `xorm:"pk autoincr"`
+ UID int64 `xorm:"UNIQUE(s)"`
+ ImageID int64 `xorm:"UNIQUE(s)"`
+ CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+type ImageTopic struct {
+ ID int64
+ Name string `xorm:"UNIQUE VARCHAR(105)"`
+ ImageCount int
+ CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
+ UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
+}
+
+type ImageTopicRelation struct {
+ ImageID int64 `xorm:"UNIQUE(s)"`
+ TopicID int64 `xorm:"UNIQUE(s)"`
+}
+
+type SearchImageOptions struct {
+ Keyword string
+ UID int64
+ Status int
+ IncludePublicOnly bool
+ IncludeOfficialOnly bool
+ IncludePrivateOnly bool
+ IncludeStarByMe bool
+ IncludeCustom bool
+ IncludeOwnerOnly bool
+ Topics string
+ ListOptions
+ SearchOrderBy
+}
+type ErrorImageTagExist struct {
+ Tag string
+}
+
+type ErrorImageCommitting struct {
+ Tag string
+}
+
+type ImagesPageResult struct {
+ Count int64 `json:"count"`
+ Images []*Image `json:"images"`
+}
+
+func (err ErrorImageTagExist) Error() string {
+ return fmt.Sprintf("Image already exists [tag: %s]", err.Tag)
+}
+
+func (err ErrorImageCommitting) Error() string {
+ return fmt.Sprintf("Image already exists [tag: %s]", err.Tag)
+}
+
+type ErrImageNotExist struct {
+ ID int64
+ Tag string
+}
+
+func (err ErrImageNotExist) Error() string {
+ return fmt.Sprintf("Image does not exist [id: %d] [tag: %s]", err.ID, err.Tag)
+}
+
+func IsErrorImageCommitting(err error) bool {
+ _, ok := err.(ErrorImageCommitting)
+ return ok
+}
+
+func IsErrImageNotExist(err error) bool {
+ _, ok := err.(ErrImageNotExist)
+ return ok
+}
+
+func IsErrImageTagExist(err error) bool {
+ _, ok := err.(ErrorImageTagExist)
+ return ok
+}
+
+func IsImageExist(tag string) (bool, error) {
+ return x.Exist(&Image{
+ Tag: tag,
+ })
+}
+
+func IsImageExistByUser(tag string, uid int64) (bool, error) {
+ return x.Exist(&Image{
+ Tag: tag,
+ UID: uid,
+ Status: IMAGE_STATUS_SUCCESS,
+ })
+}
+
+type FindImageTopicOptions struct {
+ ListOptions
+ ImageID int64
+ Keyword string
+}
+
+func (opts *FindImageTopicOptions) toConds() builder.Cond {
+ var cond = builder.NewCond()
+ if opts.ImageID > 0 {
+ cond = cond.And(builder.Eq{"image_topic_relation.image_id": opts.ImageID})
+ }
+
+ if opts.Keyword != "" {
+ cond = cond.And(builder.Like{"image_topic.name", strings.ToLower(opts.Keyword)})
+ }
+
+ return cond
+}
+
+func GetImageByID(id int64) (*Image, error) {
+ rel := new(Image)
+ has, err := x.
+ ID(id).
+ Get(rel)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrImageNotExist{ID: id}
+ }
+
+ return rel, nil
+}
+
+func GetImageByTag(tag string) (*Image, error) {
+
+ image := &Image{Tag: tag}
+ has, err := x.
+ Get(image)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrImageNotExist{Tag: tag}
+ }
+
+ return image, nil
+}
+
// SanitizeAndValidateImageTopics normalizes topic names (trim + lowercase),
// drops empties and duplicates of already-accepted topics, and splits the
// remainder into valid names (at most 35 runes) and invalid ones.
func SanitizeAndValidateImageTopics(topics []string) (validTopics []string, invalidTopics []string) {
	validTopics = make([]string, 0)
	invalidTopics = make([]string, 0)
	seen := make(map[string]struct{})

	for _, raw := range topics {
		name := strings.TrimSpace(strings.ToLower(raw))
		if name == "" {
			continue // skip empty entries
		}
		if _, dup := seen[name]; dup {
			continue // skip repeats of an already-accepted topic
		}
		// Length is measured in runes, not bytes, so multi-byte names count
		// their characters.
		if utf8.RuneCountInString(name) > 35 {
			invalidTopics = append(invalidTopics, name)
			continue
		}
		seen[name] = struct{}{}
		validTopics = append(validTopics, name)
	}

	return validTopics, invalidTopics
}
+func FindImageTopics(opts *FindImageTopicOptions) (topics []*ImageTopic, err error) {
+ sess := x.Select("image_topic.*").Where(opts.toConds())
+ if opts.ImageID > 0 {
+ sess.Join("INNER", "image_topic_relation", "image_topic_relation.topic_id = image_topic.id")
+ }
+ if opts.PageSize != 0 && opts.Page != 0 {
+ sess = opts.setSessionPagination(sess)
+ }
+ return topics, sess.Desc("image_topic.image_count").Find(&topics)
+}
+
// SaveImageTopics replaces an image's topic set with topicNames inside one
// transaction: topics not yet attached are created/linked, attached topics no
// longer listed are unlinked, and the image's denormalized Topics column is
// refreshed from the final state.
func SaveImageTopics(imageID int64, topicNames ...string) error {
	// Current topics attached to the image (read outside the transaction).
	topics, err := FindImageTopics(&FindImageTopicOptions{
		ImageID: imageID,
	})
	if err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()

	if err := sess.Begin(); err != nil {
		return err
	}

	// Requested names that are not attached yet (case-insensitive compare,
	// blank names ignored).
	var addedTopicNames []string
	for _, topicName := range topicNames {
		if strings.TrimSpace(topicName) == "" {
			continue
		}

		var found bool
		for _, t := range topics {
			if strings.EqualFold(topicName, t.Name) {
				found = true
				break
			}
		}
		if !found {
			addedTopicNames = append(addedTopicNames, topicName)
		}
	}

	// Attached topics that are absent from the new list.
	var removeTopics []*ImageTopic
	for _, t := range topics {
		var found bool
		for _, topicName := range topicNames {
			if strings.EqualFold(topicName, t.Name) {
				found = true
				break
			}
		}
		if !found {
			removeTopics = append(removeTopics, t)
		}
	}

	for _, topicName := range addedTopicNames {
		_, err := addTopicByNameToImage(sess, imageID, topicName)
		if err != nil {
			return err
		}
	}

	for _, topic := range removeTopics {
		err := removeTopicFromImage(sess, imageID, topic)
		if err != nil {
			return err
		}
	}

	// Re-read the final topic names (most used first) to refresh the
	// denormalized Topics column on the image row.
	topicNames = make([]string, 0, 25)
	if err := sess.Table("image_topic").Cols("name").
		Join("INNER", "image_topic_relation", "image_topic_relation.topic_id = image_topic.id").
		Where("image_topic_relation.image_id = ?", imageID).Desc("image_topic.image_count").Find(&topicNames); err != nil {
		return err
	}

	if _, err := sess.ID(imageID).Cols("topics").Update(&Image{
		Topics: topicNames,
	}); err != nil {
		return err
	}

	return sess.Commit()
}
+
+func addTopicByNameToImage(e Engine, imageID int64, topicName string) (*ImageTopic, error) {
+ var topic ImageTopic
+ has, err := e.Where("name = ?", topicName).Get(&topic)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ topic.Name = topicName
+ topic.ImageCount = 1
+ if _, err := e.Insert(&topic); err != nil {
+ return nil, err
+ }
+ } else {
+ topic.ImageCount++
+ if _, err := e.ID(topic.ID).Cols("image_count").Update(&topic); err != nil {
+ return nil, err
+ }
+ }
+
+ if _, err := e.Insert(&ImageTopicRelation{
+ ImageID: imageID,
+ TopicID: topic.ID,
+ }); err != nil {
+ return nil, err
+ }
+
+ return &topic, nil
+}
+
+func removeTopicFromImage(e Engine, imageId int64, topic *ImageTopic) error {
+ topic.ImageCount--
+ if _, err := e.ID(topic.ID).Cols("image_count").Update(topic); err != nil {
+ return err
+ }
+
+ if _, err := e.Delete(&ImageTopicRelation{
+ ImageID: imageId,
+ TopicID: topic.ID,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func SearchImage(opts *SearchImageOptions) (ImageList, int64, error) {
+ cond := SearchImageCondition(opts)
+ return SearchImageByCondition(opts, cond)
+}
+
+func SearchImageCondition(opts *SearchImageOptions) builder.Cond {
+ var cond = builder.NewCond()
+
+ if len(opts.Keyword) > 0 {
+
+ var subQueryCond = builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+
+ subQueryCond = subQueryCond.Or(builder.Like{"LOWER(image_topic.name)", strings.ToLower(v)})
+
+ }
+ subQuery := builder.Select("image_topic_relation.image_id").From("image_topic_relation").
+ Join("INNER", "image_topic", "image_topic.id = image_topic_relation.topic_id").
+ Where(subQueryCond).
+ GroupBy("image_topic_relation.image_id")
+ var keywordCond = builder.In("id", subQuery)
+
+ var likes = builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+ likes = likes.Or(builder.Like{"LOWER(tag)", strings.ToLower(v)})
+
+ likes = likes.Or(builder.Like{"LOWER(description)", strings.ToLower(v)})
+
+ }
+ keywordCond = keywordCond.Or(likes)
+
+ cond = cond.And(keywordCond)
+
+ }
+ if len(opts.Topics) > 0 { //标签精确匹配
+ var subQueryCond = builder.NewCond()
+ for _, v := range strings.Split(opts.Keyword, ",") {
+
+ subQueryCond = subQueryCond.Or(builder.Eq{"LOWER(image_topic.name)": strings.ToLower(v)})
+ subQuery := builder.Select("image_topic_relation.image_id").From("image_topic_relation").
+ Join("INNER", "image_topic", "image_topic.id = image_topic_relation.topic_id").
+ Where(subQueryCond).
+ GroupBy("image_topic_relation.image_id")
+ var topicCond = builder.In("id", subQuery)
+ cond = cond.And(topicCond)
+ }
+ }
+
+ if opts.IncludePublicOnly {
+ cond = cond.And(builder.Eq{"is_private": false})
+ }
+
+ if opts.IncludePrivateOnly {
+ cond = cond.And(builder.Eq{"is_private": true})
+ }
+
+ if opts.IncludeOwnerOnly {
+
+ cond = cond.And(builder.Eq{"uid": opts.UID})
+ }
+ if opts.IncludeOfficialOnly {
+ cond = cond.And(builder.Eq{"type": RECOMMOND_TYPE})
+ }
+ if opts.Status >= 0 {
+ cond = cond.And(builder.Eq{"status": opts.Status})
+ }
+
+ if opts.IncludeStarByMe {
+
+ subQuery := builder.Select("image_id").From("image_star").
+ Where(builder.Eq{"uid": opts.UID})
+ var starCond = builder.In("id", subQuery)
+ cond = cond.And(starCond)
+
+ }
+
+ return cond
+}
+
+func SearchImageByCondition(opts *SearchImageOptions, cond builder.Cond) (ImageList, int64, error) {
+ if opts.Page <= 0 {
+ opts.Page = 1
+ }
+
+ var err error
+ sess := x.NewSession()
+ defer sess.Close()
+
+ images := make(ImageList, 0, opts.PageSize)
+ count, err := sess.Where(cond).Count(new(Image))
+
+ if err != nil {
+ return nil, 0, fmt.Errorf("Count: %v", err)
+ }
+
+ sess.Where(cond).OrderBy(opts.SearchOrderBy.String())
+
+ if opts.PageSize > 0 {
+ sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize)
+ }
+ if err = sess.Find(&images); err != nil {
+ return nil, 0, fmt.Errorf("Images: %v", err)
+ }
+
+ if err = images.loadAttributes(sess, opts.UID); err != nil {
+ return nil, 0, fmt.Errorf("LoadAttributes: %v", err)
+ }
+
+ return images, count, nil
+}
+
+func (images ImageList) loadAttributes(e Engine, uid int64) error {
+ if len(images) == 0 {
+ return nil
+ }
+
+ set := make(map[int64]struct{})
+
+ for i := range images {
+ set[images[i].UID] = struct{}{}
+ }
+
+ // Load creators.
+ users := make(map[int64]*User, len(set))
+ if err := e.Table("\"user\"").
+ Cols("name", "lower_name", "avatar", "email").
+ Where("id > 0").
+ In("id", keysInt64(set)).
+ Find(&users); err != nil {
+ return fmt.Errorf("find users: %v", err)
+ }
+
+ for i := range images {
+ images[i].UserName = users[images[i].UID].Name
+ images[i].RelAvatarLink = users[images[i].UID].RelAvatarLink()
+ if uid == -1 {
+ images[i].IsStar = false
+ } else {
+ images[i].IsStar = isImageStaring(e, uid, images[i].ID)
+ }
+ }
+
+ return nil
+}
+
+func GetCommittingImageCount() int {
+
+ total, err := x.Where("status =?", 0).Count(new(Image))
+
+ if err != nil {
+ return 0
+ }
+ return int(total)
+}
+
+func CreateLocalImage(image *Image) error {
+
+ _, err := x.Insert(image)
+ return err
+}
+
+func UpdateLocalImage(image *Image) error {
+
+ _, err := x.ID(image.ID).Cols("description", "is_private", "status").Update(image)
+ return err
+}
+
+func UpdateLocalImageStatus(image *Image) error {
+
+ _, err := x.ID(image.ID).Cols("status").Update(image)
+ return err
+}
+
+func DeleteLocalImage(id int64) error {
+ image := new(Image)
+ _, err := x.ID(id).Delete(image)
+ return err
+}
+
+//star or unstar Image
+func StarImage(userID, imageID int64, star bool) error {
+ sess := x.NewSession()
+ defer sess.Close()
+
+ if err := sess.Begin(); err != nil {
+ return err
+ }
+
+ if star {
+ if isImageStaring(sess, userID, imageID) {
+ return nil
+ }
+
+ if _, err := sess.Insert(&ImageStar{UID: userID, ImageID: imageID}); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE `image` SET num_stars = num_stars + 1 WHERE id = ?", imageID); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE `user` SET num_image_stars = num_image_stars + 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ } else {
+ if !isImageStaring(sess, userID, imageID) {
+ return nil
+ }
+
+ if _, err := sess.Delete(&ImageStar{0, userID, imageID, 0}); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE `image` SET num_stars = num_stars - 1 WHERE id = ?", imageID); err != nil {
+ return err
+ }
+ if _, err := sess.Exec("UPDATE `user` SET num_image_stars = num_image_stars - 1 WHERE id = ?", userID); err != nil {
+ return err
+ }
+ }
+
+ return sess.Commit()
+}
+
+func IsImageStaring(userID, datasetID int64) bool {
+ return isImageStaring(x, userID, datasetID)
+
+}
+
+func isImageStaring(e Engine, userID, imageID int64) bool {
+ has, _ := e.Get(&ImageStar{0, userID, imageID, 0})
+ return has
+}
+func RecommendImage(imageId int64, recommond bool) error {
+
+ image := Image{Type: GetRecommondType(recommond)}
+ _, err := x.ID(imageId).Cols("type").Update(image)
+ return err
+}
+
+func GetRecommondType(recommond bool) int {
+ if recommond {
+
+ return RECOMMOND_TYPE
+ } else {
+ return NORMAL_TYPE
+ }
+
+}
diff --git a/models/dataset.go b/models/dataset.go
index 95800100c..e841261c7 100755
--- a/models/dataset.go
+++ b/models/dataset.go
@@ -23,7 +23,8 @@ type Dataset struct {
Category string
Description string `xorm:"TEXT"`
DownloadTimes int64
- NumStars int `xorm:"INDEX NOT NULL DEFAULT 0"`
+ NumStars int `xorm:"INDEX NOT NULL DEFAULT 0"`
+ Recommend bool `xorm:"INDEX NOT NULL DEFAULT false"`
License string
Task string
ReleaseID int64 `xorm:"INDEX"`
@@ -99,6 +100,7 @@ type SearchDatasetOptions struct {
OwnerID int64
RepoID int64
IncludePublic bool
+ RecommendOnly bool
Category string
Task string
License string
@@ -132,6 +134,13 @@ func CreateDataset(dataset *Dataset) (err error) {
}
+func RecommendDataset(dataSetId int64, recommend bool) error {
+
+ dataset := Dataset{Recommend: recommend}
+ _, err := x.ID(dataSetId).Cols("recommend").Update(dataset)
+ return err
+}
+
func SearchDataset(opts *SearchDatasetOptions) (DatasetList, int64, error) {
cond := SearchDatasetCondition(opts)
return SearchDatasetByCondition(opts, cond)
@@ -146,6 +155,9 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond {
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID})
}
+ if opts.RecommendOnly {
+ cond = cond.And(builder.Eq{"dataset.recommend": opts.RecommendOnly})
+ }
if opts.IncludePublic {
cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
@@ -198,7 +210,7 @@ func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (Da
defer sess.Close()
datasets := make(DatasetList, 0, opts.PageSize)
- selectColumnsSql := "distinct dataset.id,dataset.title, dataset.status, dataset.category, dataset.description, dataset.download_times, dataset.license, dataset.task, dataset.release_id, dataset.user_id, dataset.repo_id, dataset.created_unix,dataset.updated_unix,dataset.num_stars"
+ selectColumnsSql := "distinct dataset.id,dataset.title, dataset.status, dataset.category, dataset.description, dataset.download_times, dataset.license, dataset.task, dataset.release_id, dataset.user_id, dataset.repo_id, dataset.created_unix,dataset.updated_unix,dataset.num_stars,dataset.recommend"
count, err := sess.Distinct("dataset.id").Join("INNER", "repository", "repository.id = dataset.repo_id").
Join("INNER", "attachment", "attachment.dataset_id=dataset.id").
diff --git a/models/models.go b/models/models.go
index 2ec61941d..2a2e119fb 100755
--- a/models/models.go
+++ b/models/models.go
@@ -131,6 +131,10 @@ func init() {
new(Dataset),
new(DatasetStar),
new(Cloudbrain),
+ new(Image),
+ new(ImageStar),
+ new(ImageTopic),
+ new(ImageTopicRelation),
new(FileChunk),
new(BlockChain),
new(RecommendOrg),
diff --git a/models/org.go b/models/org.go
index 28a6701c5..2a6528023 100755
--- a/models/org.go
+++ b/models/org.go
@@ -193,22 +193,22 @@ func (org *User) getOrgStatistics() (int, error) {
}
func FindTopNStarsOrgs(n int) ([]*OrgScore, error) {
- sql := "select a.id,sum(b.num_stars) score from \"user\" a ,repository b where a.id=b.owner_id and a.type=1 group by a.id order by score desc limit " + strconv.Itoa(n)
+ sql := "select a.id,sum(b.num_stars) score from \"user\" a ,repository b where a.id=b.owner_id and a.type=1 and a.visibility=0 group by a.id order by score desc limit " + strconv.Itoa(n)
return findTopNOrgs(sql)
}
func FindTopNMembersOrgs(n int) ([]*OrgScore, error) {
sql := "select id, count(user_id) score from" +
- " (select org_id as id, uid as user_id from org_user " +
+ " (select org_id as id, uid as user_id from org_user o, \"user\" u where o.org_id=u.id and u.visibility=0 " +
"union select a.id,b.user_id from \"user\" a,collaboration b,repository c " +
- "where a.type=1 and a.id=c.owner_id and b.repo_id=c.id) d " +
+ "where a.type=1 and a.visibility=0 and a.id=c.owner_id and b.repo_id=c.id) d " +
"group by id order by score desc limit " + strconv.Itoa(n)
return findTopNOrgs(sql)
}
func FindTopNOpenIOrgs(n int) ([]*OrgScore, error) {
- sql := "select org_id id,num_score score from org_statistic order by num_score desc limit " + strconv.Itoa(n)
+ sql := "select org_id id,num_score score from org_statistic a, \"user\" b where a.org_id=b.id and b.visibility=0 order by num_score desc limit " + strconv.Itoa(n)
return findTopNOrgs(sql)
}
diff --git a/models/user.go b/models/user.go
index 71885aeb1..7d4c8ce34 100755
--- a/models/user.go
+++ b/models/user.go
@@ -157,6 +157,7 @@ type User struct {
NumFollowing int `xorm:"NOT NULL DEFAULT 0"`
NumStars int
NumDatasetStars int `xorm:"NOT NULL DEFAULT 0"`
+ NumImageStars int `xorm:"NOT NULL DEFAULT 0"`
NumRepos int
// For organization
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go
index 65ce642d5..34d84555c 100644
--- a/models/user_business_analysis.go
+++ b/models/user_business_analysis.go
@@ -246,7 +246,7 @@ func QueryUserStaticDataByTableName(start int, pageSize int, tableName string, q
}
log.Info("query return total:" + fmt.Sprint(allCount))
userBusinessAnalysisAllList := make([]*UserBusinessAnalysisAll, 0)
- if err := statictisSess.Table(tableName).Where(cond).OrderBy("commit_count desc,id desc").Limit(pageSize, start).
+ if err := statictisSess.Table(tableName).Where(cond).OrderBy("user_index desc,id desc").Limit(pageSize, start).
Find(&userBusinessAnalysisAllList); err != nil {
return nil, 0
}
@@ -448,6 +448,9 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
var indexTotal int64
indexTotal = 0
insertCount := 0
+ userIndexMap := make(map[int64]float64, 0)
+ maxUserIndex := 0.0
+ minUserIndex := 100000000.0
dateRecordBatch := make([]UserBusinessAnalysisAll, 0)
for {
sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal))
@@ -494,7 +497,13 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
dateRecordAll.GpuBenchMarkJob = getMapKeyStringValue(fmt.Sprint(dateRecordAll.ID)+"_GpuBenchMarkJob", CloudBrainTaskItemMap)
dateRecordAll.CommitModelCount = getMapValue(dateRecordAll.ID, AiModelManageMap)
dateRecordAll.UserIndex = getUserIndexFromAnalysisAll(dateRecordAll, ParaWeight)
-
+ userIndexMap[dateRecordAll.ID] = dateRecordAll.UserIndex
+ if maxUserIndex < dateRecordAll.UserIndex {
+ maxUserIndex = dateRecordAll.UserIndex
+ }
+ if minUserIndex > dateRecordAll.UserIndex {
+ minUserIndex = dateRecordAll.UserIndex
+ }
dateRecordBatch = append(dateRecordBatch, dateRecordAll)
if len(dateRecordBatch) >= BATCH_INSERT_SIZE {
insertTable(dateRecordBatch, tableName, statictisSess)
@@ -523,9 +532,22 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS
}
}
+ //normalization
+ for k, v := range userIndexMap {
+ tmpResult := (v - minUserIndex) / (maxUserIndex - minUserIndex)
+ if tmpResult > 0.99 {
+ tmpResult = 0.99
+ }
+ updateUserIndex(tableName, statictisSess, k, tmpResult)
+ }
log.Info("refresh data finished.tableName=" + tableName + " total record:" + fmt.Sprint(insertCount))
}
+func updateUserIndex(tableName string, statictisSess *xorm.Session, userId int64, userIndex float64) {
+ updateSql := "UPDATE public." + tableName + " set user_index=" + fmt.Sprint(userIndex*100) + " where id=" + fmt.Sprint(userId)
+ statictisSess.Exec(updateSql)
+}
+
func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, statictisSess *xorm.Session) {
insertBatchSql := "INSERT INTO public." + tableName +
@@ -809,7 +831,12 @@ func getUserIndex(dateRecord UserBusinessAnalysis, ParaWeight map[string]float64
result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1)
result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1)
result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3)
- result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1)
+ codeLine := float64(dateRecord.CommitCodeSize) / 1000
+ limitCodeLine := getParaWeightValue("LimitCommitCodeSize", ParaWeight, 100)
+ if codeLine >= limitCodeLine {
+ codeLine = limitCodeLine
+ }
+ result += codeLine * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1)
result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2)
result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1)
result += float64(dateRecord.CreateRepoCount) * getParaWeightValue("CreateRepoCount", ParaWeight, 0.05)
diff --git a/modules/auth/cloudbrain.go b/modules/auth/cloudbrain.go
index 9d3d6290f..e5be38084 100755
--- a/modules/auth/cloudbrain.go
+++ b/modules/auth/cloudbrain.go
@@ -27,9 +27,37 @@ type CreateCloudBrainForm struct {
type CommitImageCloudBrainForm struct {
Description string `form:"description" binding:"Required"`
- Tag string `form:"tag" binding:"Required"`
+ Type int `form:"type" binding:"Required"`
+ Tag string `form:"tag" binding:"Required;MaxSize(100)" `
+ IsPrivate bool `form:"isPrivate" binding:"Required"`
+ Topics string `form:"topics"`
+}
+
+type CommitAdminImageCloudBrainForm struct {
+ Description string `form:"description" binding:"Required"`
+ Type int `form:"type" binding:"Required"`
+ Tag string `form:"tag" binding:"Required;MaxSize(100)" `
+ IsPrivate bool `form:"isPrivate" binding:"Required"`
+ Topics string `form:"topics"`
+ Place string `form:"place" binding:"Required"`
+ IsRecommend bool `form:"isRecommend" binding:"Required"`
+}
+
+type EditImageCloudBrainForm struct {
+ ID int64 `form:"id" binding:"Required"`
+ Description string `form:"description" binding:"Required"`
+ IsPrivate bool `form:"isPrivate" binding:"Required"`
+ Topics string `form:"topics"`
}
func (f *CreateCloudBrainForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
return validate(errs, ctx.Data, f, ctx.Locale)
}
+
+func (f *CommitImageCloudBrainForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
+ return validate(errs, ctx.Data, f, ctx.Locale)
+}
+
+func (f *EditImageCloudBrainForm) Validate(ctx *macaron.Context, errs binding.Errors) binding.Errors {
+ return validate(errs, ctx.Data, f, ctx.Locale)
+}
diff --git a/modules/cloudbrain/cloudbrain.go b/modules/cloudbrain/cloudbrain.go
index 4a89f9393..eaf680c65 100755
--- a/modules/cloudbrain/cloudbrain.go
+++ b/modules/cloudbrain/cloudbrain.go
@@ -1,6 +1,7 @@
package cloudbrain
import (
+ "code.gitea.io/gitea/modules/timeutil"
"encoding/json"
"errors"
"strconv"
@@ -85,6 +86,18 @@ func isAdminOrJobCreater(ctx *context.Context, job *models.Cloudbrain, err error
}
+func isAdminOrImageCreater(ctx *context.Context, image *models.Image, err error) bool {
+ if !ctx.IsSigned {
+ return false
+ }
+ if err != nil {
+ return ctx.IsUserSiteAdmin()
+ } else {
+ return ctx.IsUserSiteAdmin() || ctx.User.ID == image.UID
+ }
+
+}
+
func AdminOrOwnerOrJobCreaterRight(ctx *context.Context) {
var ID = ctx.Params(":id")
@@ -149,7 +162,31 @@ func AdminOrJobCreaterRightForTrain(ctx *context.Context) {
}
+func AdminOrImageCreaterRight(ctx *context.Context) {
+
+ id, err := strconv.ParseInt(ctx.Params(":id"), 10, 64)
+ var image *models.Image
+ if err != nil {
+ log.Error("Get Image by ID failed:%v", err.Error())
+
+ } else {
+ image, err = models.GetImageByID(id)
+ if err != nil {
+ log.Error("Get Image by ID failed:%v", err.Error())
+ return
+ }
+ }
+
+ if !isAdminOrImageCreater(ctx, image, err) {
+ log.Error("!isAdminOrImageCreater error:%v", err.Error())
+ ctx.NotFound(ctx.Req.URL.RequestURI(), nil)
+ }
+
+}
+
+
func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command, uuid, codePath, modelPath, benchmarkPath, snn4imagenetPath, brainScorePath, jobType, gpuQueue, description, branchName, bootFile, params string, benchmarkTypeID, benchmarkChildTypeID, resourceSpecId int) error {
+
dataActualPath := setting.Attachment.Minio.RealPath +
setting.Attachment.Minio.Bucket + "/" +
setting.Attachment.Minio.BasePath +
@@ -194,6 +231,7 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command,
datasetName = attach.Name
}
+ createTime := timeutil.TimeStampNow()
jobResult, err := CreateJob(jobName, models.CreateJobParams{
JobName: jobName,
RetryCount: 1,
@@ -294,6 +332,8 @@ func GenerateTask(ctx *context.Context, displayJobName, jobName, image, command,
BootFile: bootFile,
DatasetName: datasetName,
Parameters: params,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
})
if err != nil {
@@ -341,6 +381,7 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e
return errors.New("no such resourceSpec")
}
+ createTime := timeutil.TimeStampNow()
jobResult, err := CreateJob(jobName, models.CreateJobParams{
JobName: jobName,
RetryCount: 1,
@@ -432,6 +473,8 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e
GpuQueue: task.GpuQueue,
ResourceSpecId: task.ResourceSpecId,
ComputeResource: task.ComputeResource,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
}
err = models.RestartCloudbrain(task, newTask)
diff --git a/modules/cloudbrain/resty.go b/modules/cloudbrain/resty.go
index 46b7c991b..1565d3044 100755
--- a/modules/cloudbrain/resty.go
+++ b/modules/cloudbrain/resty.go
@@ -4,9 +4,11 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math"
"net/http"
"strconv"
"strings"
+ "time"
"code.gitea.io/gitea/modules/log"
@@ -28,6 +30,7 @@ const (
Custom = "custom"
LogPageSize = 500
LogPageTokenExpired = "5m"
+ pageSize = 15
)
func getRestyClient() *resty.Client {
@@ -210,6 +213,42 @@ func getQueryString(page int, size int, name string) string {
}
func CommitImage(jobID string, params models.CommitImageParams) error {
+
+ dbImage, err := models.GetImageByTag(params.ImageTag)
+
+ if err != nil && !models.IsErrImageNotExist(err) {
+ return fmt.Errorf("resty CommitImage: %v", err)
+ }
+ var createTime time.Time
+ var isSetCreatedUnix = false
+ if dbImage != nil {
+ if dbImage.UID != params.UID {
+ return models.ErrorImageTagExist{
+ Tag: params.ImageTag,
+ }
+ } else {
+ if dbImage.Status == models.IMAGE_STATUS_COMMIT {
+ return models.ErrorImageCommitting{
+ Tag: params.ImageTag,
+ }
+
+ } else { //覆盖提交
+
+ result, err := GetImagesPageable(1, pageSize, Custom, "")
+ if err == nil && result.Code == "S000" {
+ for _, v := range result.Payload.ImageInfo {
+ if v.Place == dbImage.Place {
+ isSetCreatedUnix = true
+ createTime, _ = time.Parse(time.RFC3339, v.Createtime)
+ break
+ }
+ }
+ }
+
+ }
+ }
+ }
+
checkSetting()
client := getRestyClient()
var result models.CommitImageResult
@@ -220,7 +259,7 @@ sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetAuthToken(TOKEN).
- SetBody(params).
+ SetBody(params.CommitImageCloudBrainParams).
SetResult(&result).
Post(HOST + "/rest-server/api/v1/jobs/" + jobID + "/commitImage")
@@ -238,7 +277,128 @@ sendjob:
return fmt.Errorf("CommitImage err: %s", res.String())
}
- return nil
+ image := models.Image{
+ Type: models.NORMAL_TYPE,
+ CloudbrainType: params.CloudBrainType,
+ UID: params.UID,
+ IsPrivate: params.IsPrivate,
+ Tag: params.ImageTag,
+ Description: params.ImageDescription,
+ Place: setting.Cloudbrain.ImageURLPrefix + params.ImageTag,
+ Status: models.IMAGE_STATUS_COMMIT,
+ }
+
+ err = models.WithTx(func(ctx models.DBContext) error {
+ if dbImage != nil {
+ dbImage.IsPrivate = params.IsPrivate
+ dbImage.Description = params.ImageDescription
+ dbImage.Status = models.IMAGE_STATUS_COMMIT
+ image = *dbImage
+ if err := models.UpdateLocalImage(dbImage); err != nil {
+ log.Error("Failed to update image record.", err)
+ return fmt.Errorf("CommitImage err: %s", res.String())
+ }
+
+ } else {
+ if err := models.CreateLocalImage(&image); err != nil {
+ log.Error("Failed to insert image record.", err)
+ return fmt.Errorf("CommitImage err: %s", res.String())
+ }
+ }
+ if err := models.SaveImageTopics(image.ID, params.Topics...); err != nil {
+ log.Error("Failed to insert image record.", err)
+ return fmt.Errorf("CommitImage err: %s", res.String())
+ }
+ return nil
+ })
+ if err == nil {
+ go updateImageStatus(image, isSetCreatedUnix, createTime)
+ }
+ return err
+}
+
+func CommitAdminImage(params models.CommitImageParams) error {
+
+ exist, err := models.IsImageExist(params.ImageTag)
+
+ if err != nil {
+ return fmt.Errorf("resty CommitImage: %v", err)
+ }
+ if exist {
+ return models.ErrorImageTagExist{
+ Tag: params.ImageTag,
+ }
+ }
+
+ image := models.Image{
+ CloudbrainType: params.CloudBrainType,
+ UID: params.UID,
+ IsPrivate: params.IsPrivate,
+ Tag: params.ImageTag,
+ Description: params.ImageDescription,
+ Place: params.Place,
+ Status: models.IMAGE_STATUS_SUCCESS,
+ Type: params.Type,
+ }
+
+ err = models.WithTx(func(ctx models.DBContext) error {
+
+ if err := models.CreateLocalImage(&image); err != nil {
+ log.Error("Failed to insert image record.", err)
+ return fmt.Errorf("resty CommitImage: %v", err)
+ }
+
+ if err := models.SaveImageTopics(image.ID, params.Topics...); err != nil {
+ log.Error("Failed to insert image record.", err)
+ return fmt.Errorf("resty CommitImage: %v", err)
+ }
+ return nil
+ })
+ return err
+}
+
+func updateImageStatus(image models.Image, isSetCreatedUnix bool, createTime time.Time) {
+ attemps := 5
+ commitSuccess := false
+ time.Sleep(5 * time.Second)
+ for i := 0; i < attemps; i++ {
+
+ if commitSuccess {
+ break
+ }
+
+ result, err := GetImagesPageable(1, pageSize, Custom, "")
+ if err == nil && result.Code == "S000" {
+ for _, v := range result.Payload.ImageInfo {
+ if v.Place == image.Place && (!isSetCreatedUnix || (isSetCreatedUnix && createTimeUpdated(v, createTime))) {
+ image.Status = models.IMAGE_STATUS_SUCCESS
+ models.UpdateLocalImageStatus(&image)
+ commitSuccess = true
+ break
+ }
+
+ }
+
+ }
+ //第一次循环等待4秒,第二次等待4的2次方16秒,...,第5次。。。 ,总共大概是20多分钟内进行5次重试
+ var sleepTime = time.Duration(int(math.Pow(4, (float64(i + 1)))))
+
+ time.Sleep(sleepTime * time.Second)
+
+ }
+ if !commitSuccess {
+ image.Status = models.IMAGE_STATUS_Failed
+ models.UpdateLocalImageStatus(&image)
+ }
+
+}
+
+func createTimeUpdated(v *models.ImageInfo, createTime time.Time) bool {
+ newTime, err := time.Parse(time.RFC3339, v.Createtime)
+ if err != nil {
+ return false
+ }
+ return newTime.After(createTime)
}
func StopJob(jobID string) error {
diff --git a/modules/convert/convert.go b/modules/convert/convert.go
index fa2e8f2e7..a542fe78b 100755
--- a/modules/convert/convert.go
+++ b/modules/convert/convert.go
@@ -403,6 +403,16 @@ func ToTopicResponse(topic *models.Topic) *api.TopicResponse {
}
}
+func ToImageTopicResponse(topic *models.ImageTopic) *api.ImageTopicResponse {
+ return &api.ImageTopicResponse{
+ ID: topic.ID,
+ Name: topic.Name,
+ ImageCount: topic.ImageCount,
+ Created: topic.CreatedUnix.AsTime(),
+ Updated: topic.UpdatedUnix.AsTime(),
+ }
+}
+
// ToOAuth2Application convert from models.OAuth2Application to api.OAuth2Application
func ToOAuth2Application(app *models.OAuth2Application) *api.OAuth2Application {
return &api.OAuth2Application{
diff --git a/modules/labelmsg/redismsgsender.go b/modules/labelmsg/redismsgsender.go
index 8b2eae772..c06407588 100644
--- a/modules/labelmsg/redismsgsender.go
+++ b/modules/labelmsg/redismsgsender.go
@@ -50,6 +50,7 @@ func SendDecompressAttachToLabelOBS(attach string) error {
_, err := redisclient.Do("Publish", setting.DecompressOBSTaskName, attach)
if err != nil {
log.Critical("redis Publish failed.")
+ return err
}
log.Info("LabelDecompressOBSQueue(%s) success", attach)
diff --git a/modules/modelarts/modelarts.go b/modules/modelarts/modelarts.go
index b1e7b269e..78b40fd56 100755
--- a/modules/modelarts/modelarts.go
+++ b/modules/modelarts/modelarts.go
@@ -1,6 +1,7 @@
package modelarts
import (
+ "code.gitea.io/gitea/modules/timeutil"
"encoding/json"
"errors"
"fmt"
@@ -197,6 +198,7 @@ func GenerateTask(ctx *context.Context, jobName, uuid, description, flavor strin
if poolInfos == nil {
json.Unmarshal([]byte(setting.PoolInfos), &poolInfos)
}
+ createTime := timeutil.TimeStampNow()
jobResult, err := CreateJob(models.CreateNotebookParams{
JobName: jobName,
Description: description,
@@ -235,6 +237,8 @@ func GenerateTask(ctx *context.Context, jobName, uuid, description, flavor strin
Type: models.TypeCloudBrainTwo,
Uuid: uuid,
ComputeResource: models.NPUResource,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
})
if err != nil {
@@ -254,7 +258,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc
log.Error("GetNotebookImageName failed: %v", err.Error())
return err
}
-
+ createTime := timeutil.TimeStampNow()
jobResult, err := createNotebook2(models.CreateNotebook2Params{
JobName: jobName,
Description: description,
@@ -288,6 +292,8 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc
ComputeResource: models.NPUResource,
Image: imageName,
Description: description,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
})
if err != nil {
@@ -304,6 +310,7 @@ func GenerateNotebook2(ctx *context.Context, displayJobName, jobName, uuid, desc
}
func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error) {
+ createTime := timeutil.TimeStampNow()
jobResult, err := createTrainJob(models.CreateTrainJobParams{
JobName: req.JobName,
Description: req.Description,
@@ -364,6 +371,8 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error
EngineName: req.EngineName,
VersionCount: req.VersionCount,
TotalVersionCount: req.TotalVersionCount,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
})
if err != nil {
@@ -375,6 +384,7 @@ func GenerateTrainJob(ctx *context.Context, req *GenerateTrainJobReq) (err error
}
func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, jobId string) (err error) {
+ createTime := timeutil.TimeStampNow()
jobResult, err := createTrainJobVersion(models.CreateTrainJobVersionParams{
Description: req.Description,
Config: models.TrainJobVersionConfig{
@@ -451,6 +461,8 @@ func GenerateTrainJobVersion(ctx *context.Context, req *GenerateTrainJobReq, job
EngineName: req.EngineName,
TotalVersionCount: VersionTaskList[0].TotalVersionCount + 1,
VersionCount: VersionListCount + 1,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
})
if err != nil {
log.Error("CreateCloudbrain(%s) failed:%v", req.JobName, err.Error())
@@ -526,6 +538,7 @@ func GetOutputPathByCount(TotalVersionCount int) (VersionOutputPath string) {
}
func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (err error) {
+ createTime := timeutil.TimeStampNow()
jobResult, err := createInferenceJob(models.CreateInferenceJobParams{
JobName: req.JobName,
Description: req.Description,
@@ -591,6 +604,8 @@ func GenerateInferenceJob(ctx *context.Context, req *GenerateInferenceJobReq) (e
ModelVersion: req.ModelVersion,
CkptName: req.CkptName,
ResultUrl: req.ResultUrl,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
})
if err != nil {
diff --git a/modules/setting/cloudbrain.go b/modules/setting/cloudbrain.go
index c0ab3b275..2d80eea25 100755
--- a/modules/setting/cloudbrain.go
+++ b/modules/setting/cloudbrain.go
@@ -1,9 +1,10 @@
package setting
type CloudbrainLoginConfig struct {
- Username string
- Password string
- Host string
+ Username string
+ Password string
+ Host string
+ ImageURLPrefix string
}
var (
@@ -15,5 +16,6 @@ func GetCloudbrainConfig() CloudbrainLoginConfig {
Cloudbrain.Username = cloudbrainSec.Key("USERNAME").MustString("")
Cloudbrain.Password = cloudbrainSec.Key("PASSWORD").MustString("")
Cloudbrain.Host = cloudbrainSec.Key("REST_SERVER_HOST").MustString("")
+ Cloudbrain.ImageURLPrefix = cloudbrainSec.Key("IMAGE_URL_PREFIX").MustString("")
return Cloudbrain
}
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index 26f068193..eee539d0c 100755
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -438,6 +438,7 @@ var (
//home page
RecommentRepoAddr string
ESSearchURL string
+ INDEXPOSTFIX string
//notice config
UserNameOfNoticeRepo string
RepoNameOfNoticeRepo string
@@ -1268,6 +1269,7 @@ func NewContext() {
sec = Cfg.Section("homepage")
RecommentRepoAddr = sec.Key("Address").MustString("https://git.openi.org.cn/OpenIOSSG/promote/raw/branch/master/")
ESSearchURL = sec.Key("ESSearchURL").MustString("http://192.168.207.94:9200")
+ INDEXPOSTFIX = sec.Key("INDEXPOSTFIX").MustString("")
sec = Cfg.Section("notice")
UserNameOfNoticeRepo = sec.Key("USER_NAME").MustString("OpenIOSSG")
diff --git a/modules/storage/minio_ext.go b/modules/storage/minio_ext.go
index 2f738ebad..167cd0488 100755
--- a/modules/storage/minio_ext.go
+++ b/modules/storage/minio_ext.go
@@ -2,6 +2,7 @@ package storage
import (
"encoding/xml"
+ "errors"
"path"
"sort"
"strconv"
@@ -129,7 +130,7 @@ func NewMultiPartUpload(uuid string) (string, error) {
return core.NewMultipartUpload(bucketName, objectName, miniov6.PutObjectOptions{})
}
-func CompleteMultiPartUpload(uuid string, uploadID string) (string, error) {
+func CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (string, error) {
client, core, err := getClients()
if err != nil {
log.Error("getClients failed:", err.Error())
@@ -146,6 +147,11 @@ func CompleteMultiPartUpload(uuid string, uploadID string) (string, error) {
return "", err
}
+ if len(partInfos) != totalChunks {
+ log.Error("ListObjectParts number(%d) is not equal the set total chunk number(%d)", len(partInfos), totalChunks)
+ return "", errors.New("the parts is not complete")
+ }
+
var complMultipartUpload completeMultipartUpload
for _, partInfo := range partInfos {
complMultipartUpload.Parts = append(complMultipartUpload.Parts, miniov6.CompletePart{
diff --git a/modules/storage/obs.go b/modules/storage/obs.go
index f733eef6c..03349864a 100755
--- a/modules/storage/obs.go
+++ b/modules/storage/obs.go
@@ -30,6 +30,8 @@ type FileInfo struct {
}
type FileInfoList []FileInfo
+const MAX_LIST_PARTS = 1000
+
func (ulist FileInfoList) Swap(i, j int) { ulist[i], ulist[j] = ulist[j], ulist[i] }
func (ulist FileInfoList) Len() int { return len(ulist) }
func (ulist FileInfoList) Less(i, j int) bool {
@@ -57,21 +59,55 @@ func ObsHasObject(path string) (bool, error) {
return hasObject, nil
}
+func listAllParts(uuid, uploadID, key string) (output *obs.ListPartsOutput, err error) {
+ output = &obs.ListPartsOutput{}
+ partNumberMarker := 0
+ for {
+ temp, err := ObsCli.ListParts(&obs.ListPartsInput{
+ Bucket: setting.Bucket,
+ Key: key,
+ UploadId: uploadID,
+ MaxParts: MAX_LIST_PARTS,
+ PartNumberMarker: partNumberMarker,
+ })
+ if err != nil {
+ log.Error("ListParts failed:", err.Error())
+ return output, err
+ }
+
+ partNumberMarker = temp.NextPartNumberMarker
+ log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", uuid, temp.MaxParts, temp.PartNumberMarker, temp.NextPartNumberMarker, len(temp.Parts))
+
+ for _, partInfo := range temp.Parts {
+ output.Parts = append(output.Parts, obs.Part{
+ PartNumber: partInfo.PartNumber,
+ ETag: partInfo.ETag,
+ })
+ }
+
+ if !temp.IsTruncated {
+ break
+ } else {
+ continue
+ }
+
+ break
+ }
+
+ return output, nil
+}
+
func GetObsPartInfos(uuid, uploadID, fileName string) (string, error) {
key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
- output, err := ObsCli.ListParts(&obs.ListPartsInput{
- Bucket: setting.Bucket,
- Key: key,
- UploadId: uploadID,
- })
+ allParts, err := listAllParts(uuid, uploadID, key)
if err != nil {
- log.Error("ListParts failed:", err.Error())
+ log.Error("listAllParts failed: %v", err)
return "", err
}
var chunks string
- for _, partInfo := range output.Parts {
+ for _, partInfo := range allParts.Parts {
chunks += strconv.Itoa(partInfo.PartNumber) + "-" + partInfo.ETag + ","
}
@@ -92,34 +128,33 @@ func NewObsMultiPartUpload(uuid, fileName string) (string, error) {
return output.UploadId, nil
}
-func CompleteObsMultiPartUpload(uuid, uploadID, fileName string) error {
+func CompleteObsMultiPartUpload(uuid, uploadID, fileName string, totalChunks int) error {
input := &obs.CompleteMultipartUploadInput{}
input.Bucket = setting.Bucket
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
input.UploadId = uploadID
- output, err := ObsCli.ListParts(&obs.ListPartsInput{
- Bucket: setting.Bucket,
- Key: input.Key,
- UploadId: uploadID,
- })
+
+ allParts, err := listAllParts(uuid, uploadID, input.Key)
if err != nil {
- log.Error("ListParts failed:", err.Error())
+ log.Error("listAllParts failed: %v", err)
return err
}
- for _, partInfo := range output.Parts {
- input.Parts = append(input.Parts, obs.Part{
- PartNumber: partInfo.PartNumber,
- ETag: partInfo.ETag,
- })
+ if len(allParts.Parts) != totalChunks {
+ log.Error("listAllParts number(%d) is not equal the set total chunk number(%d)", len(allParts.Parts), totalChunks)
+ return errors.New("the parts is not complete")
}
- _, err = ObsCli.CompleteMultipartUpload(input)
+ input.Parts = allParts.Parts
+
+ output, err := ObsCli.CompleteMultipartUpload(input)
if err != nil {
log.Error("CompleteMultipartUpload failed:", err.Error())
return err
}
+ log.Info("uuid:%s, RequestId:%s", uuid, output.RequestId)
+
return nil
}
diff --git a/modules/structs/repo_topic.go b/modules/structs/repo_topic.go
index 294d56a95..6fb6a92b4 100644
--- a/modules/structs/repo_topic.go
+++ b/modules/structs/repo_topic.go
@@ -17,6 +17,14 @@ type TopicResponse struct {
Updated time.Time `json:"updated"`
}
+type ImageTopicResponse struct {
+ ID int64 `json:"id"`
+ Name string `json:"topic_name"`
+ ImageCount int `json:"image_count"`
+ Created time.Time `json:"created"`
+ Updated time.Time `json:"updated"`
+}
+
// TopicName a list of repo topic names
type TopicName struct {
TopicNames []string `json:"topics"`
diff --git a/modules/templates/helper.go b/modules/templates/helper.go
index 77c6fca8d..006a1e046 100755
--- a/modules/templates/helper.go
+++ b/modules/templates/helper.go
@@ -92,6 +92,7 @@ func NewFuncMap() []template.FuncMap {
"Safe": Safe,
"SafeJS": SafeJS,
"Str2html": Str2html,
+ "subOne": subOne,
"TimeSince": timeutil.TimeSince,
"TimeSinceUnix": timeutil.TimeSinceUnix,
"TimeSinceUnix1": timeutil.TimeSinceUnix1,
@@ -443,7 +444,10 @@ func SafeJS(raw string) template.JS {
func Str2html(raw string) template.HTML {
return template.HTML(markup.Sanitize(raw))
}
-
+//
+func subOne(length int)int{
+ return length-1
+}
// Escape escapes a HTML string
func Escape(raw string) string {
return html.EscapeString(raw)
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index 394b9b7df..f53bea7b1 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -266,6 +266,16 @@ search_related=related
search_maybe=maybe
search_ge=
+wecome_AI_plt = Welcome to OpenI AI Collaboration Platform!
+explore_AI = Explore better AI, come here to find more interesting
+datasets = Datasets
+repositories = Repositories
+use_plt__fuction = To use the AI collaboration functions provided by this platform, such as: hosting code, sharing data, debugging algorithms or training models, start with
+provide_resoure = Computing resources of CPU/GPU/NPU are provided freely for various types of AI tasks.
+activity = Activity
+no_events = There are no events related
+or_t = or
+
[explore]
repos = Repositories
select_repos = Select the project
@@ -742,7 +752,7 @@ dataset_setting= Dataset Setting
title = Name
title_format_err=Name can only contain number,letter,'-','_' or '.', and can be up to 100 characters long.
description = Description
-description_format_err=Description's length can be up to 1024 characters long.
+description_format_err=Description's length can be up to %s characters long.
create_dataset = Create Dataset
create_dataset_fail=Failed to create dataset.
query_dataset_fail=Failed to query dataset.
@@ -895,7 +905,7 @@ readme_helper = Select a README file template.
auto_init = Initialize Repository (Adds .gitignore, License and README)
create_repo = Create Repository
create_course = Publish Course
-failed_to_create_course=Fail to publish course, please try again later.
+failed_to_create_course=Failed to publish course, please try again later.
default_branch = Default Branch
mirror_prune = Prune
mirror_prune_desc = Remove obsolete remote-tracking references
@@ -935,10 +945,28 @@ more=More
gpu_type_all=All
model_download=Model Download
submit_image=Submit Image
+modify_image=Modify Image
+image_exist=Image name has been used, please use a new one.
+image_committing=The image is being submitted, please try again later.
+image_commit_fail=Failed to submit image, please try again later.
+image_not_exist=Image does not exist.
+image_edit_fail=Failed to edit image, please try again later.
+image_delete_fail=Failed to delete image, please try again later.
+image_overwrite=You have already submitted an image with the same name. Are you sure you want to overwrite the original image?
download=Download
score=Score
+images.name = Image Tag
+images.name_placerholder = Please enter the image name
+image.label_tooltips = Example Python 3.7, Tensorflow 2.0, cuda 10, pytorch 1.6
+images.public_tooltips = After the image is set to public, it can be seen by other users.
+images.name_rule = Please enter letters, numbers, _ and - up to 100 characters and cannot end with a dash (-).
+images.delete_task = Delete image
+images.task_delete_confirm = Are you sure you want to delete this image? Once this image is deleted, it cannot be recovered.
+
cloudbrain=Cloudbrain
+cloudbrain.task = Cloudbrain Task
+cloudbrain.search = Search Task Name
cloudbrain.new=New cloudbrain
cloudbrain.desc=Cloudbrain
cloudbrain.cancel=Cancel
@@ -971,7 +999,7 @@ total_count_get_error=Can not get the total page.
last_update_time_error=Can not get the last updated time.
get_repo_stat_error=Can not get the statistics of the repository.
get_repo_info_error=Can not get the information of the repository.
-generate_statistic_file_error=Fail to generate file.
+generate_statistic_file_error=Failed to generate file.
repo_stat_inspect=ProjectAnalysis
all=All
@@ -1121,7 +1149,7 @@ form.name_reserved = The repository name '%s' is reserved.
form.course_name_reserved=The course name '%s' is reserved.
form.name_pattern_not_allowed = The pattern '%s' is not allowed in a repository name.
form.course_name_pattern_not_allowed=The pattern '%s' is not allowed in a course name.
-add_course_org_fail=Fail to add organization, please try again later.
+add_course_org_fail=Failed to add organization, please try again later.
need_auth = Clone Authorization
migrate_type = Migration Type
@@ -1361,6 +1389,7 @@ issues.filter_sort.feweststars = Fewest stars
issues.filter_sort.mostforks = Most forks
issues.filter_sort.fewestforks = Fewest forks
issues.filter_sort.downloadtimes = Most downloaded
+issues.filter_sort.moststars = Most stars
issues.action_open = Open
issues.action_close = Close
issues.action_label = Label
@@ -2165,6 +2194,7 @@ topic.manage_topics = Manage Topics
topic.done = Done
topic.count_prompt = You can not select more than 25 topics
topic.format_prompt = Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.
+imagetopic.format_prompt = Topics can be up to 35 characters long.
[org]
org_name_holder = Organization Name
@@ -2473,11 +2503,15 @@ repos.contributor=Contributor
repos.yes=Yes
repos.no=No
+images.recommend = Recommend
+images.unrecommend = Unrecommend
datasets.dataset_manage_panel= Dataset Manage
datasets.owner=Owner
datasets.name=name
datasets.private=Private
+datasets.recommend=Set recommend
+datasets.unrecommend=Set unrecommend
cloudbrain.all_task_types=All Task Types
cloudbrain.all_computing_resources=All Computing Resources
@@ -2825,7 +2859,7 @@ mirror_sync_create = synced new reference %[2]s to %[2]s at %[3]s from mirror
approve_pull_request = `approved %s#%[2]s`
reject_pull_request = `suggested changes for %s#%[2]s`
-upload_dataset=`upload dataset %s`
+upload_dataset=`upload dataset %s`
task_gpudebugjob=`created CPU/GPU type debugging task%s`
task_npudebugjob=`created NPU type debugging task %s`
task_nputrainjob=`created NPU training task%s`
@@ -2935,6 +2969,7 @@ snn4imagenet_path = Snn4imagenet script path
brainscore_path = Brainscore script path
start_command = Start command
choose_mirror = select mirror or enter mirror path
+input_mirror = Please enter image path
select_dataset = select dataset
specification = specification
select_specification = select specification
@@ -2955,3 +2990,11 @@ gpu_num = GPU
cpu_num = CPU
memory = Memory
shared_memory = Shared Memory
+
+
+DEBUG = DEBUG
+SNN4IMAGENET = SNN4IMAGENET
+BRAINSCORE = BRAINSCORE
+TRAIN = TRAIN
+INFERENCE = INFERENCE
+BENCHMARK = BENCHMARK
\ No newline at end of file
diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini
index 8982d59d7..c82347d5e 100755
--- a/options/locale/locale_zh-CN.ini
+++ b/options/locale/locale_zh-CN.ini
@@ -268,6 +268,18 @@ search_related=相关
search_maybe=约为
search_ge=个
+wecome_AI_plt=欢迎来到启智AI协作平台!
+explore_AI = 探索更好的AI,来这里发现更有意思的
+datasets = 数据集
+repositories = 项目
+use_plt__fuction = 使用本平台提供的AI协作功能,如:托管代码、共享数据、调试算法或训练模型,请先
+provide_resoure = 平台目前免费提供CPU、GPU、NPU的算力资源,可进行多种类型的AI任务。
+create_pro = 创建项目
+activity = 活动
+no_events = 还没有与您相关的活动
+or_t = 或
+
+
[explore]
repos=项目
select_repos=精选项目
@@ -745,7 +757,7 @@ dataset_setting=数据集设置
title=名称
title_format_err=名称最多允许输入100个字符,只允许字母,数字,中划线 (‘-’),下划线 (‘_’) 和点 (‘.’) 。
description=描述
-description_format_err=描述最多允许输入1024个字符。
+description_format_err=描述最多允许输入%s个字符。
create_dataset=创建数据集
create_dataset_fail=创建数据集失败。
query_dataset_fail=查询数据集失败。
@@ -934,10 +946,29 @@ more=更多
gpu_type_all=全部
model_download=结果下载
submit_image=提交镜像
+modify_image=修改镜像
+image_exist=镜像Tag已被使用,请修改镜像Tag。
+image_committing=镜像正在提交中,请稍后再试。
+image_commit_fail=提交镜像失败,请稍后再试。
+image_not_exist=镜像不存在。
+image_edit_fail=编辑镜像失败,请稍后再试。
+image_delete_fail=删除镜像失败,请稍后再试。
+image_overwrite=您已经提交过相同名称的镜像,您确定要覆盖原来提交的镜像吗?
download=模型下载
score=评分
+
+images.name = 镜像Tag
+images.name_placerholder = 请输入镜像Tag
+image.label_tooltips = 如Python 3.7, Tensorflow 2.0, cuda 10, pytorch 1.6
+images.public_tooltips = 镜像设置为公开后,可被其他用户看到。
+images.name_rule = 请输入字母、数字、_和-,最长100个字符,且不能以中划线(-)结尾。
+images.delete_task = 删除镜像
+images.task_delete_confirm = 你确认删除该镜像么?此镜像一旦删除不可恢复。
+
cloudbrain=云脑
+cloudbrain.task = 云脑任务
+cloudbrain.search = 搜索任务名称
cloudbrain.new=新建任务
cloudbrain.desc=云脑功能
cloudbrain.cancel=取消
@@ -1015,7 +1046,9 @@ modelarts.train_job.basic_info=基本信息
modelarts.train_job.job_status=任务状态
modelarts.train_job.job_name=任务名称
modelarts.train_job.version=任务版本
-modelarts.train_job.start_time=开始时间
+modelarts.train_job.start_time=开始运行时间
+modelarts.train_job.end_time=运行结束时间
+modelarts.train_job.wait_time=等待时间
modelarts.train_job.dura_time=运行时长
modelarts.train_job.description=任务描述
modelarts.train_job.parameter_setting=参数设置
@@ -1368,6 +1401,7 @@ issues.filter_sort.feweststars=点赞由少到多
issues.filter_sort.mostforks=派生由多到少
issues.filter_sort.fewestforks=派生由少到多
issues.filter_sort.downloadtimes=下载次数
+issues.filter_sort.moststars=收藏数量
issues.action_open=开启
issues.action_close=关闭
issues.action_label=标签
@@ -2168,8 +2202,9 @@ branch.included=已包含
topic.manage_topics=管理主题
topic.done=保存
-topic.count_prompt=您最多选择25个主题
-topic.format_prompt=主题必须以中文、字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符
+topic.count_prompt=您最多选择25个标签
+topic.format_prompt=标签必须以中文、字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符
+imagetopic.format_prompt=标签长度不得超过35个字符
[org]
org_name_holder=组织名称
@@ -2478,11 +2513,15 @@ repos.contributor=贡献者数
repos.yes=是
repos.no=否
+images.recommend = 推荐
+images.unrecommend = 不推荐
datasets.dataset_manage_panel=数据集管理
datasets.owner=所有者
datasets.name=名称
datasets.private=私有
+datasets.recommend=设为推荐
+datasets.unrecommend=取消推荐
cloudbrain.all_task_types=全部任务类型
cloudbrain.all_computing_resources=全部计算资源
@@ -2830,7 +2869,7 @@ mirror_sync_create=从镜像同步了新的引用 %[2]s
mirror_sync_delete=从镜像同步并从 %[3]s 删除了引用 %[2]s
approve_pull_request=`同意了 %s#%[2]s`
reject_pull_request=`建议变更 %s#%[2]s`
-upload_dataset=`上传了数据集文件 %s`
+upload_dataset=`上传了数据集文件 %s`
task_gpudebugjob=`创建了CPU/GPU类型调试任务 %s`
task_npudebugjob=`创建了NPU类型调试任务 %s`
task_nputrainjob=`创建了NPU类型训练任务 %s`
@@ -2940,6 +2979,7 @@ snn4imagenet_path = snn4imagenet脚本存放路径
brainscore_path = brainscore脚本存放路径
start_command = 启动命令
choose_mirror = 选择镜像或输入镜像地址
+input_mirror = 请输入云脑镜像地址
select_dataset = 选择数据集
specification = 规格
select_specification = 选择资源规格
@@ -2956,8 +2996,16 @@ task_delete_confirm = 你确认删除该任务么?此任务一旦删除不可
operate_confirm = 确定操作
operate_cancel = 取消操作
+
gpu_num = GPU数
cpu_num = CPU数
memory = 内存
shared_memory = 共享内存
+DEBUG = 调试任务
+SNN4IMAGENET = 调试任务-脉冲神经网络图片分类测评
+BRAINSCORE = 调试任务-神经相似性测评
+TRAIN = 训练任务
+INFERENCE = 推理任务
+BENCHMARK = 评测任务
+
diff --git a/public/home/home.js b/public/home/home.js
index 478c70f21..d8e423def 100755
--- a/public/home/home.js
+++ b/public/home/home.js
@@ -99,6 +99,11 @@ socket.onmessage = function (e) {
console.log("receive action type=" + record.OpType + " name=" + actionName + " but user is null.");
continue;
}
+ if(record.OpType == "24"){
+ if(record.Content.indexOf("true") != -1){
+ continue;
+ }
+ }
var recordPrefix = getMsg(record);
if(record.OpType == "6" || record.OpType == "10" || record.OpType == "12" || record.OpType == "13"){
html += recordPrefix + actionName;
@@ -162,7 +167,7 @@ socket.onmessage = function (e) {
function getTaskLink(record){
var re = getRepoLink(record);
if(record.OpType == 24){
- re = re + "/datasets?type=" + record.Content;
+ re = re + "/datasets";
}else if(record.OpType == 25){
re = re + "/cloudbrain/" + record.Content;
}else if(record.OpType == 26){
diff --git a/public/home/search.js b/public/home/search.js
index e23d27549..c55d1807c 100644
--- a/public/home/search.js
+++ b/public/home/search.js
@@ -101,16 +101,20 @@ function initPageInfo(){
function searchItem(type,sortType){
console.log("enter item 2.");
- currentSearchKeyword = document.getElementById("keyword_input").value;
- if(!isEmpty(currentSearchKeyword)){
- initPageInfo();
- currentSearchTableName = itemType[type];
- currentSearchSortBy = sortBy[sortType];
- currentSearchAscending = sortAscending[sortType];
- OnlySearchLabel =false;
- page(currentPage);
+ if(OnlySearchLabel){
+ doSearchLabel(currentSearchTableName,currentSearchKeyword,sortBy[sortType],sortAscending[sortType])
}else{
- emptySearch();
+ currentSearchKeyword = document.getElementById("keyword_input").value;
+ if(!isEmpty(currentSearchKeyword)){
+ initPageInfo();
+ currentSearchTableName = itemType[type];
+ currentSearchSortBy = sortBy[sortType];
+ currentSearchAscending = sortAscending[sortType];
+ OnlySearchLabel =false;
+ page(currentPage);
+ }else{
+ emptySearch();
+ }
}
}
@@ -806,17 +810,21 @@ var repoAndOrgEN={
function page(current){
currentPage=current;
+ startIndex = currentPage -1;
+ if(startIndex < 1){
+ startIndex = 1;
+ }
+ endIndex = currentPage + 2;
+ if(endIndex >= totalPage){
+ endIndex = totalPage;
+ }
doSearch(currentSearchTableName,currentSearchKeyword,current,pageSize,false,currentSearchSortBy,OnlySearchLabel);
-
}
function nextPage(){
currentPage = currentPage+1;
console.log("currentPage=" + currentPage);
- if(currentPage >= endIndex){
- startIndex=startIndex+1;
- endIndex = endIndex +1;
- }
+
page(currentPage);
}
@@ -824,10 +832,6 @@ function page(current){
console.log("currentPage=" + currentPage);
if(currentPage > 1){
currentPage = currentPage-1;
- if(currentPage <= startIndex && startIndex > 1){
- startIndex = startIndex -1;
- endIndex = endIndex - 1;
- }
console.log("currentPage=" + (currentPage));
page(currentPage);
}
@@ -862,7 +866,7 @@ function getYPosition(e){
showTip(getLabel(isZh,"search_input_large_0"),"warning",left+5,top);
}
else if(goNum<=totalPage){
- page(goNum);
+ page(parseInt(goNum,10));
}
else{
showTip(getLabel(isZh,"search_input_maxed"),"warning",left+5,top);
@@ -908,6 +912,11 @@ function getYPosition(e){
}
}
+ if (endIndex < totalPage-1){
+ html += "...";
+ html += "" + totalPage + "";
+ }
+
if(currentPage >=totalPage){
html += "";
html += "";
diff --git a/public/img/jian.svg b/public/img/jian.svg
new file mode 100644
index 000000000..0fc47c1b7
--- /dev/null
+++ b/public/img/jian.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/routers/admin/cloudbrains.go b/routers/admin/cloudbrains.go
index 884ed6b9b..6687b990a 100755
--- a/routers/admin/cloudbrains.go
+++ b/routers/admin/cloudbrains.go
@@ -20,6 +20,8 @@ import (
const (
tplCloudBrains base.TplName = "admin/cloudbrain/list"
+ tplImages base.TplName = "admin/cloudbrain/images"
+ tplCommitImages base.TplName = "admin/cloudbrain/imagecommit"
EXCEL_DATE_FORMAT = "20060102150405"
CREATE_TIME_FORMAT = "2006/01/02 15:04:05"
)
@@ -107,6 +109,18 @@ func CloudBrains(ctx *context.Context) {
}
+func Images(ctx *context.Context) {
+ ctx.Data["PageIsAdminImages"] = true
+ ctx.HTML(200, tplImages)
+
+}
+
+func CloudBrainCommitImageShow(ctx *context.Context) {
+ ctx.Data["PageIsAdminImages"] = true
+ ctx.HTML(200, tplCommitImages)
+
+}
+
func DownloadCloudBrains(ctx *context.Context) {
page := 1
diff --git a/routers/admin/dataset.go b/routers/admin/dataset.go
index a4378cf67..6b29b06ff 100644
--- a/routers/admin/dataset.go
+++ b/routers/admin/dataset.go
@@ -1,6 +1,8 @@
package admin
import (
+ "net/http"
+ "strconv"
"strings"
"code.gitea.io/gitea/models"
@@ -49,6 +51,8 @@ func Datasets(ctx *context.Context) {
orderBy = models.SearchOrderBySizeReverse
case "size":
orderBy = models.SearchOrderBySize
+ case "downloadtimes":
+ orderBy = models.SearchOrderByDownloadTimes
case "moststars":
orderBy = models.SearchOrderByStarsReverse
case "feweststars":
@@ -70,6 +74,7 @@ func Datasets(ctx *context.Context) {
PageSize: setting.UI.ExplorePagingNum,
},
Keyword: keyword,
+ RecommendOnly: ctx.QueryBool("recommend"),
SearchOrderBy: orderBy,
})
if err != nil {
@@ -80,7 +85,7 @@ func Datasets(ctx *context.Context) {
ctx.Data["Keyword"] = keyword
ctx.Data["Total"] = count
ctx.Data["Datasets"] = datasets
-
+ ctx.Data["Recommend"] = ctx.QueryBool("recommend")
pager := context.NewPagination(int(count), setting.UI.ExplorePagingNum, page, 5)
pager.SetDefaultParams(ctx)
ctx.Data["Page"] = pager
@@ -88,6 +93,23 @@ func Datasets(ctx *context.Context) {
ctx.HTML(200, tplDatasets)
}
+func DatasetAction(ctx *context.Context) {
+ var err error
+ datasetId, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
+ switch ctx.Params(":action") {
+
+ case "recommend":
+ err = models.RecommendDataset(datasetId, true)
+ case "unrecommend":
+ err = models.RecommendDataset(datasetId, false)
+ }
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action"))))
+ } else {
+ ctx.JSON(http.StatusOK, models.BaseOKMessage)
+ }
+}
+
func DeleteDataset(ctx *context.Context) {
dataset, err := models.GetDatasetByID(ctx.QueryInt64("id"))
if err != nil {
diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go
index 9de65662f..2b070a4b8 100755
--- a/routers/api/v1/api.go
+++ b/routers/api/v1/api.go
@@ -557,6 +557,10 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/query_user_last_month", operationReq, repo_ext.QueryUserStaticLastMonth)
m.Get("/query_user_yesterday", operationReq, repo_ext.QueryUserStaticYesterday)
m.Get("/query_user_all", operationReq, repo_ext.QueryUserStaticAll)
+			// cloud brain dashboard: export endpoints
+ m.Group("/cloudbrainboard", func() {
+ m.Get("/downloadAll", repo.DownloadCloudBrainBoard)
+ }, operationReq)
// Users
m.Group("/users", func() {
m.Get("/search", user.Search)
@@ -1007,6 +1011,9 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/topics", func() {
m.Get("/search", repo.TopicSearch)
})
+ m.Group("/image/topics", func() {
+ m.Get("/search", repo.ImageTopicSearch)
+ })
m.Group("/from_wechat", func() {
m.Get("/event", authentication.ValidEventSource)
m.Post("/event", authentication.AcceptWechatEvent)
diff --git a/routers/api/v1/repo/cloudbrain_dashboard.go b/routers/api/v1/repo/cloudbrain_dashboard.go
new file mode 100644
index 000000000..b979729a8
--- /dev/null
+++ b/routers/api/v1/repo/cloudbrain_dashboard.go
@@ -0,0 +1,135 @@
+package repo
+
+import (
+ "net/http"
+ "net/url"
+ "time"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/modules/context"
+ "code.gitea.io/gitea/modules/log"
+ "github.com/360EntSecGroup-Skylar/excelize/v2"
+)
+
+func DownloadCloudBrainBoard(ctx *context.Context) {
+
+ page := 1
+
+ pageSize := 300
+
+ var cloudBrain = ctx.Tr("repo.cloudbrain")
+ fileName := getCloudbrainFileName(cloudBrain)
+
+ _, total, err := models.CloudbrainAll(&models.CloudbrainsOptions{
+ ListOptions: models.ListOptions{
+ Page: page,
+ PageSize: 1,
+ },
+ Type: models.TypeCloudBrainAll,
+ NeedRepoInfo: false,
+ })
+
+ if err != nil {
+ log.Warn("Can not get cloud brain info", err)
+ ctx.Error(http.StatusBadRequest, ctx.Tr("repo.cloudbrain_query_fail"))
+ return
+ }
+
+ totalPage := getTotalPage(total, pageSize)
+
+ f := excelize.NewFile()
+
+ index := f.NewSheet(cloudBrain)
+ f.DeleteSheet("Sheet1")
+
+ for k, v := range allCloudbrainHeader(ctx) {
+ f.SetCellValue(cloudBrain, k, v)
+ }
+
+ var row = 2
+ for i := 0; i < totalPage; i++ {
+
+ pageRecords, _, err := models.CloudbrainAll(&models.CloudbrainsOptions{
+ ListOptions: models.ListOptions{
+ Page: page,
+ PageSize: pageSize,
+ },
+ Type: models.TypeCloudBrainAll,
+ NeedRepoInfo: true,
+ })
+ if err != nil {
+ log.Warn("Can not get cloud brain info", err)
+ continue
+ }
+ for _, record := range pageRecords {
+
+ for k, v := range allCloudbrainValues(row, record, ctx) {
+ f.SetCellValue(cloudBrain, k, v)
+ }
+ row++
+
+ }
+
+ page++
+ }
+ f.SetActiveSheet(index)
+
+ ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(fileName))
+ ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
+
+ f.WriteTo(ctx.Resp)
+}
+func getCloudbrainFileName(baseName string) string {
+ return baseName + "_" + time.Now().Format(EXCEL_DATE_FORMAT) + ".xlsx"
+
+}
+func allCloudbrainHeader(ctx *context.Context) map[string]string {
+
+ return map[string]string{"A1": ctx.Tr("repo.cloudbrain_task"), "B1": ctx.Tr("repo.cloudbrain_task_type"), "C1": ctx.Tr("repo.modelarts.status"),
+ "D1": ctx.Tr("repo.modelarts.createtime"), "E1": ctx.Tr("repo.modelarts.train_job.wait_time"), "F1": ctx.Tr("repo.modelarts.train_job.dura_time"),
+ "G1": ctx.Tr("repo.modelarts.train_job.start_time"),
+ "H1": ctx.Tr("repo.modelarts.train_job.end_time"), "I1": ctx.Tr("repo.modelarts.computing_resources"),
+ "J1": ctx.Tr("repo.cloudbrain_creator"), "K1": ctx.Tr("repo.repo_name"), "L1": ctx.Tr("repo.cloudbrain_task_name")}
+
+}
+func allCloudbrainValues(row int, rs *models.CloudbrainInfo, ctx *context.Context) map[string]string {
+ return map[string]string{getCellName("A", row): rs.DisplayJobName, getCellName("B", row): rs.JobType, getCellName("C", row): rs.Status,
+ getCellName("D", row): time.Unix(int64(rs.Cloudbrain.CreatedUnix), 0).Format(CREATE_TIME_FORMAT), getCellName("E", row): getBrainWaitTime(rs),
+ getCellName("F", row): rs.TrainJobDuration, getCellName("G", row): getBrainStartTime(rs),
+ getCellName("H", row): getBrainEndTime(rs),
+ getCellName("I", row): rs.ComputeResource, getCellName("J", row): rs.Name, getCellName("K", row): getBrainRepo(rs),
+ getCellName("L", row): rs.JobName,
+ }
+}
+func getBrainRepo(rs *models.CloudbrainInfo) string {
+ if rs.Repo != nil {
+ return rs.Repo.OwnerName + "/" + rs.Repo.Alias
+ }
+ return ""
+}
+func getBrainStartTime(rs *models.CloudbrainInfo) string {
+ timeString := time.Unix(int64(rs.Cloudbrain.StartTime), 0).Format(CREATE_TIME_FORMAT)
+ if timeString != "1970/01/01 08:00:00" {
+ return timeString
+ } else {
+ return "0"
+ }
+
+}
+func getBrainEndTime(rs *models.CloudbrainInfo) string {
+ timeString := time.Unix(int64(rs.Cloudbrain.EndTime), 0).Format(CREATE_TIME_FORMAT)
+ if timeString != "1970/01/01 08:00:00" {
+ return timeString
+ } else {
+ return "0"
+ }
+
+}
+func getBrainWaitTime(rs *models.CloudbrainInfo) string {
+ waitTime := rs.Cloudbrain.StartTime - rs.Cloudbrain.CreatedUnix
+ if waitTime <= 0 {
+ return "0"
+ } else {
+ return models.ConvertDurationToStr(int64(waitTime))
+ }
+}
diff --git a/routers/api/v1/repo/modelarts.go b/routers/api/v1/repo/modelarts.go
index e24ac95fb..9e4edea03 100755
--- a/routers/api/v1/repo/modelarts.go
+++ b/routers/api/v1/repo/modelarts.go
@@ -74,6 +74,7 @@ func GetModelArtsNotebook2(ctx *context.APIContext) {
if job.EndTime == 0 && models.IsModelArtsDebugJobTerminal(job.Status) {
job.EndTime = timeutil.TimeStampNow()
}
+ job.CorrectCreateUnix()
job.ComputeAndSetDuration()
err = models.UpdateJob(job)
if err != nil {
@@ -160,6 +161,7 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) {
}
if result.JobStatus.State != string(models.JobWaiting) {
+ models.ParseAndSetDurationFromCloudBrainOne(result, job)
err = models.UpdateJob(job)
if err != nil {
log.Error("UpdateJob failed:", err)
@@ -177,14 +179,12 @@ func GetModelArtsTrainJobVersion(ctx *context.APIContext) {
}
job.Status = modelarts.TransTrainJobStatus(result.IntStatus)
job.Duration = result.Duration / 1000
- job.TrainJobDuration = result.TrainJobDuration
-
job.TrainJobDuration = models.ConvertDurationToStr(job.Duration)
if job.EndTime == 0 && models.IsTrainJobTerminal(job.Status) && job.StartTime > 0 {
job.EndTime = job.StartTime.Add(job.Duration)
}
-
+ job.CorrectCreateUnix()
err = models.UpdateTrainJobVersion(job)
if err != nil {
log.Error("UpdateJob failed:", err)
@@ -417,7 +417,7 @@ func GetModelArtsInferenceJob(ctx *context.APIContext) {
if job.EndTime == 0 && models.IsTrainJobTerminal(job.Status) && job.StartTime > 0 {
job.EndTime = job.StartTime.Add(job.Duration)
}
-
+ job.CorrectCreateUnix()
err = models.UpdateInferenceJob(job)
if err != nil {
log.Error("UpdateJob failed:", err)
diff --git a/routers/api/v1/repo/topic.go b/routers/api/v1/repo/topic.go
index 530b92a10..f4ff7a329 100644
--- a/routers/api/v1/repo/topic.go
+++ b/routers/api/v1/repo/topic.go
@@ -300,3 +300,63 @@ func TopicSearch(ctx *context.APIContext) {
"topics": topicResponses,
})
}
+
+func ImageTopicSearch(ctx *context.APIContext) {
+ // swagger:operation GET /image/topics/search image topicSearch
+ // ---
+ // summary: search topics via keyword
+ // produces:
+ // - application/json
+ // parameters:
+ // - name: q
+ // in: query
+ // description: keywords to search
+ // required: true
+ // type: string
+ // - name: page
+ // in: query
+ // description: page number of results to return (1-based)
+ // type: integer
+ // - name: limit
+ // in: query
+ // description: page size of results, maximum page size is 50
+ // type: integer
+ // responses:
+ // "200":
+ // "$ref": "#/responses/TopicListResponse"
+ // "403":
+ // "$ref": "#/responses/forbidden"
+
+ if ctx.User == nil {
+ ctx.Error(http.StatusForbidden, "UserIsNil", "Only owners could change the topics.")
+ return
+ }
+
+ kw := ctx.Query("q")
+
+ listOptions := utils.GetListOptions(ctx)
+ if listOptions.Page < 1 {
+ listOptions.Page = 1
+ }
+ if listOptions.PageSize < 1 {
+ listOptions.PageSize = 10
+ }
+
+ topics, err := models.FindImageTopics(&models.FindImageTopicOptions{
+ Keyword: kw,
+ ListOptions: listOptions,
+ })
+ if err != nil {
+ log.Error("SearchImageTopics failed: %v", err)
+ ctx.InternalServerError(err)
+ return
+ }
+
+ topicResponses := make([]*api.ImageTopicResponse, len(topics))
+ for i, topic := range topics {
+ topicResponses[i] = convert.ToImageTopicResponse(topic)
+ }
+ ctx.JSON(http.StatusOK, map[string]interface{}{
+ "topics": topicResponses,
+ })
+}
diff --git a/routers/home.go b/routers/home.go
index 324bb1032..5dec05ebe 100755
--- a/routers/home.go
+++ b/routers/home.go
@@ -331,6 +331,7 @@ func ExploreDatasets(ctx *context.Context) {
Task: task,
License: license,
OwnerID: ownerID,
+ RecommendOnly: ctx.QueryBool("recommend"),
ListOptions: models.ListOptions{
Page: page,
PageSize: 30,
@@ -357,6 +358,7 @@ func ExploreDatasets(ctx *context.Context) {
ctx.Data["Category"] = category
ctx.Data["Task"] = task
ctx.Data["License"] = license
+ ctx.Data["Recommend"] = ctx.QueryBool("recommend")
pager.SetDefaultParams(ctx)
ctx.Data["Page"] = pager
diff --git a/routers/image/image.go b/routers/image/image.go
new file mode 100644
index 000000000..ae9912e3d
--- /dev/null
+++ b/routers/image/image.go
@@ -0,0 +1,30 @@
+package image
+
+import (
+ "net/http"
+ "strconv"
+
+ "code.gitea.io/gitea/models"
+ "code.gitea.io/gitea/modules/context"
+)
+
+func Action(ctx *context.Context) {
+ var err error
+ imageId, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
+ switch ctx.Params(":action") {
+
+ case "star":
+ err = models.StarImage(ctx.User.ID, imageId, true)
+ case "unstar":
+ err = models.StarImage(ctx.User.ID, imageId, false)
+ case "recommend":
+ err = models.RecommendImage(imageId, true)
+ case "unrecommend":
+ err = models.RecommendImage(imageId, false)
+ }
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action"))))
+ } else {
+ ctx.JSON(http.StatusOK, models.BaseOKMessage)
+ }
+}
diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go
index 96f17b74b..aa52a1400 100755
--- a/routers/repo/attachment.go
+++ b/routers/repo/attachment.go
@@ -11,7 +11,6 @@ import (
"fmt"
"mime/multipart"
"net/http"
- "path"
"strconv"
"strings"
@@ -78,7 +77,7 @@ func UploadAttachmentUI(ctx *context.Context) {
}
func EditAttachmentUI(ctx *context.Context) {
-
+
id, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
ctx.Data["PageIsDataset"] = true
attachment, _ := models.GetAttachmentByID(id)
@@ -830,20 +829,6 @@ func GetMultipartUploadUrl(ctx *context.Context) {
})
}
-func GetObsKey(ctx *context.Context) {
- uuid := gouuid.NewV4().String()
- key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
-
- ctx.JSON(200, map[string]string{
- "uuid": uuid,
- "key": key,
- "access_key_id": setting.AccessKeyID,
- "secret_access_key": setting.SecretAccessKey,
- "server": setting.Endpoint,
- "bucket": setting.Bucket,
- })
-}
-
func CompleteMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
@@ -870,13 +855,13 @@ func CompleteMultipart(ctx *context.Context) {
}
if typeCloudBrain == models.TypeCloudBrainOne {
- _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
+ _, err = storage.CompleteMultiPartUpload(uuid, uploadID, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
}
} else {
- err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
+ err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
return
@@ -907,10 +892,9 @@ func CompleteMultipart(ctx *context.Context) {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
-
+ attachment.UpdateDatasetUpdateUnix()
repository, _ := models.GetRepositoryByID(dataset.RepoID)
- notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(attachment.Type), attachment.Name, models.ActionUploadAttachment)
-
+ notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)
if attachment.DatasetID != 0 {
if isCanDecompress(attachment.Name) {
if typeCloudBrain == models.TypeCloudBrainOne {
@@ -947,34 +931,6 @@ func CompleteMultipart(ctx *context.Context) {
})
}
-func UpdateMultipart(ctx *context.Context) {
- uuid := ctx.Query("uuid")
- partNumber := ctx.QueryInt("chunkNumber")
- etag := ctx.Query("etag")
-
- fileChunk, err := models.GetFileChunkByUUID(uuid)
- if err != nil {
- if models.IsErrFileChunkNotExist(err) {
- ctx.Error(404)
- } else {
- ctx.ServerError("GetFileChunkByUUID", err)
- }
- return
- }
-
- fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
-
- err = models.UpdateFileChunk(fileChunk)
- if err != nil {
- ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
- return
- }
-
- ctx.JSON(200, map[string]string{
- "result_code": "0",
- })
-}
-
func HandleUnDecompressAttachment() {
attachs, err := models.GetUnDecompressAttachments()
if err != nil {
@@ -986,23 +942,29 @@ func HandleUnDecompressAttachment() {
if attach.Type == models.TypeCloudBrainOne {
err = worker.SendDecompressTask(contexExt.Background(), attach.UUID, attach.Name)
if err != nil {
- log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
+ log.Error("SendDecompressTask(%s) failed:%s", attach.UUID, err.Error())
} else {
- attach.DecompressState = models.DecompressStateIng
- err = models.UpdateAttachment(attach)
- if err != nil {
- log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
- }
+ updateAttachmentDecompressStateIng(attach)
}
} else if attach.Type == models.TypeCloudBrainTwo {
attachjson, _ := json.Marshal(attach)
- labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
+ err = labelmsg.SendDecompressAttachToLabelOBS(string(attachjson))
+ if err != nil {
+ log.Error("SendDecompressTask to labelsystem (%s) failed:%s", attach.UUID, err.Error())
+ } else {
+ updateAttachmentDecompressStateIng(attach)
+ }
}
-
}
-
return
}
+func updateAttachmentDecompressStateIng(attach *models.Attachment) {
+ attach.DecompressState = models.DecompressStateIng
+ err := models.UpdateAttachment(attach)
+ if err != nil {
+ log.Error("UpdateAttachment state(%s) failed:%s", attach.UUID, err.Error())
+ }
+}
func QueryAllPublicDataset(ctx *context.Context) {
attachs, err := models.GetAllPublicAttachments()
diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go
index 0d007a27d..4ef205af2 100755
--- a/routers/repo/cloudbrain.go
+++ b/routers/repo/cloudbrain.go
@@ -13,6 +13,7 @@ import (
"strconv"
"strings"
"time"
+ "unicode/utf8"
"code.gitea.io/gitea/modules/timeutil"
"github.com/unknwon/i18n"
@@ -39,8 +40,13 @@ const (
tplCloudBrainBenchmarkNew base.TplName = "repo/cloudbrain/benchmark/new"
tplCloudBrainBenchmarkShow base.TplName = "repo/cloudbrain/benchmark/show"
+ tplCloudBrainImageSubmit base.TplName = "repo/cloudbrain/image/submit"
+ tplCloudBrainImageEdit base.TplName = "repo/cloudbrain/image/edit"
+
+
tplCloudBrainTrainJobNew base.TplName = "repo/cloudbrain/trainjob/new"
tplCloudBrainTrainJobShow base.TplName = "repo/cloudbrain/trainjob/show"
+
)
var (
@@ -53,6 +59,7 @@ var (
)
const BENCHMARK_TYPE_CODE = "repo.cloudbrain.benchmark.types"
+const CLONE_FILE_PREFIX = "file:///"
var benchmarkTypesMap = make(map[string]*models.BenchmarkTypes, 0)
@@ -435,15 +442,29 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo
return
}
- if cloudbrain.ResourceSpecs == nil {
- json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs)
- }
- for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec {
- if tmp.Id == task.ResourceSpecId {
- ctx.Data["GpuNum"] = tmp.GpuNum
- ctx.Data["CpuNum"] = tmp.CpuNum
- ctx.Data["MemMiB"] = tmp.MemMiB
- ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB
+ if task.JobType == string(models.JobTypeTrain) {
+ if cloudbrain.TrainResourceSpecs == nil {
+ json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs)
+ }
+ for _, tmp := range cloudbrain.TrainResourceSpecs.ResourceSpec {
+ if tmp.Id == task.ResourceSpecId {
+ ctx.Data["GpuNum"] = tmp.GpuNum
+ ctx.Data["CpuNum"] = tmp.CpuNum
+ ctx.Data["MemMiB"] = tmp.MemMiB
+ ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB
+ }
+ }
+ } else {
+ if cloudbrain.ResourceSpecs == nil {
+ json.Unmarshal([]byte(setting.ResourceSpecs), &cloudbrain.ResourceSpecs)
+ }
+ for _, tmp := range cloudbrain.ResourceSpecs.ResourceSpec {
+ if tmp.Id == task.ResourceSpecId {
+ ctx.Data["GpuNum"] = tmp.GpuNum
+ ctx.Data["CpuNum"] = tmp.CpuNum
+ ctx.Data["MemMiB"] = tmp.MemMiB
+ ctx.Data["ShareMemMiB"] = tmp.ShareMemMiB
+ }
}
}
@@ -589,26 +610,212 @@ func CloudBrainDebug(ctx *context.Context) {
ctx.Redirect(debugUrl)
}
+func CloudBrainCommitImageShow(ctx *context.Context) {
+ ctx.Data["PageIsCloudBrain"] = true
+ ctx.Data["Type"] = ctx.Cloudbrain.Type
+ ctx.HTML(200, tplCloudBrainImageSubmit)
+}
+
+func CloudBrainImageEdit(ctx *context.Context) {
+ ctx.Data["PageIsImageEdit"] = true
+ ctx.Data["PageFrom"] = ctx.Params(":from")
+ var ID = ctx.Params(":id")
+ id, err := strconv.ParseInt(ID, 10, 64)
+ if err != nil {
+ log.Error("GetImageByID failed:%v", err.Error())
+ ctx.NotFound(ctx.Req.URL.RequestURI(), nil)
+ }
+ image, err := models.GetImageByID(id)
+ if err != nil {
+ log.Error("GetImageByID failed:%v", err.Error())
+ ctx.NotFound(ctx.Req.URL.RequestURI(), nil)
+ }
+ ctx.Data["Image"] = image
+ ctx.HTML(http.StatusOK, tplCloudBrainImageEdit)
+
+}
+
+func CloudBrainImageEditPost(ctx *context.Context, form auth.EditImageCloudBrainForm) {
+
+ if utf8.RuneCountInString(form.Description) > 255 {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err", 255)))
+ return
+ }
+
+ validTopics, errMessage := checkTopics(form.Topics)
+ if errMessage != "" {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr(errMessage)))
+ return
+ }
+ image, err := models.GetImageByID(form.ID)
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_not_exist")))
+
+ }
+
+ image.IsPrivate = form.IsPrivate
+ image.Description = form.Description
+
+ err = models.WithTx(func(ctx models.DBContext) error {
+ if err := models.UpdateLocalImage(image); err != nil {
+ return err
+ }
+ if err := models.SaveImageTopics(image.ID, validTopics...); err != nil {
+ return err
+ }
+ return nil
+
+ })
+
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_not_exist")))
+
+ } else {
+ ctx.JSON(http.StatusOK, models.BaseOKMessage)
+ }
+
+}
+
+func CloudBrainImageDelete(ctx *context.Context) {
+ var ID = ctx.Params(":id")
+ id, err := strconv.ParseInt(ID, 10, 64)
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_not_exist")))
+ return
+ }
+
+ err = models.DeleteLocalImage(id)
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_delete_fail")))
+ } else {
+ ctx.JSON(http.StatusOK, models.BaseOKMessage)
+ }
+
+}
+
+func CloudBrainCommitImageCheck(ctx *context.Context, form auth.CommitImageCloudBrainForm) {
+ isExist, _ := models.IsImageExistByUser(form.Tag, ctx.User.ID)
+ if isExist {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.image_overwrite")))
+ } else {
+ ctx.JSON(http.StatusOK, models.BaseOKMessage)
+ }
+
+}
+
+func CloudBrainAdminCommitImage(ctx *context.Context, form auth.CommitAdminImageCloudBrainForm) {
+
+ if !NamePattern.MatchString(form.Tag) {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.title_format_err")))
+ return
+ }
+
+ if utf8.RuneCountInString(form.Description) > 255 {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err", 255)))
+ return
+ }
+
+ validTopics, errMessage := checkTopics(form.Topics)
+ if errMessage != "" {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr(errMessage)))
+ return
+ }
+
+ err := cloudbrain.CommitAdminImage(models.CommitImageParams{
+ CommitImageCloudBrainParams: models.CommitImageCloudBrainParams{
+ ImageDescription: form.Description,
+ ImageTag: form.Tag,
+ },
+ IsPrivate: form.IsPrivate,
+ CloudBrainType: form.Type,
+ Topics: validTopics,
+ UID: ctx.User.ID,
+ Type: models.GetRecommondType(form.IsRecommend),
+ Place: form.Place,
+ })
+ if err != nil {
+ log.Error("CommitImagefailed")
+ if models.IsErrImageTagExist(err) {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_exist")))
+
+ } else if models.IsErrorImageCommitting(err) {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_committing")))
+ } else {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_commit_fail")))
+ }
+
+ return
+ }
+
+ ctx.JSON(200, models.BaseOKMessage)
+}
+
func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrainForm) {
+
+ if !NamePattern.MatchString(form.Tag) {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.title_format_err")))
+ return
+ }
+
+ if utf8.RuneCountInString(form.Description) > 255 {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err", 255)))
+ return
+ }
+
+ validTopics, errMessage := checkTopics(form.Topics)
+ if errMessage != "" {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr(errMessage)))
+ return
+ }
+
err := cloudbrain.CommitImage(ctx.Cloudbrain.JobID, models.CommitImageParams{
- Ip: ctx.Cloudbrain.ContainerIp,
- TaskContainerId: ctx.Cloudbrain.ContainerID,
- ImageDescription: form.Description,
- ImageTag: form.Tag,
+ CommitImageCloudBrainParams: models.CommitImageCloudBrainParams{
+ Ip: ctx.Cloudbrain.ContainerIp,
+ TaskContainerId: ctx.Cloudbrain.ContainerID,
+ ImageDescription: form.Description,
+ ImageTag: form.Tag,
+ },
+ IsPrivate: form.IsPrivate,
+ CloudBrainType: form.Type,
+ Topics: validTopics,
+ UID: ctx.User.ID,
})
if err != nil {
log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"])
- ctx.JSON(200, map[string]string{
- "result_code": "-1",
- "error_msg": "CommitImage failed",
- })
+ if models.IsErrImageTagExist(err) {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_exist")))
+
+ } else if models.IsErrorImageCommitting(err) {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_committing")))
+ } else {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_commit_fail")))
+ }
+
return
}
- ctx.JSON(200, map[string]string{
- "result_code": "0",
- "error_msg": "",
- })
+ ctx.JSON(200, models.BaseOKMessage)
+}
+
+func checkTopics(Topics string) ([]string, string) {
+ var topics = make([]string, 0)
+ var topicsStr = strings.TrimSpace(Topics)
+ if len(topicsStr) > 0 {
+ topics = strings.Split(topicsStr, ",")
+ }
+
+ validTopics, invalidTopics := models.SanitizeAndValidateImageTopics(topics)
+
+ if len(validTopics) > 25 {
+ return nil, "repo.topic.count_prompt"
+
+ }
+
+ if len(invalidTopics) > 0 {
+ return nil, "repo.imagetopic.format_prompt"
+
+ }
+ return validTopics, ""
}
func CloudBrainStop(ctx *context.Context) {
@@ -753,8 +960,11 @@ func CloudBrainDel(ctx *context.Context) {
}
var isAdminPage = ctx.Query("isadminpage")
+ var isHomePage = ctx.Query("ishomepage")
if ctx.IsUserSiteAdmin() && isAdminPage == "true" {
ctx.Redirect(setting.AppSubURL + "/admin" + "/cloudbrains")
+ } else if isHomePage == "true" {
+ ctx.Redirect(setting.AppSubURL + "/cloudbrains")
} else {
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=" + listType)
}
@@ -825,35 +1035,106 @@ func CloudBrainShowModels(ctx *context.Context) {
}
func GetPublicImages(ctx *context.Context) {
+ uid := getUID(ctx)
+ opts := models.SearchImageOptions{
+ IncludePublicOnly: true,
+ UID: uid,
+ Keyword: ctx.Query("q"),
+ Topics: ctx.Query("topic"),
+ IncludeOfficialOnly: ctx.QueryBool("recommend"),
+ SearchOrderBy: "type desc, num_stars desc,id desc",
+ Status: models.IMAGE_STATUS_SUCCESS,
+ }
- getImages(ctx, cloudbrain.Public)
+ getImages(ctx, &opts)
}
func GetCustomImages(ctx *context.Context) {
+ uid := getUID(ctx)
+ opts := models.SearchImageOptions{
+ UID: uid,
+ IncludeOwnerOnly: true,
+ Keyword: ctx.Query("q"),
+ Topics: ctx.Query("topic"),
+ Status: -1,
+ SearchOrderBy: "id desc",
+ }
+ getImages(ctx, &opts)
+
+}
+func GetStarImages(ctx *context.Context) {
- getImages(ctx, cloudbrain.Custom)
+ uid := getUID(ctx)
+ opts := models.SearchImageOptions{
+ UID: uid,
+ IncludeStarByMe: true,
+ Keyword: ctx.Query("q"),
+ Topics: ctx.Query("topic"),
+ Status: models.IMAGE_STATUS_SUCCESS,
+ SearchOrderBy: "id desc",
+ }
+ getImages(ctx, &opts)
}
-func getImages(ctx *context.Context, imageType string) {
- log.Info("Get images begin")
+func getUID(ctx *context.Context) int64 {
+ var uid int64 = -1
+ if ctx.IsSigned {
+ uid = ctx.User.ID
+ }
+ return uid
+}
+func GetAllImages(ctx *context.Context) {
+ uid := getUID(ctx)
+ opts := models.SearchImageOptions{
+ UID: uid,
+ Keyword: ctx.Query("q"),
+ Topics: ctx.Query("topic"),
+ IncludeOfficialOnly: ctx.QueryBool("recommend"),
+ SearchOrderBy: "id desc",
+ Status: -1,
+ }
+
+ if ctx.Query("private") != "" {
+ if ctx.QueryBool("private") {
+ opts.IncludePrivateOnly = true
+ } else {
+ opts.IncludePublicOnly = true
+ }
+ }
+ getImages(ctx, &opts)
+
+}
+
+func getImages(ctx *context.Context, opts *models.SearchImageOptions) {
page := ctx.QueryInt("page")
- size := ctx.QueryInt("size")
- name := ctx.Query("name")
- getImagesResult, err := cloudbrain.GetImagesPageable(page, size, imageType, name)
+ if page <= 0 {
+ page = 1
+ }
+
+ pageSize := ctx.QueryInt("pageSize")
+ if pageSize <= 0 {
+ pageSize = 15
+ }
+ opts.ListOptions = models.ListOptions{
+ Page: page,
+ PageSize: pageSize,
+ }
+ imageList, total, err := models.SearchImage(opts)
if err != nil {
log.Error("Can not get images:%v", err)
- ctx.JSON(http.StatusOK, models.GetImagesPayload{
- Count: 0,
- TotalPages: 0,
- ImageInfo: []*models.ImageInfo{},
+ ctx.JSON(http.StatusOK, models.ImagesPageResult{
+ Count: 0,
+ Images: []*models.Image{},
})
} else {
- ctx.JSON(http.StatusOK, getImagesResult.Payload)
+ ctx.JSON(http.StatusOK, models.ImagesPageResult{
+ Count: total,
+ Images: imageList,
+ })
}
- log.Info("Get images end")
}
func GetModelDirs(jobName string, parentDir string) (string, error) {
@@ -909,7 +1190,8 @@ func GetRate(ctx *context.Context) {
}
func downloadCode(repo *models.Repository, codePath, branchName string) error {
- if err := git.Clone(repo.RepoPath(), codePath, git.CloneRepoOptions{Branch: branchName}); err != nil {
+	// add the "file:///" prefix so Git treats the local repo path as a URL, which makes the shallow-clone Depth option take effect
+ if err := git.Clone(CLONE_FILE_PREFIX+repo.RepoPath(), codePath, git.CloneRepoOptions{Branch: branchName, Depth: 1}); err != nil {
log.Error("Failed to clone repository: %s (%v)", repo.FullName(), err)
return err
}
@@ -969,7 +1251,7 @@ func downloadRateCode(repo *models.Repository, taskName, rateOwnerName, rateRepo
return err
}
- if err := git.Clone(repoExt.RepoPath(), codePath, git.CloneRepoOptions{}); err != nil {
+ if err := git.Clone(CLONE_FILE_PREFIX+repoExt.RepoPath(), codePath, git.CloneRepoOptions{Depth: 1}); err != nil {
log.Error("Failed to clone repository: %s (%v)", repoExt.FullName(), err)
return err
}
@@ -1156,6 +1438,7 @@ func SyncCloudbrainStatus() {
if task.EndTime == 0 && models.IsModelArtsDebugJobTerminal(task.Status) {
task.EndTime = timeutil.TimeStampNow()
}
+ task.CorrectCreateUnix()
task.ComputeAndSetDuration()
err = models.UpdateJob(task)
if err != nil {
@@ -1182,7 +1465,7 @@ func SyncCloudbrainStatus() {
if task.EndTime == 0 && models.IsTrainJobTerminal(task.Status) && task.StartTime > 0 {
task.EndTime = task.StartTime.Add(task.Duration)
}
-
+ task.CorrectCreateUnix()
err = models.UpdateJob(task)
if err != nil {
log.Error("UpdateJob(%s) failed:%v", task.JobName, err)
@@ -1304,6 +1587,7 @@ func handleNoDurationTask(cloudBrains []*models.Cloudbrain) {
task.StartTime = timeutil.TimeStamp(startTime / 1000)
task.EndTime = task.StartTime.Add(duration)
}
+ task.CorrectCreateUnix()
task.ComputeAndSetDuration()
err = models.UpdateJob(task)
if err != nil {
@@ -1693,8 +1977,11 @@ func BenchmarkDel(ctx *context.Context) {
}
var isAdminPage = ctx.Query("isadminpage")
+ var isHomePage = ctx.Query("ishomepage")
if ctx.IsUserSiteAdmin() && isAdminPage == "true" {
ctx.Redirect(setting.AppSubURL + "/admin" + "/cloudbrains")
+ } else if isHomePage == "true" {
+ ctx.Redirect(setting.AppSubURL + "/cloudbrains")
} else {
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/cloudbrain/benchmark")
}
@@ -1747,8 +2034,11 @@ func CloudBrainTrainJobDel(ctx *context.Context) {
}
var isAdminPage = ctx.Query("isadminpage")
+ var isHomePage = ctx.Query("ishomepage")
if ctx.IsUserSiteAdmin() && isAdminPage == "true" {
ctx.Redirect(setting.AppSubURL + "/admin" + "/cloudbrains")
+ } else if isHomePage == "true" {
+ ctx.Redirect(setting.AppSubURL + "/cloudbrains")
} else {
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job?listType=" + listType)
}
diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go
index d23722372..73036a2cc 100755
--- a/routers/repo/dataset.go
+++ b/routers/repo/dataset.go
@@ -4,7 +4,6 @@ import (
"encoding/json"
"fmt"
"net/http"
- "regexp"
"sort"
"strconv"
"strings"
@@ -25,8 +24,6 @@ const (
taskstplIndex base.TplName = "repo/datasets/tasks/index"
)
-var titlePattern = regexp.MustCompile(`^[A-Za-z0-9-_\\.]{1,100}$`)
-
// MustEnableDataset check if repository enable internal dataset
func MustEnableDataset(ctx *context.Context) {
if !ctx.Repo.CanRead(models.UnitTypeDatasets) {
@@ -211,12 +208,12 @@ func CreateDatasetPost(ctx *context.Context, form auth.CreateDatasetForm) {
dataset := &models.Dataset{}
- if !titlePattern.MatchString(form.Title) {
+ if !NamePattern.MatchString(form.Title) {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.title_format_err")))
return
}
if utf8.RuneCountInString(form.Description) > 1024 {
- ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err")))
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err", 1024)))
return
}
@@ -248,12 +245,12 @@ func EditDatasetPost(ctx *context.Context, form auth.EditDatasetForm) {
ctx.Data["Title"] = ctx.Tr("dataset.edit_dataset")
- if !titlePattern.MatchString(form.Title) {
+ if !NamePattern.MatchString(form.Title) {
ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.title_format_err")))
return
}
if utf8.RuneCountInString(form.Description) > 1024 {
- ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err")))
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err", 1024)))
return
}
@@ -361,6 +358,7 @@ func MyDatasets(ctx *context.Context) {
NeedIsPrivate: false,
JustNeedZipFile: true,
NeedRepoInfo: true,
+ RecommendOnly: ctx.QueryBool("recommend"),
})
if err != nil {
ctx.ServerError("datasets", err)
@@ -401,6 +399,7 @@ func PublicDataset(ctx *context.Context) {
Type: cloudbrainType,
JustNeedZipFile: true,
NeedRepoInfo: true,
+ RecommendOnly: ctx.QueryBool("recommend"),
})
if err != nil {
ctx.ServerError("datasets", err)
@@ -457,6 +456,7 @@ func MyFavoriteDataset(ctx *context.Context) {
Type: cloudbrainType,
JustNeedZipFile: true,
NeedRepoInfo: true,
+ RecommendOnly: ctx.QueryBool("recommend"),
})
if err != nil {
ctx.ServerError("datasets", err)
diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go
index e2b75e704..b713f385f 100755
--- a/routers/repo/modelarts.go
+++ b/routers/repo/modelarts.go
@@ -247,7 +247,9 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm
func NotebookShow(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true
debugListType := ctx.Query("debugListType")
-
+ if debugListType == "" {
+ debugListType = "all"
+ }
var ID = ctx.Params(":id")
task, err := models.GetCloudbrainByIDWithDeleted(ID)
if err != nil {
@@ -267,6 +269,7 @@ func NotebookShow(ctx *context.Context) {
if task.DeletedAt.IsZero() { //normal record
if task.Status != result.Status {
task.Status = result.Status
+ models.ParseAndSetDurationFromModelArtsNotebook(result, task)
err = models.UpdateJob(task)
if err != nil {
ctx.Data["error"] = err.Error()
@@ -439,6 +442,7 @@ func NotebookManage(ctx *context.Context) {
param := models.NotebookAction{
Action: action,
}
+ createTime := timeutil.TimeStampNow()
res, err := modelarts.ManageNotebook2(task.JobID, param)
if err != nil {
log.Error("ManageNotebook2(%s) failed:%v", task.JobName, err.Error(), ctx.Data["MsgID"])
@@ -465,6 +469,8 @@ func NotebookManage(ctx *context.Context) {
Image: task.Image,
ComputeResource: task.ComputeResource,
Description: task.Description,
+ CreatedUnix: createTime,
+ UpdatedUnix: createTime,
}
err = models.RestartCloudbrain(task, newTask)
@@ -530,8 +536,11 @@ func NotebookDel(ctx *context.Context) {
}
var isAdminPage = ctx.Query("isadminpage")
+ var isHomePage = ctx.Query("ishomepage")
if ctx.IsUserSiteAdmin() && isAdminPage == "true" {
ctx.Redirect(setting.AppSubURL + "/admin" + "/cloudbrains")
+ } else if isHomePage == "true" {
+ ctx.Redirect(setting.AppSubURL + "/cloudbrains")
} else {
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/debugjob?debugListType=" + listType)
}
@@ -1020,10 +1029,8 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
gitRepo, _ := git.OpenRepository(repo.RepoPath())
commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := git.Clone(repo.RepoPath(), codeLocalPath, git.CloneRepoOptions{
- Branch: branch_name,
- }); err != nil {
- log.Error("Create task failed, server timed out: %s (%v)", repo.FullName(), err)
+ if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
+ log.Error("downloadCode failed, server timed out: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
ctx.RenderWithErr("Create task failed, server timed out", tplModelArtsTrainJobNew, &form)
return
@@ -1238,9 +1245,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
gitRepo, _ := git.OpenRepository(repo.RepoPath())
commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := git.Clone(repo.RepoPath(), codeLocalPath, git.CloneRepoOptions{
- Branch: branch_name,
- }); err != nil {
+ if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
log.Error("Failed git clone repo to local(!: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
ctx.RenderWithErr("Failed git clone repo to local!", tplModelArtsTrainJobVersionNew, &form)
@@ -1685,8 +1690,11 @@ func TrainJobDel(ctx *context.Context) {
}
var isAdminPage = ctx.Query("isadminpage")
+ var isHomePage = ctx.Query("ishomepage")
if ctx.IsUserSiteAdmin() && isAdminPage == "true" {
ctx.Redirect(setting.AppSubURL + "/admin" + "/cloudbrains")
+ } else if isHomePage == "true" {
+ ctx.Redirect(setting.AppSubURL + "/cloudbrains")
} else {
ctx.Redirect(setting.AppSubURL + ctx.Repo.RepoLink + "/modelarts/train-job?listType=" + listType)
}
@@ -1864,9 +1872,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
gitRepo, _ := git.OpenRepository(repo.RepoPath())
commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := git.Clone(repo.RepoPath(), codeLocalPath, git.CloneRepoOptions{
- Branch: branch_name,
- }); err != nil {
+ if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
log.Error("Create task failed, server timed out: %s (%v)", repo.FullName(), err)
inferenceJobErrorNewDataPrepare(ctx, form)
ctx.RenderWithErr("Create task failed, server timed out", tplModelArtsInferenceJobNew, &form)
@@ -2037,6 +2043,7 @@ func InferenceJobNew(ctx *context.Context) {
}
func inferenceJobNewDataPrepare(ctx *context.Context) error {
ctx.Data["PageIsCloudBrain"] = true
+ ctx.Data["newInference"] = true
t := time.Now()
var displayJobName = cutString(ctx.User.Name, 5) + t.Format("2006010215") + strconv.Itoa(int(t.Unix()))[5:]
diff --git a/routers/repo/user_data_analysis.go b/routers/repo/user_data_analysis.go
index 9d906270f..995465b09 100755
--- a/routers/repo/user_data_analysis.go
+++ b/routers/repo/user_data_analysis.go
@@ -40,25 +40,25 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
dataHeader := map[string]string{
"A1": ctx.Tr("user.static.id"),
"B1": ctx.Tr("user.static.name"),
- "C1": ctx.Tr("user.static.codemergecount"),
- "D1": ctx.Tr("user.static.commitcount"),
- "E1": ctx.Tr("user.static.issuecount"),
- "F1": ctx.Tr("user.static.commentcount"),
- "G1": ctx.Tr("user.static.focusrepocount"),
- "H1": ctx.Tr("user.static.starrepocount"),
- "I1": ctx.Tr("user.static.logincount"),
- "J1": ctx.Tr("user.static.watchedcount"),
- "K1": ctx.Tr("user.static.commitcodesize"),
- "L1": ctx.Tr("user.static.solveissuecount"),
- "M1": ctx.Tr("user.static.encyclopediascount"),
- "N1": ctx.Tr("user.static.createrepocount"),
- "O1": ctx.Tr("user.static.openiindex"),
- "P1": ctx.Tr("user.static.registdate"),
+ "C1": ctx.Tr("user.static.UserIndex"),
+ "D1": ctx.Tr("user.static.codemergecount"),
+ "E1": ctx.Tr("user.static.commitcount"),
+ "F1": ctx.Tr("user.static.issuecount"),
+ "G1": ctx.Tr("user.static.commentcount"),
+ "H1": ctx.Tr("user.static.focusrepocount"),
+ "I1": ctx.Tr("user.static.starrepocount"),
+ "J1": ctx.Tr("user.static.logincount"),
+ "K1": ctx.Tr("user.static.watchedcount"),
+ "L1": ctx.Tr("user.static.commitcodesize"),
+ "M1": ctx.Tr("user.static.solveissuecount"),
+ "N1": ctx.Tr("user.static.encyclopediascount"),
+ "O1": ctx.Tr("user.static.createrepocount"),
+ "P1": ctx.Tr("user.static.openiindex"),
"Q1": ctx.Tr("user.static.CloudBrainTaskNum"),
"R1": ctx.Tr("user.static.CloudBrainRunTime"),
"S1": ctx.Tr("user.static.CommitDatasetNum"),
"T1": ctx.Tr("user.static.CommitModelCount"),
- "U1": ctx.Tr("user.static.UserIndex"),
+ "U1": ctx.Tr("user.static.registdate"),
"V1": ctx.Tr("user.static.countdate"),
}
for k, v := range dataHeader {
@@ -77,29 +77,26 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
rows := fmt.Sprint(row)
xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
- xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
- xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount)
- xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount)
- xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount)
- xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount)
- xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount)
- xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount)
- xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount)
- xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize)
- xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount)
- xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount)
- xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount)
- xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
-
- formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
- xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3])
-
+ xlsx.SetCellValue(sheetName, "C"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CodeMergeCount)
+ xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
+ xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
+ xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
+ xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount)
+ xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount)
+ xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount)
+ xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount)
+ xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize)
+ xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount)
+ xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount)
+ xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount)
+ xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum)
xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum)
xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount)
- xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
-
+ formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
+ xlsx.SetCellValue(sheetName, "U"+rows, formatTime[0:len(formatTime)-3])
formatTime = userRecord.DataDate
xlsx.SetCellValue(sheetName, "V"+rows, formatTime)
}
@@ -242,25 +239,25 @@ func QueryUserStaticDataPage(ctx *context.Context) {
dataHeader := map[string]string{
"A1": ctx.Tr("user.static.id"),
"B1": ctx.Tr("user.static.name"),
- "C1": ctx.Tr("user.static.codemergecount"),
- "D1": ctx.Tr("user.static.commitcount"),
- "E1": ctx.Tr("user.static.issuecount"),
- "F1": ctx.Tr("user.static.commentcount"),
- "G1": ctx.Tr("user.static.focusrepocount"),
- "H1": ctx.Tr("user.static.starrepocount"),
- "I1": ctx.Tr("user.static.logincount"),
- "J1": ctx.Tr("user.static.watchedcount"),
- "K1": ctx.Tr("user.static.commitcodesize"),
- "L1": ctx.Tr("user.static.solveissuecount"),
- "M1": ctx.Tr("user.static.encyclopediascount"),
- "N1": ctx.Tr("user.static.createrepocount"),
- "O1": ctx.Tr("user.static.openiindex"),
- "P1": ctx.Tr("user.static.registdate"),
+ "C1": ctx.Tr("user.static.UserIndex"),
+ "D1": ctx.Tr("user.static.codemergecount"),
+ "E1": ctx.Tr("user.static.commitcount"),
+ "F1": ctx.Tr("user.static.issuecount"),
+ "G1": ctx.Tr("user.static.commentcount"),
+ "H1": ctx.Tr("user.static.focusrepocount"),
+ "I1": ctx.Tr("user.static.starrepocount"),
+ "J1": ctx.Tr("user.static.logincount"),
+ "K1": ctx.Tr("user.static.watchedcount"),
+ "L1": ctx.Tr("user.static.commitcodesize"),
+ "M1": ctx.Tr("user.static.solveissuecount"),
+ "N1": ctx.Tr("user.static.encyclopediascount"),
+ "O1": ctx.Tr("user.static.createrepocount"),
+ "P1": ctx.Tr("user.static.openiindex"),
"Q1": ctx.Tr("user.static.CloudBrainTaskNum"),
"R1": ctx.Tr("user.static.CloudBrainRunTime"),
"S1": ctx.Tr("user.static.CommitDatasetNum"),
"T1": ctx.Tr("user.static.CommitModelCount"),
- "U1": ctx.Tr("user.static.UserIndex"),
+ "U1": ctx.Tr("user.static.registdate"),
"V1": ctx.Tr("user.static.countdate"),
}
for k, v := range dataHeader {
@@ -273,27 +270,26 @@ func QueryUserStaticDataPage(ctx *context.Context) {
xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
- xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
- xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CommitCount)
- xlsx.SetCellValue(sheetName, "E"+rows, userRecord.IssueCount)
- xlsx.SetCellValue(sheetName, "F"+rows, userRecord.CommentCount)
- xlsx.SetCellValue(sheetName, "G"+rows, userRecord.FocusRepoCount)
- xlsx.SetCellValue(sheetName, "H"+rows, userRecord.StarRepoCount)
- xlsx.SetCellValue(sheetName, "I"+rows, userRecord.LoginCount)
- xlsx.SetCellValue(sheetName, "J"+rows, userRecord.WatchedCount)
- xlsx.SetCellValue(sheetName, "K"+rows, userRecord.CommitCodeSize)
- xlsx.SetCellValue(sheetName, "L"+rows, userRecord.SolveIssueCount)
- xlsx.SetCellValue(sheetName, "M"+rows, userRecord.EncyclopediasCount)
- xlsx.SetCellValue(sheetName, "N"+rows, userRecord.CreateRepoCount)
- xlsx.SetCellValue(sheetName, "O"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
-
- formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
- xlsx.SetCellValue(sheetName, "P"+rows, formatTime[0:len(formatTime)-3])
+ xlsx.SetCellValue(sheetName, "C"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CodeMergeCount)
+ xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
+ xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
+ xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
+ xlsx.SetCellValue(sheetName, "H"+rows, userRecord.FocusRepoCount)
+ xlsx.SetCellValue(sheetName, "I"+rows, userRecord.StarRepoCount)
+ xlsx.SetCellValue(sheetName, "J"+rows, userRecord.LoginCount)
+ xlsx.SetCellValue(sheetName, "K"+rows, userRecord.WatchedCount)
+ xlsx.SetCellValue(sheetName, "L"+rows, userRecord.CommitCodeSize)
+ xlsx.SetCellValue(sheetName, "M"+rows, userRecord.SolveIssueCount)
+ xlsx.SetCellValue(sheetName, "N"+rows, userRecord.EncyclopediasCount)
+ xlsx.SetCellValue(sheetName, "O"+rows, userRecord.CreateRepoCount)
+ xlsx.SetCellValue(sheetName, "P"+rows, fmt.Sprintf("%.2f", userRecord.OpenIIndex))
xlsx.SetCellValue(sheetName, "Q"+rows, userRecord.CloudBrainTaskNum)
xlsx.SetCellValue(sheetName, "R"+rows, fmt.Sprintf("%.2f", float64(userRecord.CloudBrainRunTime)/3600))
xlsx.SetCellValue(sheetName, "S"+rows, userRecord.CommitDatasetNum)
xlsx.SetCellValue(sheetName, "T"+rows, userRecord.CommitModelCount)
- xlsx.SetCellValue(sheetName, "U"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05")
+ xlsx.SetCellValue(sheetName, "U"+rows, formatTime[0:len(formatTime)-3])
formatTime = userRecord.DataDate
xlsx.SetCellValue(sheetName, "V"+rows, formatTime)
}
diff --git a/routers/repo/util.go b/routers/repo/util.go
new file mode 100644
index 000000000..f148fc52e
--- /dev/null
+++ b/routers/repo/util.go
@@ -0,0 +1,5 @@
+package repo
+
+import "regexp"
+
+var NamePattern = regexp.MustCompile(`^[A-Za-z0-9-_\\.]{1,100}$`)
diff --git a/routers/routes/routes.go b/routers/routes/routes.go
index 4cffcd10b..8929666e5 100755
--- a/routers/routes/routes.go
+++ b/routers/routes/routes.go
@@ -12,6 +12,8 @@ import (
"text/template"
"time"
+ "code.gitea.io/gitea/routers/image"
+
"code.gitea.io/gitea/routers/authentication"
"code.gitea.io/gitea/modules/cloudbrain"
@@ -333,6 +335,8 @@ func RegisterRoutes(m *macaron.Macaron) {
})
m.Get("/images/public", repo.GetPublicImages)
m.Get("/images/custom", repo.GetCustomImages)
+ m.Get("/images/star", repo.GetStarImages)
+
m.Get("/repos", routers.ExploreRepos)
m.Get("/datasets", routers.ExploreDatasets)
m.Get("/users", routers.ExploreUsers)
@@ -345,6 +349,7 @@ func RegisterRoutes(m *macaron.Macaron) {
Post(bindIgnErr(auth.InstallForm{}), routers.InstallPost)
m.Get("/^:type(issues|pulls)$", reqSignIn, user.Issues)
m.Get("/milestones", reqSignIn, reqMilestonesDashboardPageEnabled, user.Milestones)
+ m.Get("/cloudbrains", reqSignIn, user.Cloudbrains)
// ***** START: User *****
m.Group("/user", func() {
@@ -520,12 +525,20 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/datasets", func() {
m.Get("", admin.Datasets)
+ m.Put("/:id/action/:action", admin.DatasetAction)
// m.Post("/delete", admin.DeleteDataset)
})
m.Group("/cloudbrains", func() {
m.Get("", admin.CloudBrains)
m.Get("/download", admin.DownloadCloudBrains)
})
+ m.Group("/images", func() {
+ m.Get("", admin.Images)
+ m.Get("/data", repo.GetAllImages)
+ m.Get("/commit_image", admin.CloudBrainCommitImageShow)
+ m.Post("/commit_image", bindIgnErr(auth.CommitAdminImageCloudBrainForm{}), repo.CloudBrainAdminCommitImage)
+ })
+ m.Put("/image/:id/action/:action", image.Action)
m.Group("/^:configType(hooks|system-hooks)$", func() {
m.Get("", admin.DefaultOrSystemWebhooks)
@@ -598,12 +611,11 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Put("/obs_proxy_multipart", repo.PutOBSProxyUpload)
m.Get("/obs_proxy_download", repo.GetOBSProxyDownload)
m.Get("/get_multipart_url", repo.GetMultipartUploadUrl)
- m.Post("/complete_multipart", repo.CompleteMultipart)
- m.Post("/update_chunk", repo.UpdateMultipart)
}, reqSignIn)
m.Group("/attachments", func() {
m.Post("/decompress_done_notify", repo.UpdateAttachmentDecompressState)
+ m.Post("/complete_multipart", repo.CompleteMultipart)
})
m.Group("/attachments", func() {
@@ -974,6 +986,12 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/topics", repo.TopicsPost)
}, context.RepoAssignment(), context.RepoMustNotBeArchived(), reqRepoAdmin)
+ m.Group("/image/:id", func() {
+ m.Get("/:from", cloudbrain.AdminOrImageCreaterRight, repo.CloudBrainImageEdit)
+ m.Post("", cloudbrain.AdminOrImageCreaterRight, bindIgnErr(auth.EditImageCloudBrainForm{}), repo.CloudBrainImageEditPost)
+ m.Delete("", cloudbrain.AdminOrImageCreaterRight, repo.CloudBrainImageDelete)
+ m.Put("/action/:action", reqSignIn, image.Action)
+ })
m.Group("/:username/:reponame", func() {
m.Group("", func() {
m.Get("/^:type(issues|pulls)$", repo.Issues)
@@ -1015,6 +1033,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/:id", func() {
m.Get("", reqRepoCloudBrainReader, repo.CloudBrainShow)
m.Get("/debug", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainDebug)
+ m.Get("/commit_image", cloudbrain.AdminOrJobCreaterRight, repo.CloudBrainCommitImageShow)
+ m.Post("/commit_image/check", cloudbrain.AdminOrJobCreaterRight, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImageCheck)
m.Post("/commit_image", cloudbrain.AdminOrJobCreaterRight, bindIgnErr(auth.CommitImageCloudBrainForm{}), repo.CloudBrainCommitImage)
m.Post("/stop", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainStop)
m.Post("/del", cloudbrain.AdminOrOwnerOrJobCreaterRight, repo.CloudBrainDel)
diff --git a/routers/search.go b/routers/search.go
index c5655b9e1..fe1643c80 100644
--- a/routers/search.go
+++ b/routers/search.go
@@ -68,23 +68,23 @@ func SearchApi(ctx *context.Context) {
if OnlySearchLabel {
searchRepoByLabel(ctx, Key, Page, PageSize)
} else {
- searchRepo(ctx, "repository-es-index", Key, Page, PageSize, OnlyReturnNum)
+ searchRepo(ctx, "repository-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum)
}
return
} else if TableName == "issue" {
- searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "f")
+ searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "f")
return
} else if TableName == "user" {
- searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, true, OnlyReturnNum)
+ searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, true, OnlyReturnNum)
return
} else if TableName == "org" {
- searchUserOrOrg(ctx, "user-es-index", Key, Page, PageSize, false, OnlyReturnNum)
+ searchUserOrOrg(ctx, "user-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, false, OnlyReturnNum)
return
} else if TableName == "dataset" {
- searchDataSet(ctx, "dataset-es-index", Key, Page, PageSize, OnlyReturnNum)
+ searchDataSet(ctx, "dataset-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum)
return
} else if TableName == "pr" {
- searchIssueOrPr(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum, "t")
+ searchIssueOrPr(ctx, "issue-es-index"+setting.INDEXPOSTFIX, Key, Page, PageSize, OnlyReturnNum, "t")
//searchPR(ctx, "issue-es-index", Key, Page, PageSize, OnlyReturnNum)
return
}
@@ -183,7 +183,7 @@ func searchRepoByLabel(ctx *context.Context, Key string, Page int, PageSize int)
topicsQuery := elastic.NewMatchQuery("topics", Key)
boolQ.Should(topicsQuery)
- res, err := client.Search("repository-es-index").Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("topics")).Do(ctx.Req.Context())
+ res, err := client.Search("repository-es-index").Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Highlight(queryHighlight("topics")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -200,15 +200,18 @@ func searchRepoByLabel(ctx *context.Context, Key string, Page int, PageSize int)
}
}
-func getSort(SortBy string, ascending bool) elastic.Sorter {
- var sort elastic.Sorter
- sort = elastic.NewScoreSort()
- if SortBy != "" {
- if SortBy == "default" {
- return sort
+func getSort(SortBy string, ascending bool, secondSortBy string, secondAscending bool) []elastic.Sorter {
+ sort := make([]elastic.Sorter, 0)
+ if SortBy == "default" || SortBy == "" {
+ sort = append(sort, elastic.NewScoreSort())
+ if secondSortBy != "" {
+ log.Info("SortBy=" + SortBy + " secondSortBy=" + secondSortBy)
+ sort = append(sort, elastic.NewFieldSort(secondSortBy).Order(secondAscending))
}
- return elastic.NewFieldSort(SortBy).Order(ascending)
+ } else {
+ sort = append(sort, elastic.NewFieldSort(SortBy).Order(ascending))
}
+ log.Info("sort size=" + fmt.Sprint(len(sort)))
return sort
}
@@ -308,7 +311,7 @@ func searchRepo(ctx *context.Context, TableName string, Key string, Page int, Pa
topicsQuery := elastic.NewMatchQuery("topics", Key).Boost(1).QueryName("f_third")
boolQ.Should(nameQuery, descriptionQuery, topicsQuery)
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("alias", "description", "topics")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "num_stars", false)...).From(from).Size(Size).Highlight(queryHighlight("alias", "description", "topics")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -330,7 +333,7 @@ func searchRepo(ctx *context.Context, TableName string, Key string, Page int, Pa
} else {
log.Info("query all content.")
//搜索的属性要指定{"timestamp":{"unmapped_type":"date"}}
- res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -691,7 +694,7 @@ func searchUserOrOrg(ctx *context.Context, TableName string, Key string, Page in
boolQ.Must(UserOrOrgQuery)
}
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From((Page - 1) * PageSize).Size(PageSize).Highlight(queryHighlight("name", "full_name", "description")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From((Page - 1) * PageSize).Size(PageSize).Highlight(queryHighlight("name", "full_name", "description")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -849,7 +852,7 @@ func searchDataSet(ctx *context.Context, TableName string, Key string, Page int,
fileNameQuery := elastic.NewMatchQuery("file_name", Key).Boost(1).QueryName("f_third")
categoryQuery := elastic.NewMatchQuery("category", Key).Boost(1).QueryName("f_fourth")
boolQ.Should(nameQuery, descQuery, categoryQuery, fileNameQuery)
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("title", "description", "file_name", "category")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Highlight(queryHighlight("title", "description", "file_name", "category")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -864,7 +867,7 @@ func searchDataSet(ctx *context.Context, TableName string, Key string, Page int,
} else {
log.Info("query all datasets.")
//搜索的属性要指定{"timestamp":{"unmapped_type":"date"}}
- res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -1057,7 +1060,7 @@ func searchIssueOrPr(ctx *context.Context, TableName string, Key string, Page in
boolQ.Must(isIssueQuery)
}
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("name", "content", "comment")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Highlight(queryHighlight("name", "content", "comment")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
diff --git a/routers/user/home.go b/routers/user/home.go
index 2fc0c60aa..9c7bed2df 100755
--- a/routers/user/home.go
+++ b/routers/user/home.go
@@ -20,6 +20,7 @@ import (
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup/markdown"
+ "code.gitea.io/gitea/modules/modelarts"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
issue_service "code.gitea.io/gitea/services/issue"
@@ -31,10 +32,11 @@ import (
)
const (
- tplDashboard base.TplName = "user/dashboard/dashboard"
- tplIssues base.TplName = "user/dashboard/issues"
- tplMilestones base.TplName = "user/dashboard/milestones"
- tplProfile base.TplName = "user/profile"
+ tplDashboard base.TplName = "user/dashboard/dashboard"
+ tplIssues base.TplName = "user/dashboard/issues"
+ tplMilestones base.TplName = "user/dashboard/milestones"
+ tplProfile base.TplName = "user/profile"
+ tplCloudbrains base.TplName = "user/dashboard/cloudbrains"
)
// getDashboardContextUser finds out dashboard is viewing as which context user.
@@ -751,3 +753,111 @@ func Email2User(ctx *context.Context) {
}
ctx.Redirect(setting.AppSubURL + "/user/" + u.Name)
}
+
+func Cloudbrains(ctx *context.Context) {
+ ctx.Data["Title"] = ctx.Tr("user.cloudbrains")
+
+ listType := ctx.Query("listType")
+ jobType := ctx.Query("jobType")
+ jobStatus := ctx.Query("jobStatus")
+
+ ctx.Data["ListType"] = listType
+ ctx.Data["JobType"] = jobType
+ ctx.Data["JobStatus"] = jobStatus
+
+ page := ctx.QueryInt("page")
+ if page <= 0 {
+ page = 1
+ }
+ debugType := models.TypeCloudBrainAll
+ if listType == models.GPUResource {
+ debugType = models.TypeCloudBrainOne
+ } else if listType == models.NPUResource {
+ debugType = models.TypeCloudBrainTwo
+ }
+
+ var jobTypes []string
+ jobTypeNot := false
+ if jobType == string(models.JobTypeDebug) {
+ jobTypes = append(jobTypes, string(models.JobTypeSnn4imagenet), string(models.JobTypeBrainScore), string(models.JobTypeDebug))
+ } else if jobType != "all" && jobType != "" {
+ jobTypes = append(jobTypes, jobType)
+ }
+
+ var jobStatuses []string
+ jobStatusNot := false
+ if jobStatus == "other" {
+ jobStatusNot = true
+ jobStatuses = append(jobStatuses, string(models.ModelArtsTrainJobWaiting), string(models.ModelArtsTrainJobFailed), string(models.ModelArtsRunning), string(models.ModelArtsTrainJobCompleted),
+ string(models.ModelArtsStarting), string(models.ModelArtsRestarting), string(models.ModelArtsStartFailed),
+ string(models.ModelArtsStopping), string(models.ModelArtsStopped), string(models.JobSucceeded))
+ } else if jobStatus != "all" && jobStatus != "" {
+ jobStatuses = append(jobStatuses, jobStatus)
+ }
+
+ keyword := strings.Trim(ctx.Query("q"), " ")
+
+ ctxUser := getDashboardContextUser(ctx)
+ if ctx.Written() {
+ return
+ }
+ repos, _, err := models.SearchRepository(&models.SearchRepoOptions{
+ Actor: ctx.User,
+ OwnerID: ctxUser.ID,
+ })
+ if err != nil {
+ ctx.ServerError("SearchRepository", err)
+ return
+ }
+ var repoIDList []int64
+ for i, _ := range repos {
+ repoIDList = append(repoIDList, repos[i].ID)
+ }
+ ciTasks, count, err := models.Cloudbrains(&models.CloudbrainsOptions{
+ ListOptions: models.ListOptions{
+ Page: page,
+ PageSize: setting.UI.IssuePagingNum,
+ },
+ Keyword: keyword,
+ UserID: ctxUser.ID,
+ Type: debugType,
+ JobTypeNot: jobTypeNot,
+ JobStatusNot: jobStatusNot,
+ JobStatus: jobStatuses,
+ JobTypes: jobTypes,
+ NeedRepoInfo: true,
+ IsLatestVersion: modelarts.IsLatestVersion,
+ RepoIDList: repoIDList,
+ })
+ if err != nil {
+ ctx.ServerError("Get job failed:", err)
+ return
+ }
+
+ for i, task := range ciTasks {
+ ciTasks[i].CanDebug = true
+ ciTasks[i].CanDel = true
+ ciTasks[i].Cloudbrain.ComputeResource = task.ComputeResource
+ }
+
+ pager := context.NewPagination(int(count), setting.UI.IssuePagingNum, page, getTotalPage(count, setting.UI.IssuePagingNum))
+ pager.SetDefaultParams(ctx)
+ pager.AddParam(ctx, "listType", "ListType")
+ ctx.Data["Page"] = pager
+ ctx.Data["PageIsUserCloudBrain"] = true
+ ctx.Data["Tasks"] = ciTasks
+ ctx.Data["CanCreate"] = true
+ ctx.Data["Keyword"] = keyword
+
+ ctx.HTML(200, tplCloudbrains)
+
+}
+func getTotalPage(total int64, pageSize int) int {
+
+ another := 0
+ if int(total)%pageSize != 0 {
+ another = 1
+ }
+ return int(total)/pageSize + another
+
+}
diff --git a/routers/user/profile.go b/routers/user/profile.go
index 41d8561d6..f82c03a75 100755
--- a/routers/user/profile.go
+++ b/routers/user/profile.go
@@ -106,9 +106,9 @@ func Profile(ctx *context.Context) {
for _, org := range orgs {
_, repoCount, err := models.SearchRepository(&models.SearchRepoOptions{
- OwnerID: org.ID,
- Private: ctx.IsSigned,
- Actor: ctx.User,
+ OwnerID: org.ID,
+ Private: ctx.IsSigned,
+ Actor: ctx.User,
})
if err != nil {
ctx.ServerError("SearchRepository", err)
@@ -175,6 +175,8 @@ func Profile(ctx *context.Context) {
orderBy = models.SearchOrderByAlphabeticallyReverse
case "alphabetically":
orderBy = models.SearchOrderByAlphabetically
+ case "downloadtimes":
+ orderBy = models.SearchOrderByDownloadTimes
case "moststars":
orderBy = models.SearchOrderByStarsReverse
case "feweststars":
diff --git a/semantic.json b/semantic.json
index fee52af53..bc9750cef 100644
--- a/semantic.json
+++ b/semantic.json
@@ -56,6 +56,7 @@
"tab",
"table",
"text",
- "transition"
+ "transition",
+ "toast"
]
}
diff --git a/services/socketwrap/clientManager.go b/services/socketwrap/clientManager.go
index 61f356a66..6ffa96933 100755
--- a/services/socketwrap/clientManager.go
+++ b/services/socketwrap/clientManager.go
@@ -10,7 +10,7 @@ import (
"github.com/elliotchance/orderedmap"
)
-var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 25, 26, 27, 28, 29, 30, 31}
+var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
type ClientsManager struct {
Clients *orderedmap.OrderedMap
diff --git a/templates/admin/cloudbrain/imagecommit.tmpl b/templates/admin/cloudbrain/imagecommit.tmpl
new file mode 100644
index 000000000..e504f08b0
--- /dev/null
+++ b/templates/admin/cloudbrain/imagecommit.tmpl
@@ -0,0 +1,129 @@
+
+{{template "base/head" .}}
+
{{.i18n.Tr "repo.image_overwrite"}}
+{{.i18n.Tr "repo.images.task_delete_confirm"}}
+{{.i18n.Tr "dataset.settings.delete_desc"}}
- {{.i18n.Tr "dataset.settings.delete_notices_2" `` | Safe}}{{.i18n.Tr "repo.images.task_delete_confirm"}}
+