{{.i18n.Tr "dataset.settings.delete_desc"}}
- {{.i18n.Tr "dataset.settings.delete_notices_2" `` | Safe}}-
diff --git a/models/attachment.go b/models/attachment.go
index 7c95a73dd..ea8f1645f 100755
--- a/models/attachment.go
+++ b/models/attachment.go
@@ -10,6 +10,7 @@ import (
"io"
"path"
"strings"
+ "time"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/obs"
@@ -64,6 +65,7 @@ type AttachmentInfo struct {
Repo *Repository `xorm:"extends"`
RelAvatarLink string `xorm:"extends"`
UserName string `xorm:"extends"`
+ Recommend bool `xorm:"-"`
}
type AttachmentsOptions struct {
@@ -78,6 +80,7 @@ type AttachmentsOptions struct {
JustNeedZipFile bool
NeedRepoInfo bool
Keyword string
+ RecommendOnly bool
}
func (a *Attachment) AfterUpdate() {
@@ -104,6 +107,14 @@ func (a *Attachment) IncreaseDownloadCount() error {
return nil
}
+// UpdateDatasetUpdateUnix refreshes the parent dataset's updated_unix to the current time.
+func (a *Attachment) UpdateDatasetUpdateUnix() error {
+	if _, err := x.Exec("UPDATE `dataset` SET updated_unix=? WHERE id=?", time.Now().Unix(), a.DatasetID); err != nil {
+		return fmt.Errorf("UpdateDatasetUpdateUnix: %v", err)
+	}
+	return nil
+}
+
// APIFormat converts models.Attachment to api.Attachment
func (a *Attachment) APIFormat() *api.Attachment {
return &api.Attachment{
@@ -570,6 +581,11 @@ func Attachments(opts *AttachmentsOptions) ([]*AttachmentInfo, int64, error) {
builder.Eq{"attachment.is_private": opts.IsPrivate},
)
}
+ if opts.RecommendOnly {
+ cond = cond.And(builder.In("attachment.id", builder.Select("attachment.id").
+ From("attachment").
+ Join("INNER", "dataset", "attachment.dataset_id = dataset.id and dataset.recommend=true")))
+ }
if opts.JustNeedZipFile {
var DecompressState []int32
@@ -618,6 +634,7 @@ func Attachments(opts *AttachmentsOptions) ([]*AttachmentInfo, int64, error) {
if err != nil {
return nil, 0, fmt.Errorf("GetDatasetByID failed error: %v", err)
}
+ attachment.Recommend = dataset.Recommend
repo, err := GetRepositoryByID(dataset.RepoID)
if err == nil {
attachment.Repo = repo
diff --git a/models/cloudbrain.go b/models/cloudbrain.go
index 45ed52bd5..480350c8d 100755
--- a/models/cloudbrain.go
+++ b/models/cloudbrain.go
@@ -580,6 +580,8 @@ type CommitImageParams struct {
Topics []string
CloudBrainType int
UID int64
+ Place string
+ Type int
}
type CommitImageResult struct {
diff --git a/models/cloudbrain_image.go b/models/cloudbrain_image.go
index c88db0f67..eb21e0d87 100644
--- a/models/cloudbrain_image.go
+++ b/models/cloudbrain_image.go
@@ -567,12 +567,12 @@ func isImageStaring(e Engine, userID, imageID int64) bool {
}
func RecommendImage(imageId int64, recommond bool) error {
- image := Image{Type: getRecommondType(recommond)}
+ image := Image{Type: GetRecommondType(recommond)}
_, err := x.ID(imageId).Cols("type").Update(image)
return err
}
-func getRecommondType(recommond bool) int {
+func GetRecommondType(recommond bool) int {
if recommond {
return RECOMMOND_TYPE
diff --git a/models/dataset.go b/models/dataset.go
index 95800100c..e841261c7 100755
--- a/models/dataset.go
+++ b/models/dataset.go
@@ -23,7 +23,8 @@ type Dataset struct {
Category string
Description string `xorm:"TEXT"`
DownloadTimes int64
- NumStars int `xorm:"INDEX NOT NULL DEFAULT 0"`
+ NumStars int `xorm:"INDEX NOT NULL DEFAULT 0"`
+ Recommend bool `xorm:"INDEX NOT NULL DEFAULT false"`
License string
Task string
ReleaseID int64 `xorm:"INDEX"`
@@ -99,6 +100,7 @@ type SearchDatasetOptions struct {
OwnerID int64
RepoID int64
IncludePublic bool
+ RecommendOnly bool
Category string
Task string
License string
@@ -132,6 +134,13 @@ func CreateDataset(dataset *Dataset) (err error) {
}
+func RecommendDataset(dataSetId int64, recommend bool) error {
+
+ dataset := Dataset{Recommend: recommend}
+ _, err := x.ID(dataSetId).Cols("recommend").Update(dataset)
+ return err
+}
+
func SearchDataset(opts *SearchDatasetOptions) (DatasetList, int64, error) {
cond := SearchDatasetCondition(opts)
return SearchDatasetByCondition(opts, cond)
@@ -146,6 +155,9 @@ func SearchDatasetCondition(opts *SearchDatasetOptions) builder.Cond {
if opts.RepoID > 0 {
cond = cond.And(builder.Eq{"dataset.repo_id": opts.RepoID})
}
+ if opts.RecommendOnly {
+ cond = cond.And(builder.Eq{"dataset.recommend": opts.RecommendOnly})
+ }
if opts.IncludePublic {
cond = cond.And(builder.Eq{"dataset.status": DatasetStatusPublic})
@@ -198,7 +210,7 @@ func SearchDatasetByCondition(opts *SearchDatasetOptions, cond builder.Cond) (Da
defer sess.Close()
datasets := make(DatasetList, 0, opts.PageSize)
- selectColumnsSql := "distinct dataset.id,dataset.title, dataset.status, dataset.category, dataset.description, dataset.download_times, dataset.license, dataset.task, dataset.release_id, dataset.user_id, dataset.repo_id, dataset.created_unix,dataset.updated_unix,dataset.num_stars"
+ selectColumnsSql := "distinct dataset.id,dataset.title, dataset.status, dataset.category, dataset.description, dataset.download_times, dataset.license, dataset.task, dataset.release_id, dataset.user_id, dataset.repo_id, dataset.created_unix,dataset.updated_unix,dataset.num_stars,dataset.recommend"
count, err := sess.Distinct("dataset.id").Join("INNER", "repository", "repository.id = dataset.repo_id").
Join("INNER", "attachment", "attachment.dataset_id=dataset.id").
diff --git a/models/repo.go b/models/repo.go
index 25bfb4a74..db2694617 100755
--- a/models/repo.go
+++ b/models/repo.go
@@ -1554,6 +1554,11 @@ func GetAllMirrorRepositoriesCount() (int64, error) {
return x.Where("is_mirror = ?", true).Count(repo)
}
+func GetAllOrgRepositoriesCount() (int64, error) {
+ repo := new(Repository)
+ return x.Table("repository").Join("INNER", []string{"\"user\"", "u"}, "repository.owner_id = u.id and u.type=1").Count(repo)
+}
+
func GetAllForkRepositoriesCount() (int64, error) {
repo := new(Repository)
return x.Where("is_fork = ?", true).Count(repo)
diff --git a/models/summary_statistic.go b/models/summary_statistic.go
index e5cf54b75..7787aa292 100644
--- a/models/summary_statistic.go
+++ b/models/summary_statistic.go
@@ -2,6 +2,8 @@ package models
import (
"fmt"
+ "strconv"
+ "time"
"code.gitea.io/gitea/modules/timeutil"
)
@@ -45,6 +47,7 @@ type SummaryStatistic struct {
NumRepoFork int64 `xorm:"NOT NULL DEFAULT 0"`
NumRepoMirror int64 `xorm:"NOT NULL DEFAULT 0"`
NumRepoSelf int64 `xorm:"NOT NULL DEFAULT 0"`
+ NumRepoOrg int64 `xorm:"NOT NULL DEFAULT 0"`
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
@@ -69,6 +72,37 @@ func DeleteSummaryStatisticDaily(date string) error {
return nil
}
+func GetLatest2SummaryStatistic() ([]*SummaryStatistic, error) {
+ summaryStatistics := make([]*SummaryStatistic, 0)
+ err := xStatistic.Desc("created_unix").Limit(2).Find(&summaryStatistics)
+ return summaryStatistics, err
+}
+
+func GetSummaryStatisticByTimeCount(beginTime time.Time, endTime time.Time) (int64, error) {
+ summaryStatistics := new(SummaryStatistic)
+ total, err := xStatistic.Asc("created_unix").Where("created_unix>=" + strconv.FormatInt(beginTime.Unix(), 10) + " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10)).Count(summaryStatistics)
+ return total, err
+}
+
+func GetSummaryStatisticByDateCount(dates []string) (int64, error) {
+ summaryStatistics := new(SummaryStatistic)
+ total, err := xStatistic.Asc("created_unix").In("date", dates).Count(summaryStatistics)
+ return total, err
+}
+
+func GetSummaryStatisticByTime(beginTime time.Time, endTime time.Time, page int, pageSize int) ([]*SummaryStatistic, error) {
+ summaryStatistics := make([]*SummaryStatistic, 0)
+ err := xStatistic.Asc("created_unix").Limit(pageSize+1, (page-1)*pageSize).Where("created_unix>=" + strconv.FormatInt(beginTime.Unix(), 10) + " and created_unix<" + strconv.FormatInt(endTime.Unix(), 10)).Find(&summaryStatistics)
+
+ return summaryStatistics, err
+}
+
+func GetSummaryStatisticByDates(dates []string, page int, pageSize int) ([]*SummaryStatistic, error) {
+ summaryStatistics := make([]*SummaryStatistic, 0)
+ err := xStatistic.Asc("created_unix").In("date", dates).Limit(pageSize+1, (page-1)*pageSize).Find(&summaryStatistics)
+ return summaryStatistics, err
+}
+
func InsertSummaryStatistic(summaryStatistic *SummaryStatistic) (int64, error) {
return xStatistic.Insert(summaryStatistic)
}
diff --git a/models/user_business_analysis.go b/models/user_business_analysis.go
index ca8c5071f..34d84555c 100644
--- a/models/user_business_analysis.go
+++ b/models/user_business_analysis.go
@@ -831,7 +831,12 @@ func getUserIndex(dateRecord UserBusinessAnalysis, ParaWeight map[string]float64
result += float64(dateRecord.StarRepoCount) * getParaWeightValue("StarRepoCount", ParaWeight, 0.1)
result += float64(dateRecord.LoginCount) * getParaWeightValue("LoginCount", ParaWeight, 0.1)
result += float64(dateRecord.WatchedCount) * getParaWeightValue("WatchedCount", ParaWeight, 0.3)
- result += float64(dateRecord.CommitCodeSize) * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1)
+ codeLine := float64(dateRecord.CommitCodeSize) / 1000
+ limitCodeLine := getParaWeightValue("LimitCommitCodeSize", ParaWeight, 100)
+ if codeLine >= limitCodeLine {
+ codeLine = limitCodeLine
+ }
+ result += codeLine * getParaWeightValue("CommitCodeSize", ParaWeight, 0.1)
result += float64(dateRecord.SolveIssueCount) * getParaWeightValue("SolveIssueCount", ParaWeight, 0.2)
result += float64(dateRecord.EncyclopediasCount) * getParaWeightValue("EncyclopediasCount", ParaWeight, 0.1)
result += float64(dateRecord.CreateRepoCount) * getParaWeightValue("CreateRepoCount", ParaWeight, 0.05)
diff --git a/modules/auth/cloudbrain.go b/modules/auth/cloudbrain.go
index 85f3a2127..e5be38084 100755
--- a/modules/auth/cloudbrain.go
+++ b/modules/auth/cloudbrain.go
@@ -33,6 +33,16 @@ type CommitImageCloudBrainForm struct {
Topics string `form:"topics"`
}
+// CommitAdminImageCloudBrainForm is the admin form for committing a cloudbrain image.
+// Note: bool fields must NOT carry binding:"Required" — the binder treats the zero
+// value (false) as missing, which would make unchecked isPrivate/isRecommend fail validation.
+type CommitAdminImageCloudBrainForm struct {
+	Description string `form:"description" binding:"Required"`
+	Type        int    `form:"type" binding:"Required"`
+	Tag         string `form:"tag" binding:"Required;MaxSize(100)"`
+	IsPrivate   bool   `form:"isPrivate"`
+	Topics      string `form:"topics"`
+	Place       string `form:"place" binding:"Required"`
+	IsRecommend bool   `form:"isRecommend"`
+}
+
type EditImageCloudBrainForm struct {
ID int64 `form:"id" binding:"Required"`
Description string `form:"description" binding:"Required"`
diff --git a/modules/cloudbrain/resty.go b/modules/cloudbrain/resty.go
index f1f213bea..1565d3044 100755
--- a/modules/cloudbrain/resty.go
+++ b/modules/cloudbrain/resty.go
@@ -312,12 +312,51 @@ sendjob:
return nil
})
if err == nil {
-
go updateImageStatus(image, isSetCreatedUnix, createTime)
}
return err
}
+// CommitAdminImage records an admin-submitted image (already built, no commit job)
+// after checking the tag is not taken; image row and topics are saved in one tx.
+func CommitAdminImage(params models.CommitImageParams) error {
+
+	exist, err := models.IsImageExist(params.ImageTag)
+
+	if err != nil {
+		return fmt.Errorf("CommitAdminImage: %v", err)
+	}
+	if exist {
+		return models.ErrorImageTagExist{
+			Tag: params.ImageTag,
+		}
+	}
+
+	image := models.Image{
+		CloudbrainType: params.CloudBrainType,
+		UID:            params.UID,
+		IsPrivate:      params.IsPrivate,
+		Tag:            params.ImageTag,
+		Description:    params.ImageDescription,
+		Place:          params.Place,
+		Status:         models.IMAGE_STATUS_SUCCESS,
+		Type:           params.Type,
+	}
+
+	err = models.WithTx(func(ctx models.DBContext) error {
+
+		if err := models.CreateLocalImage(&image); err != nil {
+			log.Error("Failed to insert image record.", err)
+			return fmt.Errorf("CommitAdminImage: %v", err)
+		}
+		if err := models.SaveImageTopics(image.ID, params.Topics...); err != nil {
+			log.Error("Failed to insert image topics.", err)
+			return fmt.Errorf("CommitAdminImage: %v", err)
+		}
+		return nil
+	})
+	return err
+}
+
func updateImageStatus(image models.Image, isSetCreatedUnix bool, createTime time.Time) {
attemps := 5
commitSuccess := false
diff --git a/modules/storage/minio_ext.go b/modules/storage/minio_ext.go
index 2f738ebad..167cd0488 100755
--- a/modules/storage/minio_ext.go
+++ b/modules/storage/minio_ext.go
@@ -2,6 +2,7 @@ package storage
import (
"encoding/xml"
+ "errors"
"path"
"sort"
"strconv"
@@ -129,7 +130,7 @@ func NewMultiPartUpload(uuid string) (string, error) {
return core.NewMultipartUpload(bucketName, objectName, miniov6.PutObjectOptions{})
}
-func CompleteMultiPartUpload(uuid string, uploadID string) (string, error) {
+func CompleteMultiPartUpload(uuid string, uploadID string, totalChunks int) (string, error) {
client, core, err := getClients()
if err != nil {
log.Error("getClients failed:", err.Error())
@@ -146,6 +147,11 @@ func CompleteMultiPartUpload(uuid string, uploadID string) (string, error) {
return "", err
}
+ if len(partInfos) != totalChunks {
+ log.Error("ListObjectParts number(%d) is not equal the set total chunk number(%d)", len(partInfos), totalChunks)
+ return "", errors.New("the parts is not complete")
+ }
+
var complMultipartUpload completeMultipartUpload
for _, partInfo := range partInfos {
complMultipartUpload.Parts = append(complMultipartUpload.Parts, miniov6.CompletePart{
diff --git a/modules/storage/obs.go b/modules/storage/obs.go
index 08a354359..03349864a 100755
--- a/modules/storage/obs.go
+++ b/modules/storage/obs.go
@@ -59,21 +59,55 @@ func ObsHasObject(path string) (bool, error) {
return hasObject, nil
}
+// listAllParts pages through ObsCli.ListParts until the listing is no longer
+// truncated and returns every (PartNumber, ETag) pair for the upload.
+func listAllParts(uuid, uploadID, key string) (output *obs.ListPartsOutput, err error) {
+	output = &obs.ListPartsOutput{}
+	partNumberMarker := 0
+	for {
+		temp, err := ObsCli.ListParts(&obs.ListPartsInput{
+			Bucket:           setting.Bucket,
+			Key:              key,
+			UploadId:         uploadID,
+			MaxParts:         MAX_LIST_PARTS,
+			PartNumberMarker: partNumberMarker,
+		})
+		if err != nil {
+			log.Error("ListParts failed:", err.Error())
+			return output, err
+		}
+
+		partNumberMarker = temp.NextPartNumberMarker
+		log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", uuid, temp.MaxParts, temp.PartNumberMarker, temp.NextPartNumberMarker, len(temp.Parts))
+
+		for _, partInfo := range temp.Parts {
+			output.Parts = append(output.Parts, obs.Part{
+				PartNumber: partInfo.PartNumber,
+				ETag:       partInfo.ETag,
+			})
+		}
+
+		// Stop when the listing is no longer truncated; otherwise loop
+		// again with the advanced PartNumberMarker to fetch the next
+		// page. (The former else/continue plus a trailing break was
+		// unreachable dead code.)
+		if !temp.IsTruncated {
+			break
+		}
+	}
+
+	return output, nil
+}
+
func GetObsPartInfos(uuid, uploadID, fileName string) (string, error) {
key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
- output, err := ObsCli.ListParts(&obs.ListPartsInput{
- Bucket: setting.Bucket,
- Key: key,
- UploadId: uploadID,
- })
+ allParts, err := listAllParts(uuid, uploadID, key)
if err != nil {
- log.Error("ListParts failed:", err.Error())
+ log.Error("listAllParts failed: %v", err)
return "", err
}
var chunks string
- for _, partInfo := range output.Parts {
+ for _, partInfo := range allParts.Parts {
chunks += strconv.Itoa(partInfo.PartNumber) + "-" + partInfo.ETag + ","
}
@@ -94,45 +128,25 @@ func NewObsMultiPartUpload(uuid, fileName string) (string, error) {
return output.UploadId, nil
}
-func CompleteObsMultiPartUpload(uuid, uploadID, fileName string) error {
+func CompleteObsMultiPartUpload(uuid, uploadID, fileName string, totalChunks int) error {
input := &obs.CompleteMultipartUploadInput{}
input.Bucket = setting.Bucket
input.Key = strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, fileName)), "/")
input.UploadId = uploadID
- partNumberMarker := 0
- for {
- output, err := ObsCli.ListParts(&obs.ListPartsInput{
- Bucket: setting.Bucket,
- Key: input.Key,
- UploadId: uploadID,
- MaxParts: MAX_LIST_PARTS,
- PartNumberMarker: partNumberMarker,
- })
- if err != nil {
- log.Error("ListParts failed:", err.Error())
- return err
- }
-
- partNumberMarker = output.NextPartNumberMarker
- log.Info("uuid:%s, MaxParts:%d, PartNumberMarker:%d, NextPartNumberMarker:%d, len:%d", uuid, output.MaxParts, output.PartNumberMarker, output.NextPartNumberMarker, len(output.Parts))
-
- for _, partInfo := range output.Parts {
- input.Parts = append(input.Parts, obs.Part{
- PartNumber: partInfo.PartNumber,
- ETag: partInfo.ETag,
- })
- }
-
- if len(output.Parts) < output.MaxParts {
- break
- } else {
- continue
- }
+ allParts, err := listAllParts(uuid, uploadID, input.Key)
+ if err != nil {
+ log.Error("listAllParts failed: %v", err)
+ return err
+ }
- break
+ if len(allParts.Parts) != totalChunks {
+ log.Error("listAllParts number(%d) is not equal the set total chunk number(%d)", len(allParts.Parts), totalChunks)
+ return errors.New("the parts is not complete")
}
+ input.Parts = allParts.Parts
+
output, err := ObsCli.CompleteMultipartUpload(input)
if err != nil {
log.Error("CompleteMultipartUpload failed:", err.Error())
diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini
index 14d4e19d1..6d1ac24c7 100755
--- a/options/locale/locale_en-US.ini
+++ b/options/locale/locale_en-US.ini
@@ -937,6 +937,15 @@ model_manager = Model
model_noright=No right
model_rename=Duplicate model name, please modify model name.
+date=Date
+repo_add=Project Increment
+repo_total=Project Total
+repo_public_add=Public Project Increment
+repo_private_add=Private Project Increment
+repo_fork_add=Fork Project Increment
+repo_mirror_add=Mirror Project Increment
+repo_self_add=Custom Project Increment
+
debug=Debug
debug_again=Restart
stop=Stop
@@ -1001,7 +1010,9 @@ get_repo_stat_error=Can not get the statistics of the repository.
get_repo_info_error=Can not get the information of the repository.
generate_statistic_file_error=Failed to generate file.
repo_stat_inspect=ProjectAnalysis
+repo_stat_develop=ProjectGrowthAnalysis
all=All
+current_year=Current Year
computing.all = All
computing.Introduction=Introduction
@@ -1389,6 +1400,7 @@ issues.filter_sort.feweststars = Fewest stars
issues.filter_sort.mostforks = Most forks
issues.filter_sort.fewestforks = Fewest forks
issues.filter_sort.downloadtimes = Most downloaded
+issues.filter_sort.moststars = Most star
issues.action_open = Open
issues.action_close = Close
issues.action_label = Label
@@ -2502,11 +2514,15 @@ repos.contributor=Contributor
repos.yes=Yes
repos.no=No
+images.recommend = Recommend
+images.unrecommend = Unrecommend
datasets.dataset_manage_panel= Dataset Manage
datasets.owner=Owner
datasets.name=name
datasets.private=Private
+datasets.recommend=Set recommend
+datasets.unrecommend=Set unrecommend
cloudbrain.all_task_types=All Task Types
cloudbrain.all_computing_resources=All Computing Resources
@@ -2854,7 +2870,7 @@ mirror_sync_create = synced new reference %[2]s to %[2]s at %[3]s from mirror
approve_pull_request = `approved %s#%[2]s`
reject_pull_request = `suggested changes for %s#%[2]s`
-upload_dataset=`upload dataset %s`
+upload_dataset=`upload dataset %s`
task_gpudebugjob=`created CPU/GPU type debugging task%s`
task_npudebugjob=`created NPU type debugging task %s`
task_nputrainjob=`created NPU training task%s`
@@ -2964,6 +2980,7 @@ snn4imagenet_path = Snn4imagenet script path
brainscore_path = Brainscore script path
start_command = Start command
choose_mirror = select mirror or enter mirror path
+input_mirror = Please enter image path
select_dataset = select dataset
specification = specification
select_specification = select specification
diff --git a/options/locale/locale_zh-CN.ini b/options/locale/locale_zh-CN.ini
index de9f79319..2232c36a1 100755
--- a/options/locale/locale_zh-CN.ini
+++ b/options/locale/locale_zh-CN.ini
@@ -938,6 +938,16 @@ model_manager = 模型
model_noright=无权限操作
model_rename=模型名称重复,请修改模型名称
+
+date=日期
+repo_add=新增项目
+repo_total=累计项目
+repo_public_add=新增公开项目
+repo_private_add=新增私有项目
+repo_fork_add=新增派生项目
+repo_mirror_add=新增镜像项目
+repo_self_add=新增自建项目
+
debug=调试
debug_again=再次调试
stop=停止
@@ -1009,7 +1019,9 @@ get_repo_stat_error=查询当前仓库的统计信息失败。
get_repo_info_error=查询当前仓库信息失败。
generate_statistic_file_error=生成文件失败。
repo_stat_inspect=项目分析
+repo_stat_develop=项目增长趋势
all=所有
+current_year=今年
computing.all=全部
computing.Introduction=简介
@@ -1401,6 +1413,7 @@ issues.filter_sort.feweststars=点赞由少到多
issues.filter_sort.mostforks=派生由多到少
issues.filter_sort.fewestforks=派生由少到多
issues.filter_sort.downloadtimes=下载次数
+issues.filter_sort.moststars=收藏数量
issues.action_open=开启
issues.action_close=关闭
issues.action_label=标签
@@ -2512,11 +2525,15 @@ repos.contributor=贡献者数
repos.yes=是
repos.no=否
+images.recommend = 推荐
+images.unrecommend = 不推荐
datasets.dataset_manage_panel=数据集管理
datasets.owner=所有者
datasets.name=名称
datasets.private=私有
+datasets.recommend=设为推荐
+datasets.unrecommend=取消推荐
cloudbrain.all_task_types=全部任务类型
cloudbrain.all_computing_resources=全部计算资源
@@ -2864,7 +2881,7 @@ mirror_sync_create=从镜像同步了新的引用 %[2]s
mirror_sync_delete=从镜像同步并从 %[3]s 删除了引用 %[2]s
approve_pull_request=`同意了 %s#%[2]s`
reject_pull_request=`建议变更 %s#%[2]s`
-upload_dataset=`上传了数据集文件 %s`
+upload_dataset=`上传了数据集文件 %s`
task_gpudebugjob=`创建了CPU/GPU类型调试任务 %s`
task_npudebugjob=`创建了NPU类型调试任务 %s`
task_nputrainjob=`创建了NPU类型训练任务 %s`
@@ -2974,6 +2991,7 @@ snn4imagenet_path = snn4imagenet脚本存放路径
brainscore_path = brainscore脚本存放路径
start_command = 启动命令
choose_mirror = 选择镜像或输入镜像地址
+input_mirror = 请输入云脑镜像地址
select_dataset = 选择数据集
specification = 规格
select_specification = 选择资源规格
diff --git a/public/home/home.js b/public/home/home.js
index 478c70f21..d8e423def 100755
--- a/public/home/home.js
+++ b/public/home/home.js
@@ -99,6 +99,11 @@ socket.onmessage = function (e) {
console.log("receive action type=" + record.OpType + " name=" + actionName + " but user is null.");
continue;
}
+ if(record.OpType == "24"){
+ if(record.Content.indexOf("true") != -1){
+ continue;
+ }
+ }
var recordPrefix = getMsg(record);
if(record.OpType == "6" || record.OpType == "10" || record.OpType == "12" || record.OpType == "13"){
html += recordPrefix + actionName;
@@ -162,7 +167,7 @@ socket.onmessage = function (e) {
function getTaskLink(record){
var re = getRepoLink(record);
if(record.OpType == 24){
- re = re + "/datasets?type=" + record.Content;
+ re = re + "/datasets";
}else if(record.OpType == 25){
re = re + "/cloudbrain/" + record.Content;
}else if(record.OpType == 26){
diff --git a/public/home/search.js b/public/home/search.js
index e23d27549..c55d1807c 100644
--- a/public/home/search.js
+++ b/public/home/search.js
@@ -101,16 +101,20 @@ function initPageInfo(){
function searchItem(type,sortType){
console.log("enter item 2.");
- currentSearchKeyword = document.getElementById("keyword_input").value;
- if(!isEmpty(currentSearchKeyword)){
- initPageInfo();
- currentSearchTableName = itemType[type];
- currentSearchSortBy = sortBy[sortType];
- currentSearchAscending = sortAscending[sortType];
- OnlySearchLabel =false;
- page(currentPage);
+ if(OnlySearchLabel){
+ doSearchLabel(currentSearchTableName,currentSearchKeyword,sortBy[sortType],sortAscending[sortType])
}else{
- emptySearch();
+ currentSearchKeyword = document.getElementById("keyword_input").value;
+ if(!isEmpty(currentSearchKeyword)){
+ initPageInfo();
+ currentSearchTableName = itemType[type];
+ currentSearchSortBy = sortBy[sortType];
+ currentSearchAscending = sortAscending[sortType];
+ OnlySearchLabel =false;
+ page(currentPage);
+ }else{
+ emptySearch();
+ }
}
}
@@ -806,17 +810,21 @@ var repoAndOrgEN={
function page(current){
currentPage=current;
+ startIndex = currentPage -1;
+ if(startIndex < 1){
+ startIndex = 1;
+ }
+ endIndex = currentPage + 2;
+ if(endIndex >= totalPage){
+ endIndex = totalPage;
+ }
doSearch(currentSearchTableName,currentSearchKeyword,current,pageSize,false,currentSearchSortBy,OnlySearchLabel);
-
}
function nextPage(){
currentPage = currentPage+1;
console.log("currentPage=" + currentPage);
- if(currentPage >= endIndex){
- startIndex=startIndex+1;
- endIndex = endIndex +1;
- }
+
page(currentPage);
}
@@ -824,10 +832,6 @@ function page(current){
console.log("currentPage=" + currentPage);
if(currentPage > 1){
currentPage = currentPage-1;
- if(currentPage <= startIndex && startIndex > 1){
- startIndex = startIndex -1;
- endIndex = endIndex - 1;
- }
console.log("currentPage=" + (currentPage));
page(currentPage);
}
@@ -862,7 +866,7 @@ function getYPosition(e){
showTip(getLabel(isZh,"search_input_large_0"),"warning",left+5,top);
}
else if(goNum<=totalPage){
- page(goNum);
+ page(parseInt(goNum,10));
}
else{
showTip(getLabel(isZh,"search_input_maxed"),"warning",left+5,top);
@@ -908,6 +912,11 @@ function getYPosition(e){
}
}
+ if (endIndex < totalPage-1){
+ html += "...";
+ html += "" + totalPage + "";
+ }
+
if(currentPage >=totalPage){
html += "";
html += "";
diff --git a/routers/admin/cloudbrains.go b/routers/admin/cloudbrains.go
index 91d866093..6687b990a 100755
--- a/routers/admin/cloudbrains.go
+++ b/routers/admin/cloudbrains.go
@@ -21,6 +21,7 @@ import (
const (
tplCloudBrains base.TplName = "admin/cloudbrain/list"
tplImages base.TplName = "admin/cloudbrain/images"
+ tplCommitImages base.TplName = "admin/cloudbrain/imagecommit"
EXCEL_DATE_FORMAT = "20060102150405"
CREATE_TIME_FORMAT = "2006/01/02 15:04:05"
)
@@ -114,6 +115,12 @@ func Images(ctx *context.Context) {
}
+func CloudBrainCommitImageShow(ctx *context.Context) {
+ ctx.Data["PageIsAdminImages"] = true
+ ctx.HTML(200, tplCommitImages)
+
+}
+
func DownloadCloudBrains(ctx *context.Context) {
page := 1
diff --git a/routers/admin/dataset.go b/routers/admin/dataset.go
index a4378cf67..6b29b06ff 100644
--- a/routers/admin/dataset.go
+++ b/routers/admin/dataset.go
@@ -1,6 +1,8 @@
package admin
import (
+ "net/http"
+ "strconv"
"strings"
"code.gitea.io/gitea/models"
@@ -49,6 +51,8 @@ func Datasets(ctx *context.Context) {
orderBy = models.SearchOrderBySizeReverse
case "size":
orderBy = models.SearchOrderBySize
+ case "downloadtimes":
+ orderBy = models.SearchOrderByDownloadTimes
case "moststars":
orderBy = models.SearchOrderByStarsReverse
case "feweststars":
@@ -70,6 +74,7 @@ func Datasets(ctx *context.Context) {
PageSize: setting.UI.ExplorePagingNum,
},
Keyword: keyword,
+ RecommendOnly: ctx.QueryBool("recommend"),
SearchOrderBy: orderBy,
})
if err != nil {
@@ -80,7 +85,7 @@ func Datasets(ctx *context.Context) {
ctx.Data["Keyword"] = keyword
ctx.Data["Total"] = count
ctx.Data["Datasets"] = datasets
-
+ ctx.Data["Recommend"] = ctx.QueryBool("recommend")
pager := context.NewPagination(int(count), setting.UI.ExplorePagingNum, page, 5)
pager.SetDefaultParams(ctx)
ctx.Data["Page"] = pager
@@ -88,6 +93,23 @@ func Datasets(ctx *context.Context) {
ctx.HTML(200, tplDatasets)
}
+func DatasetAction(ctx *context.Context) {
+ var err error
+ datasetId, _ := strconv.ParseInt(ctx.Params(":id"), 10, 64)
+ switch ctx.Params(":action") {
+
+ case "recommend":
+ err = models.RecommendDataset(datasetId, true)
+ case "unrecommend":
+ err = models.RecommendDataset(datasetId, false)
+ }
+ if err != nil {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("repo.star_fail", ctx.Params(":action"))))
+ } else {
+ ctx.JSON(http.StatusOK, models.BaseOKMessage)
+ }
+}
+
func DeleteDataset(ctx *context.Context) {
dataset, err := models.GetDatasetByID(ctx.QueryInt64("id"))
if err != nil {
diff --git a/routers/api/v1/api.go b/routers/api/v1/api.go
index 2b070a4b8..3cc1a6693 100755
--- a/routers/api/v1/api.go
+++ b/routers/api/v1/api.go
@@ -535,6 +535,9 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/restoreFork", repo.RestoreForkNumber)
m.Get("/downloadAll", repo.ServeAllProjectsPeriodStatisticsFile)
m.Get("/downloadAllOpenI", repo.ServeAllProjectsOpenIStatisticsFile)
+ m.Get("/summary", repo.GetLatestProjectsSummaryData)
+ m.Get("/summary/period", repo.GetProjectsSummaryData)
+ m.Get("/summary/download", repo.GetProjectsSummaryDataFile)
m.Group("/project", func() {
m.Get("", repo.GetAllProjectsPeriodStatistics)
m.Get("/numVisit", repo.ProjectNumVisit)
diff --git a/routers/api/v1/repo/repo_dashbord.go b/routers/api/v1/repo/repo_dashbord.go
index 2c42f8a16..7ec315fd9 100644
--- a/routers/api/v1/repo/repo_dashbord.go
+++ b/routers/api/v1/repo/repo_dashbord.go
@@ -20,8 +20,10 @@ import (
const DEFAULT_PAGE_SIZE = 10
const DATE_FORMAT = "2006-01-02"
+const MONTH_FORMAT = "2006-01"
const EXCEL_DATE_FORMAT = "20060102"
const CREATE_TIME_FORMAT = "2006/01/02 15:04:05"
+const UPDATE_TIME_FORMAT = "2006-01-02 15:04:05"
type ProjectsPeriodData struct {
RecordBeginTime string `json:"recordBeginTime"`
@@ -60,6 +62,40 @@ type ProjectLatestData struct {
Top10 []UserInfo `json:"top10"`
}
+type ProjectSummaryBaseData struct {
+ NumReposAdd int64 `json:"numReposAdd"`
+ NumRepoPublicAdd int64 `json:"numRepoPublicAdd"`
+ NumRepoPrivateAdd int64 `json:"numRepoPrivateAdd"`
+ NumRepoForkAdd int64 `json:"numRepoForkAdd"`
+ NumRepoMirrorAdd int64 `json:"numRepoMirrorAdd"`
+ NumRepoSelfAdd int64 `json:"numRepoSelfAdd"`
+ NumRepos int64 `json:"numRepos"`
+ CreatTime string `json:"creatTime"`
+}
+
+type ProjectSummaryData struct {
+ ProjectSummaryBaseData
+ NumRepoPublic int64 `json:"numRepoPublic"`
+ NumRepoPrivate int64 `json:"numRepoPrivate"`
+ NumRepoFork int64 `json:"numRepoFork"`
+ NumRepoMirror int64 `json:"numRepoMirror"`
+ NumRepoSelf int64 `json:"numRepoSelf"`
+
+ NumRepoOrgAdd int64 `json:"numRepoOrgAdd"`
+ NumRepoNotOrgAdd int64 `json:"numRepoNotOrgAdd"`
+
+ NumRepoOrg int64 `json:"numRepoOrg"`
+ NumRepoNotOrg int64 `json:"numRepoNotOrg"`
+}
+
+type ProjectSummaryPeriodData struct {
+ RecordBeginTime string `json:"recordBeginTime"`
+ PageSize int `json:"pageSize"`
+ TotalPage int `json:"totalPage"`
+ TotalCount int64 `json:"totalCount"`
+ PageRecords []*ProjectSummaryBaseData `json:"pageRecords"`
+}
+
func RestoreForkNumber(ctx *context.Context) {
repos, err := models.GetAllRepositories()
if err != nil {
@@ -73,6 +109,144 @@ func RestoreForkNumber(ctx *context.Context) {
ctx.JSON(http.StatusOK, struct{}{})
}
+func GetLatestProjectsSummaryData(ctx *context.Context) {
+ stat, err := models.GetLatest2SummaryStatistic()
+ data := ProjectSummaryData{}
+ if err == nil && len(stat) > 0 {
+ data.NumRepos = stat[0].NumRepos
+ data.NumRepoOrg = stat[0].NumRepoOrg
+ data.NumRepoNotOrg = stat[0].NumRepos - stat[0].NumRepoOrg
+ data.NumRepoFork = stat[0].NumRepoFork
+ data.NumRepoMirror = stat[0].NumRepoMirror
+ data.NumRepoSelf = stat[0].NumRepoSelf
+ data.NumRepoPrivate = stat[0].NumRepoPrivate
+ data.NumRepoPublic = stat[0].NumRepoPublic
+ data.CreatTime = stat[0].CreatedUnix.Format(UPDATE_TIME_FORMAT)
+ if len(stat) == 2 {
+ data.NumReposAdd = stat[0].NumRepos - stat[1].NumRepos
+ data.NumRepoOrgAdd = stat[0].NumRepoOrg - stat[1].NumRepoOrg
+ data.NumRepoNotOrgAdd = (stat[0].NumRepos - stat[0].NumRepoOrg) - (stat[1].NumRepos - stat[1].NumRepoOrg)
+ data.NumRepoForkAdd = stat[0].NumRepoFork - stat[1].NumRepoFork
+ data.NumRepoMirrorAdd = stat[0].NumRepoMirror - stat[1].NumRepoMirror
+ data.NumRepoSelfAdd = stat[0].NumRepoSelf - stat[1].NumRepoSelf
+ data.NumRepoPrivateAdd = stat[0].NumRepoPrivate - stat[1].NumRepoPrivate
+ data.NumRepoPublicAdd = stat[0].NumRepoPublic - stat[1].NumRepoPublic
+ }
+ }
+ ctx.JSON(200, data)
+}
+
+// GetProjectsSummaryData returns paged repo-growth deltas between consecutive
+// summary snapshots; "all"/"current_year" aggregate by end-of-month dates.
+func GetProjectsSummaryData(ctx *context.Context) {
+	var datas = make([]*ProjectSummaryBaseData, 0)
+
+	recordBeginTime, err := getRecordBeginTime()
+	if err != nil {
+		log.Error("Can not get record begin time", err)
+		ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err"))
+		return
+	}
+	beginTime, endTime, err := getTimePeroid(ctx, recordBeginTime)
+	if err != nil {
+		log.Error("Parameter is wrong", err)
+		ctx.Error(http.StatusBadRequest, ctx.Tr("repo.parameter_is_wrong"))
+		return
+	}
+	beginTime = beginTime.AddDate(0, 0, -1)
+
+	queryType := ctx.QueryTrim("type")
+
+	page := ctx.QueryInt("page")
+	if page <= 0 {
+		page = 1
+	}
+	pageSize := ctx.QueryInt("pagesize")
+	if pageSize <= 0 {
+		pageSize = DEFAULT_PAGE_SIZE
+	}
+	var count int64
+
+	if queryType == "all" || queryType == "current_year" {
+		dates := getEndOfMonthDates(beginTime, endTime)
+		count, _ = models.GetSummaryStatisticByDateCount(dates)
+		stats, err := models.GetSummaryStatisticByDates(dates, page, pageSize)
+		if err != nil {
+			log.Warn("can not get summary data", err)
+		} else {
+			// First record is the baseline; deltas start at index 1.
+			for i, v := range stats {
+				if i == 0 {
+					continue
+				}
+				data := ProjectSummaryBaseData{}
+				setStatisticsData(&data, v, stats[i-1])
+				createTime, _ := time.Parse(DATE_FORMAT, v.Date)
+				data.CreatTime = createTime.Format(MONTH_FORMAT)
+				datas = append(datas, &data)
+			}
+		}
+	} else {
+		count, _ = models.GetSummaryStatisticByTimeCount(beginTime, endTime)
+		stats, err := models.GetSummaryStatisticByTime(beginTime, endTime, page, pageSize)
+		if err != nil {
+			log.Warn("can not get summary data", err)
+		} else {
+			// First record is the baseline; deltas start at index 1.
+			for i, v := range stats {
+				if i == 0 {
+					continue
+				}
+				data := ProjectSummaryBaseData{}
+				setStatisticsData(&data, v, stats[i-1])
+				data.CreatTime = v.Date
+				datas = append(datas, &data)
+			}
+		}
+	}
+	projectSummaryPeriodData := ProjectSummaryPeriodData{
+		TotalCount:      count - 1,
+		TotalPage:       getTotalPage(count-1, pageSize),
+		RecordBeginTime: recordBeginTime.Format(DATE_FORMAT),
+		PageSize:        pageSize,
+		PageRecords:     datas,
+	}
+
+	ctx.JSON(200, projectSummaryPeriodData)
+
+}
+
+func setStatisticsData(data *ProjectSummaryBaseData, v *models.SummaryStatistic, stats *models.SummaryStatistic) {
+ data.NumReposAdd = v.NumRepos - stats.NumRepos
+ data.NumRepoPublicAdd = v.NumRepoPublic - stats.NumRepoPublic
+ data.NumRepoPrivateAdd = v.NumRepoPrivate - stats.NumRepoPrivate
+ data.NumRepoMirrorAdd = v.NumRepoMirror - stats.NumRepoMirror
+ data.NumRepoForkAdd = v.NumRepoFork - stats.NumRepoFork
+ data.NumRepoSelfAdd = v.NumRepoSelf - stats.NumRepoSelf
+
+ data.NumRepos = v.NumRepos
+}
+
+func getEndOfMonthDates(beginTime time.Time, endTime time.Time) []string {
+ var dates = []string{}
+ date := endOfMonth(beginTime.AddDate(0, -1, 0))
+ dates = append(dates, date.Format(DATE_FORMAT))
+
+ tempDate := endOfMonth(beginTime)
+
+ for {
+ if tempDate.Before(endTime) {
+ dates = append(dates, tempDate.Format(DATE_FORMAT))
+ tempDate = endOfMonth(tempDate.AddDate(0, 0, 1))
+ } else {
+ break
+ }
+ }
+
+ return dates
+}
+
+func endOfMonth(date time.Time) time.Time {
+ return date.AddDate(0, 1, -date.Day())
+}
+
func GetAllProjectsPeriodStatistics(ctx *context.Context) {
recordBeginTime, err := getRecordBeginTime()
@@ -210,6 +384,118 @@ func ServeAllProjectsPeriodStatisticsFile(ctx *context.Context) {
}
+func GetProjectsSummaryDataFile(ctx *context.Context) {
+
+ recordBeginTime, err := getRecordBeginTime()
+ if err != nil {
+ log.Error("Can not get record begin time", err)
+ ctx.Error(http.StatusBadRequest, ctx.Tr("repo.record_begintime_get_err"))
+ return
+ }
+ beginTime, endTime, err := getTimePeroid(ctx, recordBeginTime)
+ beginTime = beginTime.AddDate(0, 0, -1)
+ if err != nil {
+ log.Error("Parameter is wrong", err)
+ ctx.Error(http.StatusBadRequest, ctx.Tr("repo.parameter_is_wrong"))
+ return
+ }
+
+ page := ctx.QueryInt("page")
+ if page <= 0 {
+ page = 1
+ }
+ pageSize := 100
+
+ if err != nil {
+ log.Error("Can not query the last updated time.", err)
+ ctx.Error(http.StatusBadRequest, ctx.Tr("repo.last_update_time_error"))
+ return
+ }
+
+ var projectAnalysis = ctx.Tr("repo.repo_stat_develop")
+ fileName := getSummaryFileName(ctx, beginTime, endTime, projectAnalysis)
+
+ f := excelize.NewFile()
+
+ index := f.NewSheet(projectAnalysis)
+ f.DeleteSheet("Sheet1")
+
+ for k, v := range allProjectsPeriodSummaryHeader(ctx) {
+ f.SetCellValue(projectAnalysis, k, v)
+ }
+
+ var total int64
+ queryType := ctx.QueryTrim("type")
+ row := 2
+
+ if queryType == "all" || queryType == "current_year" {
+ dates := getEndOfMonthDates(beginTime, endTime)
+ total, _ = models.GetSummaryStatisticByDateCount(dates)
+ totalPage := getTotalPage(total, pageSize)
+
+ for i := 0; i < totalPage; i++ {
+
+ stats, err := models.GetSummaryStatisticByDates(dates, i+1, pageSize)
+ if err != nil {
+ log.Warn("can not get summary data", err)
+ } else {
+ for j, v := range stats {
+ if j == 0 {
+ continue
+ }
+ data := ProjectSummaryBaseData{}
+ setStatisticsData(&data, v, stats[j-1])
+ createTime, _ := time.Parse(DATE_FORMAT, v.Date)
+ data.CreatTime = createTime.Format(MONTH_FORMAT)
+
+ for k, v := range allProjectsPeriodSummaryValues(row, &data, ctx) {
+ f.SetCellValue(projectAnalysis, k, v)
+ }
+ row++
+ }
+
+ }
+
+ }
+
+ } else {
+ total, _ = models.GetSummaryStatisticByTimeCount(beginTime, endTime)
+ totalPage := getTotalPage(total, pageSize)
+
+ for i := 0; i < totalPage; i++ {
+
+ stats, err := models.GetSummaryStatisticByTime(beginTime, endTime, i+1, pageSize)
+ if err != nil {
+ log.Warn("can not get summary data", err)
+ } else {
+ for j, v := range stats {
+ if j == 0 {
+ continue
+ }
+ data := ProjectSummaryBaseData{}
+ setStatisticsData(&data, v, stats[j-1])
+ data.CreatTime = v.Date
+
+ for k, v := range allProjectsPeriodSummaryValues(row, &data, ctx) {
+ f.SetCellValue(projectAnalysis, k, v)
+ }
+ row++
+ }
+
+ }
+
+ }
+ }
+
+ f.SetActiveSheet(index)
+
+ ctx.Resp.Header().Set("Content-Disposition", "attachment; filename="+url.QueryEscape(fileName))
+ ctx.Resp.Header().Set("Content-Type", "application/octet-stream")
+
+ f.WriteTo(ctx.Resp)
+
+}
+
func ServeAllProjectsOpenIStatisticsFile(ctx *context.Context) {
page := ctx.QueryInt("page")
@@ -290,6 +576,20 @@ func getFileName(ctx *context.Context, beginTime time.Time, endTime time.Time, p
return frontName
}
+func getSummaryFileName(ctx *context.Context, beginTime time.Time, endTime time.Time, projectAnalysis string) string {
+ baseName := projectAnalysis + "_"
+
+ if ctx.QueryTrim("type") == "all" {
+ baseName = baseName + ctx.Tr("repo.all")
+ } else if ctx.QueryTrim("type") == "current_year" {
+ baseName = baseName + ctx.Tr("repo.current_year")
+ } else {
+ baseName = baseName + beginTime.Format(EXCEL_DATE_FORMAT) + "_" + endTime.AddDate(0, 0, -1).Format(EXCEL_DATE_FORMAT)
+ }
+ frontName := baseName + ".xlsx"
+ return frontName
+}
+
func allProjectsPeroidHeader(ctx *context.Context) map[string]string {
return map[string]string{"A1": ctx.Tr("admin.repos.id"), "B1": ctx.Tr("admin.repos.projectName"), "C1": ctx.Tr("repo.owner"), "D1": ctx.Tr("admin.repos.isPrivate"), "E1": ctx.Tr("admin.repos.openi"), "F1": ctx.Tr("admin.repos.visit"), "G1": ctx.Tr("admin.repos.download"), "H1": ctx.Tr("admin.repos.pr"), "I1": ctx.Tr("admin.repos.commit"),
@@ -297,6 +597,19 @@ func allProjectsPeroidHeader(ctx *context.Context) map[string]string {
}
+func allProjectsPeriodSummaryHeader(ctx *context.Context) map[string]string {
+
+ return map[string]string{"A1": ctx.Tr("repo.date"), "B1": ctx.Tr("repo.repo_add"), "C1": ctx.Tr("repo.repo_total"), "D1": ctx.Tr("repo.repo_public_add"), "E1": ctx.Tr("repo.repo_private_add"), "F1": ctx.Tr("repo.repo_self_add"), "G1": ctx.Tr("repo.repo_fork_add"), "H1": ctx.Tr("repo.repo_mirror_add")}
+
+}
+
+func allProjectsPeriodSummaryValues(row int, rs *ProjectSummaryBaseData, ctx *context.Context) map[string]string {
+
+ return map[string]string{getCellName("A", row): rs.CreatTime, getCellName("B", row): strconv.FormatInt(rs.NumReposAdd, 10), getCellName("C", row): strconv.FormatInt(rs.NumRepos, 10), getCellName("D", row): strconv.FormatInt(rs.NumRepoPublicAdd, 10), getCellName("E", row): strconv.FormatInt(rs.NumRepoPrivateAdd, 10),
+ getCellName("F", row): strconv.FormatInt(rs.NumRepoSelfAdd, 10), getCellName("G", row): strconv.FormatInt(rs.NumRepoForkAdd, 10), getCellName("H", row): strconv.FormatInt(rs.NumRepoMirrorAdd, 10),
+ }
+}
+
func allProjectsPeroidValues(row int, rs *models.RepoStatistic, ctx *context.Context) map[string]string {
return map[string]string{getCellName("A", row): strconv.FormatInt(rs.RepoID, 10), getCellName("B", row): rs.DisplayName(), getCellName("C", row): rs.OwnerName, getCellName("D", row): getBoolDisplay(rs.IsPrivate, ctx), getCellName("E", row): strconv.FormatFloat(rs.RadarTotal, 'f', 2, 64),
getCellName("F", row): strconv.FormatInt(rs.NumVisits, 10), getCellName("G", row): strconv.FormatInt(rs.NumDownloads, 10), getCellName("H", row): strconv.FormatInt(rs.NumPulls, 10), getCellName("I", row): strconv.FormatInt(rs.NumCommits, 10),
diff --git a/routers/home.go b/routers/home.go
index 324bb1032..5dec05ebe 100755
--- a/routers/home.go
+++ b/routers/home.go
@@ -331,6 +331,7 @@ func ExploreDatasets(ctx *context.Context) {
Task: task,
License: license,
OwnerID: ownerID,
+ RecommendOnly: ctx.QueryBool("recommend"),
ListOptions: models.ListOptions{
Page: page,
PageSize: 30,
@@ -357,6 +358,7 @@ func ExploreDatasets(ctx *context.Context) {
ctx.Data["Category"] = category
ctx.Data["Task"] = task
ctx.Data["License"] = license
+ ctx.Data["Recommend"] = ctx.QueryBool("recommend")
pager.SetDefaultParams(ctx)
ctx.Data["Page"] = pager
diff --git a/routers/repo/attachment.go b/routers/repo/attachment.go
index 3c66a3537..aa52a1400 100755
--- a/routers/repo/attachment.go
+++ b/routers/repo/attachment.go
@@ -11,7 +11,6 @@ import (
"fmt"
"mime/multipart"
"net/http"
- "path"
"strconv"
"strings"
@@ -830,20 +829,6 @@ func GetMultipartUploadUrl(ctx *context.Context) {
})
}
-func GetObsKey(ctx *context.Context) {
- uuid := gouuid.NewV4().String()
- key := strings.TrimPrefix(path.Join(setting.BasePath, path.Join(uuid[0:1], uuid[1:2], uuid, uuid)), "/")
-
- ctx.JSON(200, map[string]string{
- "uuid": uuid,
- "key": key,
- "access_key_id": setting.AccessKeyID,
- "secret_access_key": setting.SecretAccessKey,
- "server": setting.Endpoint,
- "bucket": setting.Bucket,
- })
-}
-
func CompleteMultipart(ctx *context.Context) {
uuid := ctx.Query("uuid")
uploadID := ctx.Query("uploadID")
@@ -870,13 +855,13 @@ func CompleteMultipart(ctx *context.Context) {
}
if typeCloudBrain == models.TypeCloudBrainOne {
- _, err = storage.CompleteMultiPartUpload(uuid, uploadID)
+ _, err = storage.CompleteMultiPartUpload(uuid, uploadID, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteMultiPartUpload failed: %v", err))
return
}
} else {
- err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName)
+ err = storage.CompleteObsMultiPartUpload(uuid, uploadID, fileName, fileChunk.TotalChunks)
if err != nil {
ctx.Error(500, fmt.Sprintf("CompleteObsMultiPartUpload failed: %v", err))
return
@@ -907,10 +892,9 @@ func CompleteMultipart(ctx *context.Context) {
ctx.Error(500, fmt.Sprintf("InsertAttachment: %v", err))
return
}
-
+ attachment.UpdateDatasetUpdateUnix()
repository, _ := models.GetRepositoryByID(dataset.RepoID)
- notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(attachment.Type), attachment.Name, models.ActionUploadAttachment)
-
+ notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment)
if attachment.DatasetID != 0 {
if isCanDecompress(attachment.Name) {
if typeCloudBrain == models.TypeCloudBrainOne {
@@ -947,34 +931,6 @@ func CompleteMultipart(ctx *context.Context) {
})
}
-func UpdateMultipart(ctx *context.Context) {
- uuid := ctx.Query("uuid")
- partNumber := ctx.QueryInt("chunkNumber")
- etag := ctx.Query("etag")
-
- fileChunk, err := models.GetFileChunkByUUID(uuid)
- if err != nil {
- if models.IsErrFileChunkNotExist(err) {
- ctx.Error(404)
- } else {
- ctx.ServerError("GetFileChunkByUUID", err)
- }
- return
- }
-
- fileChunk.CompletedParts = append(fileChunk.CompletedParts, strconv.Itoa(partNumber)+"-"+strings.Replace(etag, "\"", "", -1))
-
- err = models.UpdateFileChunk(fileChunk)
- if err != nil {
- ctx.Error(500, fmt.Sprintf("UpdateFileChunk: %v", err))
- return
- }
-
- ctx.JSON(200, map[string]string{
- "result_code": "0",
- })
-}
-
func HandleUnDecompressAttachment() {
attachs, err := models.GetUnDecompressAttachments()
if err != nil {
diff --git a/routers/repo/cloudbrain.go b/routers/repo/cloudbrain.go
index 5daf96e78..4ef205af2 100755
--- a/routers/repo/cloudbrain.go
+++ b/routers/repo/cloudbrain.go
@@ -59,6 +59,7 @@ var (
)
const BENCHMARK_TYPE_CODE = "repo.cloudbrain.benchmark.types"
+const CLONE_FILE_PREFIX = "file:///"
var benchmarkTypesMap = make(map[string]*models.BenchmarkTypes, 0)
@@ -702,6 +703,53 @@ func CloudBrainCommitImageCheck(ctx *context.Context, form auth.CommitImageCloud
}
+func CloudBrainAdminCommitImage(ctx *context.Context, form auth.CommitAdminImageCloudBrainForm) {
+
+ if !NamePattern.MatchString(form.Tag) {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.title_format_err")))
+ return
+ }
+
+ if utf8.RuneCountInString(form.Description) > 255 {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr("dataset.description_format_err", 255)))
+ return
+ }
+
+ validTopics, errMessage := checkTopics(form.Topics)
+ if errMessage != "" {
+ ctx.JSON(http.StatusOK, models.BaseErrorMessage(ctx.Tr(errMessage)))
+ return
+ }
+
+ err := cloudbrain.CommitAdminImage(models.CommitImageParams{
+ CommitImageCloudBrainParams: models.CommitImageCloudBrainParams{
+ ImageDescription: form.Description,
+ ImageTag: form.Tag,
+ },
+ IsPrivate: form.IsPrivate,
+ CloudBrainType: form.Type,
+ Topics: validTopics,
+ UID: ctx.User.ID,
+ Type: models.GetRecommondType(form.IsRecommend),
+ Place: form.Place,
+ })
+ if err != nil {
+ log.Error("CommitImagefailed")
+ if models.IsErrImageTagExist(err) {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_exist")))
+
+ } else if models.IsErrorImageCommitting(err) {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_committing")))
+ } else {
+ ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_commit_fail")))
+ }
+
+ return
+ }
+
+ ctx.JSON(200, models.BaseOKMessage)
+}
+
func CloudBrainCommitImage(ctx *context.Context, form auth.CommitImageCloudBrainForm) {
if !NamePattern.MatchString(form.Tag) {
@@ -1142,7 +1190,8 @@ func GetRate(ctx *context.Context) {
}
func downloadCode(repo *models.Repository, codePath, branchName string) error {
- if err := git.Clone(repo.RepoPath(), codePath, git.CloneRepoOptions{Branch: branchName}); err != nil {
+ //add "file:///" prefix to make the depth valid
+ if err := git.Clone(CLONE_FILE_PREFIX+repo.RepoPath(), codePath, git.CloneRepoOptions{Branch: branchName, Depth: 1}); err != nil {
log.Error("Failed to clone repository: %s (%v)", repo.FullName(), err)
return err
}
@@ -1202,7 +1251,7 @@ func downloadRateCode(repo *models.Repository, taskName, rateOwnerName, rateRepo
return err
}
- if err := git.Clone(repoExt.RepoPath(), codePath, git.CloneRepoOptions{}); err != nil {
+ if err := git.Clone(CLONE_FILE_PREFIX+repoExt.RepoPath(), codePath, git.CloneRepoOptions{Depth: 1}); err != nil {
log.Error("Failed to clone repository: %s (%v)", repoExt.FullName(), err)
return err
}
diff --git a/routers/repo/dataset.go b/routers/repo/dataset.go
index 1a3762be3..73036a2cc 100755
--- a/routers/repo/dataset.go
+++ b/routers/repo/dataset.go
@@ -358,6 +358,7 @@ func MyDatasets(ctx *context.Context) {
NeedIsPrivate: false,
JustNeedZipFile: true,
NeedRepoInfo: true,
+ RecommendOnly: ctx.QueryBool("recommend"),
})
if err != nil {
ctx.ServerError("datasets", err)
@@ -398,6 +399,7 @@ func PublicDataset(ctx *context.Context) {
Type: cloudbrainType,
JustNeedZipFile: true,
NeedRepoInfo: true,
+ RecommendOnly: ctx.QueryBool("recommend"),
})
if err != nil {
ctx.ServerError("datasets", err)
@@ -454,6 +456,7 @@ func MyFavoriteDataset(ctx *context.Context) {
Type: cloudbrainType,
JustNeedZipFile: true,
NeedRepoInfo: true,
+ RecommendOnly: ctx.QueryBool("recommend"),
})
if err != nil {
ctx.ServerError("datasets", err)
diff --git a/routers/repo/modelarts.go b/routers/repo/modelarts.go
index 318726e8e..b713f385f 100755
--- a/routers/repo/modelarts.go
+++ b/routers/repo/modelarts.go
@@ -247,7 +247,9 @@ func Notebook2Create(ctx *context.Context, form auth.CreateModelArtsNotebookForm
func NotebookShow(ctx *context.Context) {
ctx.Data["PageIsCloudBrain"] = true
debugListType := ctx.Query("debugListType")
-
+ if debugListType == "" {
+ debugListType = "all"
+ }
var ID = ctx.Params(":id")
task, err := models.GetCloudbrainByIDWithDeleted(ID)
if err != nil {
@@ -1027,10 +1029,8 @@ func TrainJobCreate(ctx *context.Context, form auth.CreateModelArtsTrainJobForm)
gitRepo, _ := git.OpenRepository(repo.RepoPath())
commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := git.Clone(repo.RepoPath(), codeLocalPath, git.CloneRepoOptions{
- Branch: branch_name,
- }); err != nil {
- log.Error("Create task failed, server timed out: %s (%v)", repo.FullName(), err)
+ if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
+ log.Error("downloadCode failed, server timed out: %s (%v)", repo.FullName(), err)
trainJobErrorNewDataPrepare(ctx, form)
ctx.RenderWithErr("Create task failed, server timed out", tplModelArtsTrainJobNew, &form)
return
@@ -1245,9 +1245,7 @@ func TrainJobCreateVersion(ctx *context.Context, form auth.CreateModelArtsTrainJ
gitRepo, _ := git.OpenRepository(repo.RepoPath())
commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := git.Clone(repo.RepoPath(), codeLocalPath, git.CloneRepoOptions{
- Branch: branch_name,
- }); err != nil {
+ if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
log.Error("Failed git clone repo to local(!: %s (%v)", repo.FullName(), err)
versionErrorDataPrepare(ctx, form)
ctx.RenderWithErr("Failed git clone repo to local!", tplModelArtsTrainJobVersionNew, &form)
@@ -1874,9 +1872,7 @@ func InferenceJobCreate(ctx *context.Context, form auth.CreateModelArtsInference
gitRepo, _ := git.OpenRepository(repo.RepoPath())
commitID, _ := gitRepo.GetBranchCommitID(branch_name)
- if err := git.Clone(repo.RepoPath(), codeLocalPath, git.CloneRepoOptions{
- Branch: branch_name,
- }); err != nil {
+ if err := downloadCode(repo, codeLocalPath, branch_name); err != nil {
log.Error("Create task failed, server timed out: %s (%v)", repo.FullName(), err)
inferenceJobErrorNewDataPrepare(ctx, form)
ctx.RenderWithErr("Create task failed, server timed out", tplModelArtsInferenceJobNew, &form)
diff --git a/routers/repo/repo_summary_statistic.go b/routers/repo/repo_summary_statistic.go
index 3af31737c..65ba2cf0b 100644
--- a/routers/repo/repo_summary_statistic.go
+++ b/routers/repo/repo_summary_statistic.go
@@ -60,6 +60,12 @@ func SummaryStatisticDaily(date string) {
}
selfRepositoryNumber := repositoryNumer - mirrorRepositoryNumber - forkRepositoryNumber
+ organizationRepoNumber, err := models.GetAllOrgRepositoriesCount()
+ if err != nil {
+ log.Error("can not get org repository number", err)
+ organizationRepoNumber = 0
+ }
+
//repository size
repositorySize, err := models.GetAllRepositoriesSize()
if err != nil {
@@ -99,6 +105,7 @@ func SummaryStatisticDaily(date string) {
NumRepoPrivate: privateRepositoryNumer,
NumRepoPublic: publicRepositoryNumer,
NumRepoSelf: selfRepositoryNumber,
+ NumRepoOrg: organizationRepoNumber,
NumRepoBigModel: topicsCount[0],
NumRepoAI: topicsCount[1],
NumRepoVision: topicsCount[2],
diff --git a/routers/repo/user_data_analysis.go b/routers/repo/user_data_analysis.go
index 8bc9dc458..995465b09 100755
--- a/routers/repo/user_data_analysis.go
+++ b/routers/repo/user_data_analysis.go
@@ -40,8 +40,8 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
dataHeader := map[string]string{
"A1": ctx.Tr("user.static.id"),
"B1": ctx.Tr("user.static.name"),
- "C1": ctx.Tr("user.static.codemergecount"),
- "D1": ctx.Tr("user.static.UserIndex"),
+ "C1": ctx.Tr("user.static.UserIndex"),
+ "D1": ctx.Tr("user.static.codemergecount"),
"E1": ctx.Tr("user.static.commitcount"),
"F1": ctx.Tr("user.static.issuecount"),
"G1": ctx.Tr("user.static.commentcount"),
@@ -77,8 +77,8 @@ func queryUserDataPage(ctx *context.Context, tableName string, queryObj interfac
rows := fmt.Sprint(row)
xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
- xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
- xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ xlsx.SetCellValue(sheetName, "C"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CodeMergeCount)
xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
@@ -239,8 +239,8 @@ func QueryUserStaticDataPage(ctx *context.Context) {
dataHeader := map[string]string{
"A1": ctx.Tr("user.static.id"),
"B1": ctx.Tr("user.static.name"),
- "C1": ctx.Tr("user.static.codemergecount"),
- "D1": ctx.Tr("user.static.UserIndex"),
+ "C1": ctx.Tr("user.static.UserIndex"),
+ "D1": ctx.Tr("user.static.codemergecount"),
"E1": ctx.Tr("user.static.commitcount"),
"F1": ctx.Tr("user.static.issuecount"),
"G1": ctx.Tr("user.static.commentcount"),
@@ -270,8 +270,8 @@ func QueryUserStaticDataPage(ctx *context.Context) {
xlsx.SetCellValue(sheetName, "A"+rows, userRecord.ID)
xlsx.SetCellValue(sheetName, "B"+rows, userRecord.Name)
- xlsx.SetCellValue(sheetName, "C"+rows, userRecord.CodeMergeCount)
- xlsx.SetCellValue(sheetName, "D"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ xlsx.SetCellValue(sheetName, "C"+rows, fmt.Sprintf("%.2f", userRecord.UserIndex))
+ xlsx.SetCellValue(sheetName, "D"+rows, userRecord.CodeMergeCount)
xlsx.SetCellValue(sheetName, "E"+rows, userRecord.CommitCount)
xlsx.SetCellValue(sheetName, "F"+rows, userRecord.IssueCount)
xlsx.SetCellValue(sheetName, "G"+rows, userRecord.CommentCount)
diff --git a/routers/routes/routes.go b/routers/routes/routes.go
index 9df429e8b..8929666e5 100755
--- a/routers/routes/routes.go
+++ b/routers/routes/routes.go
@@ -525,6 +525,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/datasets", func() {
m.Get("", admin.Datasets)
+ m.Put("/:id/action/:action", admin.DatasetAction)
// m.Post("/delete", admin.DeleteDataset)
})
m.Group("/cloudbrains", func() {
@@ -534,6 +535,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/images", func() {
m.Get("", admin.Images)
m.Get("/data", repo.GetAllImages)
+ m.Get("/commit_image", admin.CloudBrainCommitImageShow)
+ m.Post("/commit_image", bindIgnErr(auth.CommitAdminImageCloudBrainForm{}), repo.CloudBrainAdminCommitImage)
})
m.Put("/image/:id/action/:action", image.Action)
@@ -608,12 +611,11 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Put("/obs_proxy_multipart", repo.PutOBSProxyUpload)
m.Get("/obs_proxy_download", repo.GetOBSProxyDownload)
m.Get("/get_multipart_url", repo.GetMultipartUploadUrl)
- m.Post("/complete_multipart", repo.CompleteMultipart)
- m.Post("/update_chunk", repo.UpdateMultipart)
}, reqSignIn)
m.Group("/attachments", func() {
m.Post("/decompress_done_notify", repo.UpdateAttachmentDecompressState)
+ m.Post("/complete_multipart", repo.CompleteMultipart)
})
m.Group("/attachments", func() {
diff --git a/routers/search.go b/routers/search.go
index 1cf78666e..fe1643c80 100644
--- a/routers/search.go
+++ b/routers/search.go
@@ -183,7 +183,7 @@ func searchRepoByLabel(ctx *context.Context, Key string, Page int, PageSize int)
topicsQuery := elastic.NewMatchQuery("topics", Key)
boolQ.Should(topicsQuery)
- res, err := client.Search("repository-es-index").Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("topics")).Do(ctx.Req.Context())
+ res, err := client.Search("repository-es-index").Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Highlight(queryHighlight("topics")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -200,15 +200,18 @@ func searchRepoByLabel(ctx *context.Context, Key string, Page int, PageSize int)
}
}
-func getSort(SortBy string, ascending bool) elastic.Sorter {
- var sort elastic.Sorter
- sort = elastic.NewScoreSort()
- if SortBy != "" {
- if SortBy == "default" {
- return sort
+func getSort(SortBy string, ascending bool, secondSortBy string, secondAscending bool) []elastic.Sorter {
+ sort := make([]elastic.Sorter, 0)
+ if SortBy == "default" || SortBy == "" {
+ sort = append(sort, elastic.NewScoreSort())
+ if secondSortBy != "" {
+ log.Info("SortBy=" + SortBy + " secondSortBy=" + secondSortBy)
+ sort = append(sort, elastic.NewFieldSort(secondSortBy).Order(secondAscending))
}
- return elastic.NewFieldSort(SortBy).Order(ascending)
+ } else {
+ sort = append(sort, elastic.NewFieldSort(SortBy).Order(ascending))
}
+ log.Info("sort size=" + fmt.Sprint(len(sort)))
return sort
}
@@ -308,7 +311,7 @@ func searchRepo(ctx *context.Context, TableName string, Key string, Page int, Pa
topicsQuery := elastic.NewMatchQuery("topics", Key).Boost(1).QueryName("f_third")
boolQ.Should(nameQuery, descriptionQuery, topicsQuery)
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("alias", "description", "topics")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "num_stars", false)...).From(from).Size(Size).Highlight(queryHighlight("alias", "description", "topics")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -330,7 +333,7 @@ func searchRepo(ctx *context.Context, TableName string, Key string, Page int, Pa
} else {
log.Info("query all content.")
//搜索的属性要指定{"timestamp":{"unmapped_type":"date"}}
- res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -691,7 +694,7 @@ func searchUserOrOrg(ctx *context.Context, TableName string, Key string, Page in
boolQ.Must(UserOrOrgQuery)
}
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From((Page - 1) * PageSize).Size(PageSize).Highlight(queryHighlight("name", "full_name", "description")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From((Page - 1) * PageSize).Size(PageSize).Highlight(queryHighlight("name", "full_name", "description")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -849,7 +852,7 @@ func searchDataSet(ctx *context.Context, TableName string, Key string, Page int,
fileNameQuery := elastic.NewMatchQuery("file_name", Key).Boost(1).QueryName("f_third")
categoryQuery := elastic.NewMatchQuery("category", Key).Boost(1).QueryName("f_fourth")
boolQ.Should(nameQuery, descQuery, categoryQuery, fileNameQuery)
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("title", "description", "file_name", "category")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Highlight(queryHighlight("title", "description", "file_name", "category")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -864,7 +867,7 @@ func searchDataSet(ctx *context.Context, TableName string, Key string, Page int,
} else {
log.Info("query all datasets.")
//搜索的属性要指定{"timestamp":{"unmapped_type":"date"}}
- res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
@@ -1057,7 +1060,7 @@ func searchIssueOrPr(ctx *context.Context, TableName string, Key string, Page in
boolQ.Must(isIssueQuery)
}
- res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending)).From(from).Size(Size).Highlight(queryHighlight("name", "content", "comment")).Do(ctx.Req.Context())
+ res, err := client.Search(TableName).Query(boolQ).SortBy(getSort(SortBy, ascending, "updated_unix.keyword", false)...).From(from).Size(Size).Highlight(queryHighlight("name", "content", "comment")).Do(ctx.Req.Context())
if err == nil {
searchJson, _ := json.Marshal(res)
log.Info("searchJson=" + string(searchJson))
diff --git a/routers/user/profile.go b/routers/user/profile.go
index 41d8561d6..f82c03a75 100755
--- a/routers/user/profile.go
+++ b/routers/user/profile.go
@@ -106,9 +106,9 @@ func Profile(ctx *context.Context) {
for _, org := range orgs {
_, repoCount, err := models.SearchRepository(&models.SearchRepoOptions{
- OwnerID: org.ID,
- Private: ctx.IsSigned,
- Actor: ctx.User,
+ OwnerID: org.ID,
+ Private: ctx.IsSigned,
+ Actor: ctx.User,
})
if err != nil {
ctx.ServerError("SearchRepository", err)
@@ -175,6 +175,8 @@ func Profile(ctx *context.Context) {
orderBy = models.SearchOrderByAlphabeticallyReverse
case "alphabetically":
orderBy = models.SearchOrderByAlphabetically
+ case "downloadtimes":
+ orderBy = models.SearchOrderByDownloadTimes
case "moststars":
orderBy = models.SearchOrderByStarsReverse
case "feweststars":
diff --git a/services/socketwrap/clientManager.go b/services/socketwrap/clientManager.go
index 61f356a66..6ffa96933 100755
--- a/services/socketwrap/clientManager.go
+++ b/services/socketwrap/clientManager.go
@@ -10,7 +10,7 @@ import (
"github.com/elliotchance/orderedmap"
)
-var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 25, 26, 27, 28, 29, 30, 31}
+var opTypes = []int{1, 2, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 17, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
type ClientsManager struct {
Clients *orderedmap.OrderedMap
diff --git a/templates/admin/cloudbrain/imagecommit.tmpl b/templates/admin/cloudbrain/imagecommit.tmpl
new file mode 100644
index 000000000..e504f08b0
--- /dev/null
+++ b/templates/admin/cloudbrain/imagecommit.tmpl
@@ -0,0 +1,129 @@
+
+{{template "base/head" .}}
+
{{.i18n.Tr "repo.image_overwrite"}}
+{{.i18n.Tr "dataset.settings.delete_desc"}}
- {{.i18n.Tr "dataset.settings.delete_notices_2" `` | Safe}}- {{ file_status_text }} - {{ status }} -
-说明:
- - 只有zip格式的数据集才能发起云脑任务;
- - 云脑1提供 CPU / GPU 资源,云脑2提供 Ascend NPU 资源;调试使用的数据集也需要上传到对应的环境。
-