| @@ -570,11 +570,12 @@ type SpecialPools struct { | |||||
| Pools []*SpecialPool `json:"pools"` | Pools []*SpecialPool `json:"pools"` | ||||
| } | } | ||||
| type SpecialPool struct { | type SpecialPool struct { | ||||
| Org string `json:"org"` | |||||
| Type string `json:"type"` | |||||
| IsExclusive bool `json:"isExclusive"` | |||||
| Pool []*GpuInfo `json:"pool"` | |||||
| JobType []string `json:"jobType"` | |||||
| Org string `json:"org"` | |||||
| Type string `json:"type"` | |||||
| IsExclusive bool `json:"isExclusive"` | |||||
| Pool []*GpuInfo `json:"pool"` | |||||
| JobType []string `json:"jobType"` | |||||
| ResourceSpec []*ResourceSpec `json:"resourceSpecs"` | |||||
| } | } | ||||
| type ImageInfosModelArts struct { | type ImageInfosModelArts struct { | ||||
| @@ -211,6 +211,42 @@ func setKeyContributerDict(contributorDistinctDict map[string]int, email string, | |||||
| } | } | ||||
| } | } | ||||
| func GetAllUserPublicRepoKPIStats(startTime time.Time, endTime time.Time) (map[string]*git.UserKPIStats, error) { | |||||
| authors := make(map[string]*git.UserKPIStats) | |||||
| repositorys, err := GetAllRepositoriesByFilterCols("owner_name", "name", "is_private") | |||||
| if err != nil { | |||||
| return nil, err | |||||
| } | |||||
| for _, repository := range repositorys { | |||||
| if repository.IsPrivate { | |||||
| continue | |||||
| } | |||||
| authorsOneRepo, err1 := git.GetUserKPIStats(repository.RepoPath(), startTime, endTime) | |||||
| if err1 != nil { | |||||
| log.Warn("get user kpi status err:"+repository.RepoPath(), err1.Error()) | |||||
| continue | |||||
| } | |||||
| for key, value := range authorsOneRepo { | |||||
| if _, ok := authors[key]; !ok { | |||||
| authors[key] = &git.UserKPIStats{ | |||||
| Name: value.Name, | |||||
| Email: value.Email, | |||||
| Commits: 0, | |||||
| CommitLines: 0, | |||||
| } | |||||
| } | |||||
| authors[key].Commits += value.Commits | |||||
| authors[key].CommitLines += value.CommitLines | |||||
| } | |||||
| } | |||||
| return authors, nil | |||||
| } | |||||
| func GetAllUserKPIStats(startTime time.Time, endTime time.Time) (map[string]*git.UserKPIStats, error) { | func GetAllUserKPIStats(startTime time.Time, endTime time.Time) (map[string]*git.UserKPIStats, error) { | ||||
| authors := make(map[string]*git.UserKPIStats) | authors := make(map[string]*git.UserKPIStats) | ||||
| repositorys, err := GetAllRepositoriesByFilterCols("owner_name", "name") | repositorys, err := GetAllRepositoriesByFilterCols("owner_name", "name") | ||||
| @@ -0,0 +1,437 @@ | |||||
| package models | |||||
| import ( | |||||
| "fmt" | |||||
| "time" | |||||
| "code.gitea.io/gitea/modules/log" | |||||
| "code.gitea.io/gitea/modules/timeutil" | |||||
| ) | |||||
// UserBusinessAnalysisForActivity is one row of per-user public-activity
// statistics for a counting period, produced by QueryDataForActivity.
// Counters only include activity in public repositories.
type UserBusinessAnalysisForActivity struct {
	ID        int64 `xorm:"pk"`
	CountDate int64 `xorm:"pk"`
	// Merged pull requests (cf. action ActionMergePullRequest, op_type 11).
	CodeMergeCount int `xorm:"NOT NULL DEFAULT 0"`
	// Commit pushes (cf. action ActionCommitRepo).
	CommitCount int `xorm:"NOT NULL DEFAULT 0"`
	// Issues created (issue table; cf. op_type 10).
	IssueCount int `xorm:"NOT NULL DEFAULT 0"`
	// Comments posted within the counting period (comment table).
	CommentCount int `xorm:"NOT NULL DEFAULT 0"`
	// Repositories watched (follow/watch table).
	WatchedCount   int `xorm:"NOT NULL DEFAULT 0"`
	CommitCodeSize int `xorm:"NOT NULL DEFAULT 0"`
	// Closed issues the user was assigned to (issue + issue_assignees).
	SolveIssueCount int `xorm:"NOT NULL DEFAULT 0"`
	// Registration time, copied from the user record.
	RegistDate timeutil.TimeStamp `xorm:"NOT NULL"`
	// Contact details, copied from the user record.
	Email string `xorm:"NOT NULL"`
	Phone string `xorm:"NULL"`
	// Login name, copied from the user record.
	Name              string `xorm:"NOT NULL"`
	DataDate          string `xorm:"NULL"`
	CloudBrainTaskNum int    `xorm:"NOT NULL DEFAULT 0"`
	CommitDatasetNum  int    `xorm:"NOT NULL DEFAULT 0"`
	// Models committed via model management.
	CommitModelCount int `xorm:"NOT NULL DEFAULT 0"`
}
| func QueryDataForActivity(startTime time.Time, endTime time.Time) []*UserBusinessAnalysisForActivity { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| result := make([]*UserBusinessAnalysisForActivity, 0) | |||||
| publicRepo := queryPublicRepo() | |||||
| start_unix := startTime.Unix() | |||||
| end_unix := endTime.Unix() | |||||
| CodeMergeCountMap := queryPullRequestPublic(start_unix, end_unix, publicRepo) | |||||
| CommitCodeSizeMap, err := GetAllUserPublicRepoKPIStats(startTime, endTime) | |||||
| if err != nil { | |||||
| log.Info("error,info=" + err.Error()) | |||||
| } | |||||
| CommitCountMap := queryCommitActionPublic(start_unix, end_unix, 5, publicRepo) | |||||
| IssueCountMap, publicRepoIssueIdMap := queryCreateIssuePublic(start_unix, end_unix, publicRepo) | |||||
| SolveIssueCountMap := querySolveIssuePublic(start_unix, end_unix, publicRepoIssueIdMap) | |||||
| WatchedCountMap, _ := queryFollow(start_unix, end_unix) | |||||
| CommentCountMap := queryCommentPublic(start_unix, end_unix, publicRepoIssueIdMap) | |||||
| PublicDataSet := queryAllPublicDataSet(publicRepo) | |||||
| DatasetFileNums := queryPublicDatasetFileNums(start_unix, end_unix, PublicDataSet) | |||||
| AiModelManageMap := queryUserModelPublic(start_unix, end_unix, publicRepo) | |||||
| cond := "type != 1 and is_active=true" | |||||
| count, err := sess.Where(cond).Count(new(User)) | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("`user`.*").Table("user").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| userList := make([]*User, 0) | |||||
| sess.Find(&userList) | |||||
| for i, userRecord := range userList { | |||||
| var dateRecord UserBusinessAnalysisForActivity | |||||
| dateRecord.ID = userRecord.ID | |||||
| log.Info("i=" + fmt.Sprint(i) + " userName=" + userRecord.Name) | |||||
| dateRecord.Email = userRecord.Email | |||||
| dateRecord.Phone = userRecord.PhoneNumber | |||||
| dateRecord.RegistDate = userRecord.CreatedUnix | |||||
| dateRecord.Name = userRecord.Name | |||||
| dateRecord.CodeMergeCount = getMapValue(dateRecord.ID, CodeMergeCountMap) | |||||
| dateRecord.CommitCount = getMapValue(dateRecord.ID, CommitCountMap) | |||||
| dateRecord.WatchedCount = getMapValue(dateRecord.ID, WatchedCountMap) | |||||
| dateRecord.CommitDatasetNum = getMapValue(dateRecord.ID, DatasetFileNums) | |||||
| dateRecord.IssueCount = getMapValue(dateRecord.ID, IssueCountMap) | |||||
| dateRecord.CommentCount = getMapValue(dateRecord.ID, CommentCountMap) | |||||
| if _, ok := CommitCodeSizeMap[dateRecord.Email]; !ok { | |||||
| dateRecord.CommitCodeSize = 0 | |||||
| } else { | |||||
| dateRecord.CommitCodeSize = int(CommitCodeSizeMap[dateRecord.Email].CommitLines) | |||||
| } | |||||
| dateRecord.SolveIssueCount = getMapValue(dateRecord.ID, SolveIssueCountMap) | |||||
| dateRecord.CommitModelCount = getMapValue(dateRecord.ID, AiModelManageMap) | |||||
| result = append(result, &dateRecord) | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return result | |||||
| } | |||||
| func querySolveIssuePublic(start_unix int64, end_unix int64, publicRepoIssueIdMap map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultMap := make(map[int64]int) | |||||
| cond := "issue.is_closed=true and issue.closed_unix>=" + fmt.Sprint(start_unix) + " and issue.closed_unix<=" + fmt.Sprint(end_unix) | |||||
| count, err := sess.Table("issue_assignees").Join("inner", "issue", "issue.id=issue_assignees.issue_id").Where(cond).Count(new(IssueAssignees)) | |||||
| if err != nil { | |||||
| log.Info("query issue error. return.") | |||||
| return resultMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| issueAssigneesList := make([]*IssueAssignees, 0) | |||||
| sess.Select("issue_assignees.*").Table("issue_assignees"). | |||||
| Join("inner", "issue", "issue.id=issue_assignees.issue_id"). | |||||
| Where(cond).OrderBy("issue_assignees.id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| sess.Find(&issueAssigneesList) | |||||
| log.Info("query IssueAssignees size=" + fmt.Sprint(len(issueAssigneesList))) | |||||
| for _, issueAssigneesRecord := range issueAssigneesList { | |||||
| if isPublicRepo(issueAssigneesRecord.IssueID, publicRepoIssueIdMap) { | |||||
| if _, ok := resultMap[issueAssigneesRecord.AssigneeID]; !ok { | |||||
| resultMap[issueAssigneesRecord.AssigneeID] = 1 | |||||
| } else { | |||||
| resultMap[issueAssigneesRecord.AssigneeID] += 1 | |||||
| } | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap | |||||
| } | |||||
| func queryPublicRepo() map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultMap := make(map[int64]int) | |||||
| count, err := sess.Table("repository").Count(new(Repository)) | |||||
| if err != nil { | |||||
| log.Info("query Repository error. return.") | |||||
| return resultMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| repositoryList := make([]*Repository, 0) | |||||
| sess.Select("*").Table("repository").OrderBy("id desc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| sess.Find(&repositoryList) | |||||
| log.Info("query repo size=" + fmt.Sprint(len(repositoryList))) | |||||
| for _, repositoryRecord := range repositoryList { | |||||
| if repositoryRecord.IsPrivate { | |||||
| continue | |||||
| } | |||||
| if _, ok := resultMap[repositoryRecord.ID]; !ok { | |||||
| resultMap[repositoryRecord.ID] = 1 | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap | |||||
| } | |||||
// isPublicRepo reports whether repoId is present in the public-repository
// id set produced by queryPublicRepo (or any id->flag set used the same way).
func isPublicRepo(repoId int64, publicAllRepo map[int64]int) bool {
	_, found := publicAllRepo[repoId]
	return found
}
| func queryPullRequestPublic(start_unix int64, end_unix int64, publicAllRepo map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultMap := make(map[int64]int) | |||||
| cond := "pull_request.merged_unix>=" + fmt.Sprint(start_unix) + " and pull_request.merged_unix<=" + fmt.Sprint(end_unix) | |||||
| count, err := sess.Table("issue").Join("inner", "pull_request", "issue.id=pull_request.issue_id").Where(cond).Count(new(Issue)) | |||||
| if err != nil { | |||||
| log.Info("query issue error. return.") | |||||
| return resultMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| issueList := make([]*Issue, 0) | |||||
| sess.Select("issue.*").Table("issue").Join("inner", "pull_request", "issue.id=pull_request.issue_id").Where(cond).OrderBy("issue.id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| sess.Find(&issueList) | |||||
| log.Info("query issue(PR) size=" + fmt.Sprint(len(issueList))) | |||||
| for _, issueRecord := range issueList { | |||||
| if isPublicRepo(issueRecord.RepoID, publicAllRepo) { | |||||
| if _, ok := resultMap[issueRecord.PosterID]; !ok { | |||||
| resultMap[issueRecord.PosterID] = 1 | |||||
| } else { | |||||
| resultMap[issueRecord.PosterID] += 1 | |||||
| } | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap | |||||
| } | |||||
| func queryCommitActionPublic(start_unix int64, end_unix int64, actionType int64, publicAllRepo map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultMap := make(map[int64]int) | |||||
| cond := "user_id=act_user_id and op_type=" + fmt.Sprint(actionType) + " and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||||
| count, err := sess.Where(cond).Count(new(Action)) | |||||
| if err != nil { | |||||
| log.Info("query action error. return.") | |||||
| return resultMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("id,user_id,op_type,act_user_id,repo_id").Table("action").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| actionList := make([]*Action, 0) | |||||
| sess.Find(&actionList) | |||||
| log.Info("query action size=" + fmt.Sprint(len(actionList))) | |||||
| for _, actionRecord := range actionList { | |||||
| if isPublicRepo(actionRecord.RepoID, publicAllRepo) { | |||||
| if _, ok := resultMap[actionRecord.UserID]; !ok { | |||||
| resultMap[actionRecord.UserID] = 1 | |||||
| } else { | |||||
| resultMap[actionRecord.UserID] += 1 | |||||
| } | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap | |||||
| } | |||||
| func queryCreateIssuePublic(start_unix int64, end_unix int64, publicAllRepo map[int64]int) (map[int64]int, map[int64]int) { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultMap := make(map[int64]int) | |||||
| publicRepoIssueIdMap := make(map[int64]int) | |||||
| cond := "is_pull=false and created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||||
| count, err := sess.Where(cond).Count(new(Issue)) | |||||
| if err != nil { | |||||
| log.Info("query Issue error. return.") | |||||
| return resultMap, publicRepoIssueIdMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("id,poster_id,repo_id").Table("issue").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| issueList := make([]*Issue, 0) | |||||
| sess.Find(&issueList) | |||||
| log.Info("query issue size=" + fmt.Sprint(len(issueList))) | |||||
| for _, issueRecord := range issueList { | |||||
| if isPublicRepo(issueRecord.RepoID, publicAllRepo) { | |||||
| if _, ok := resultMap[issueRecord.PosterID]; !ok { | |||||
| resultMap[issueRecord.PosterID] = 1 | |||||
| } else { | |||||
| resultMap[issueRecord.PosterID] += 1 | |||||
| } | |||||
| publicRepoIssueIdMap[issueRecord.ID] = 1 | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap, publicRepoIssueIdMap | |||||
| } | |||||
| func queryCommentPublic(start_unix int64, end_unix int64, publicRepoIssueIdMap map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| cond := "created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||||
| resultMap := make(map[int64]int) | |||||
| count, err := sess.Where(cond).Count(new(Comment)) | |||||
| if err != nil { | |||||
| log.Info("query Comment error. return.") | |||||
| return resultMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("id,type,poster_id").Table("comment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| commentList := make([]*Comment, 0) | |||||
| sess.Find(&commentList) | |||||
| log.Info("query Comment size=" + fmt.Sprint(len(commentList))) | |||||
| for _, commentRecord := range commentList { | |||||
| if isPublicRepo(commentRecord.IssueID, publicRepoIssueIdMap) { | |||||
| if _, ok := resultMap[commentRecord.PosterID]; !ok { | |||||
| resultMap[commentRecord.PosterID] = 1 | |||||
| } else { | |||||
| resultMap[commentRecord.PosterID] += 1 | |||||
| } | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap | |||||
| } | |||||
| func queryAllPublicDataSet(publicAllRepo map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| publicDataSetIdMap := make(map[int64]int) | |||||
| count, err := sess.Count(new(Dataset)) | |||||
| if err != nil { | |||||
| log.Info("query dataset error. return.") | |||||
| return publicDataSetIdMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("id,user_id,repo_id").Table(new(Dataset)).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| datasetList := make([]*Dataset, 0) | |||||
| sess.Find(&datasetList) | |||||
| log.Info("query datasetList size=" + fmt.Sprint(len(datasetList))) | |||||
| for _, datasetRecord := range datasetList { | |||||
| if isPublicRepo(datasetRecord.RepoID, publicAllRepo) { | |||||
| publicDataSetIdMap[datasetRecord.ID] = 1 | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return publicDataSetIdMap | |||||
| } | |||||
| func queryPublicDatasetFileNums(start_unix int64, end_unix int64, publicDataSetIdMap map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultNumMap := make(map[int64]int) | |||||
| cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||||
| count, err := sess.Where(cond).Count(new(Attachment)) | |||||
| if err != nil { | |||||
| log.Info("query attachment error. return.") | |||||
| return resultNumMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("id,uploader_id,size,dataset_id").Table("attachment").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| attachmentList := make([]*Attachment, 0) | |||||
| sess.Find(&attachmentList) | |||||
| log.Info("query Attachment size=" + fmt.Sprint(len(attachmentList))) | |||||
| for _, attachRecord := range attachmentList { | |||||
| if isPublicRepo(attachRecord.DatasetID, publicDataSetIdMap) { | |||||
| if _, ok := resultNumMap[attachRecord.UploaderID]; !ok { | |||||
| resultNumMap[attachRecord.UploaderID] = 1 | |||||
| } else { | |||||
| resultNumMap[attachRecord.UploaderID] += 1 | |||||
| } | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultNumMap | |||||
| } | |||||
| func queryUserModelPublic(start_unix int64, end_unix int64, publicAllRepo map[int64]int) map[int64]int { | |||||
| sess := x.NewSession() | |||||
| defer sess.Close() | |||||
| resultMap := make(map[int64]int) | |||||
| cond := " created_unix>=" + fmt.Sprint(start_unix) + " and created_unix<=" + fmt.Sprint(end_unix) | |||||
| count, err := sess.Where(cond).Count(new(AiModelManage)) | |||||
| if err != nil { | |||||
| log.Info("query AiModelManage error. return.") | |||||
| return resultMap | |||||
| } | |||||
| var indexTotal int64 | |||||
| indexTotal = 0 | |||||
| for { | |||||
| sess.Select("id,user_id,repo_id").Table("ai_model_manage").Where(cond).OrderBy("id asc").Limit(PAGE_SIZE, int(indexTotal)) | |||||
| aiModelList := make([]*AiModelManage, 0) | |||||
| sess.Find(&aiModelList) | |||||
| log.Info("query AiModelManage size=" + fmt.Sprint(len(aiModelList))) | |||||
| for _, aiModelRecord := range aiModelList { | |||||
| if isPublicRepo(aiModelRecord.RepoId, publicAllRepo) { | |||||
| if _, ok := resultMap[aiModelRecord.UserId]; !ok { | |||||
| resultMap[aiModelRecord.UserId] = 1 | |||||
| } else { | |||||
| resultMap[aiModelRecord.UserId] += 1 | |||||
| } | |||||
| } | |||||
| } | |||||
| indexTotal += PAGE_SIZE | |||||
| if indexTotal >= count { | |||||
| break | |||||
| } | |||||
| } | |||||
| return resultMap | |||||
| } | |||||
| @@ -105,6 +105,8 @@ type UserBusinessAnalysisAll struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysis struct { | type UserBusinessAnalysis struct { | ||||
| @@ -192,6 +194,8 @@ type UserBusinessAnalysis struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisQueryOptions struct { | type UserBusinessAnalysisQueryOptions struct { | ||||
| @@ -475,6 +479,7 @@ func QueryUserStaticDataForUserDefine(opts *UserBusinessAnalysisQueryOptions, wi | |||||
| dateRecord.CountDate = CountDate.Unix() | dateRecord.CountDate = CountDate.Unix() | ||||
| dateRecord.DataDate = DataDate | dateRecord.DataDate = DataDate | ||||
| dateRecord.Email = userRecord.Email | dateRecord.Email = userRecord.Email | ||||
| dateRecord.Phone = userRecord.PhoneNumber | |||||
| dateRecord.RegistDate = userRecord.CreatedUnix | dateRecord.RegistDate = userRecord.CreatedUnix | ||||
| dateRecord.Name = userRecord.Name | dateRecord.Name = userRecord.Name | ||||
| dateRecord.UserLocation = userRecord.Location | dateRecord.UserLocation = userRecord.Location | ||||
| @@ -728,6 +733,7 @@ func refreshUserStaticTable(wikiCountMap map[string]int, tableName string, pageS | |||||
| var dateRecordAll UserBusinessAnalysisAll | var dateRecordAll UserBusinessAnalysisAll | ||||
| dateRecordAll.ID = userRecord.ID | dateRecordAll.ID = userRecord.ID | ||||
| dateRecordAll.Email = userRecord.Email | dateRecordAll.Email = userRecord.Email | ||||
| dateRecordAll.Phone = userRecord.PhoneNumber | |||||
| dateRecordAll.RegistDate = userRecord.CreatedUnix | dateRecordAll.RegistDate = userRecord.CreatedUnix | ||||
| dateRecordAll.Name = userRecord.Name | dateRecordAll.Name = userRecord.Name | ||||
| dateRecordAll.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) | dateRecordAll.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) | ||||
| @@ -839,7 +845,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static | |||||
| insertBatchSql := "INSERT INTO public." + tableName + | insertBatchSql := "INSERT INTO public." + tableName + | ||||
| "(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " + | "(id, count_date, code_merge_count, commit_count, issue_count, comment_count, focus_repo_count, star_repo_count, watched_count, gitea_age_month, commit_code_size, commit_dataset_size, " + | ||||
| "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location,focus_other_user,collect_dataset,collected_dataset,recommend_dataset,collect_image,collected_image,recommend_image,user_index_primitive) " + | |||||
| "commit_model_count, solve_issue_count, encyclopedias_count, regist_date, create_repo_count, login_count, open_i_index, email, name, data_date,cloud_brain_task_num,gpu_debug_job,npu_debug_job,gpu_train_job,npu_train_job,npu_inference_job,gpu_bench_mark_job,cloud_brain_run_time,commit_dataset_num,user_index,user_location,focus_other_user,collect_dataset,collected_dataset,recommend_dataset,collect_image,collected_image,recommend_image,user_index_primitive,phone) " + | |||||
| "VALUES" | "VALUES" | ||||
| for i, record := range dateRecords { | for i, record := range dateRecords { | ||||
| @@ -848,7 +854,7 @@ func insertTable(dateRecords []UserBusinessAnalysisAll, tableName string, static | |||||
| ", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) + | ", " + fmt.Sprint(record.WatchedCount) + ", " + fmt.Sprint(record.GiteaAgeMonth) + ", " + fmt.Sprint(record.CommitCodeSize) + ", " + fmt.Sprint(record.CommitDatasetSize) + | ||||
| ", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) + | ", " + fmt.Sprint(record.CommitModelCount) + ", " + fmt.Sprint(record.SolveIssueCount) + ", " + fmt.Sprint(record.EncyclopediasCount) + ", " + fmt.Sprint(record.RegistDate) + | ||||
| ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "'," + | ", " + fmt.Sprint(record.CreateRepoCount) + ", " + fmt.Sprint(record.LoginCount) + ", " + fmt.Sprint(record.OpenIIndex) + ", '" + record.Email + "', '" + record.Name + "', '" + record.DataDate + "'," + fmt.Sprint(record.CloudBrainTaskNum) + "," + fmt.Sprint(record.GpuDebugJob) + "," + fmt.Sprint(record.NpuDebugJob) + "," + fmt.Sprint(record.GpuTrainJob) + "," + fmt.Sprint(record.NpuTrainJob) + "," + fmt.Sprint(record.NpuInferenceJob) + "," + fmt.Sprint(record.GpuBenchMarkJob) + "," + fmt.Sprint(record.CloudBrainRunTime) + "," + fmt.Sprint(record.CommitDatasetNum) + "," + fmt.Sprint(record.UserIndex) + ",'" + record.UserLocation + "'," + | ||||
| fmt.Sprint(record.FocusOtherUser) + "," + fmt.Sprint(record.CollectDataset) + "," + fmt.Sprint(record.CollectedDataset) + "," + fmt.Sprint(record.RecommendDataset) + "," + fmt.Sprint(record.CollectImage) + "," + fmt.Sprint(record.CollectedImage) + "," + fmt.Sprint(record.RecommendImage) + "," + fmt.Sprint(record.UserIndexPrimitive) + ")" | |||||
| fmt.Sprint(record.FocusOtherUser) + "," + fmt.Sprint(record.CollectDataset) + "," + fmt.Sprint(record.CollectedDataset) + "," + fmt.Sprint(record.RecommendDataset) + "," + fmt.Sprint(record.CollectImage) + "," + fmt.Sprint(record.CollectedImage) + "," + fmt.Sprint(record.RecommendImage) + "," + fmt.Sprint(record.UserIndexPrimitive) + ",'" + record.Phone + "')" | |||||
| if i < (len(dateRecords) - 1) { | if i < (len(dateRecords) - 1) { | ||||
| insertBatchSql += "," | insertBatchSql += "," | ||||
| } | } | ||||
| @@ -973,6 +979,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, | |||||
| dateRecord.CountDate = CountDate.Unix() | dateRecord.CountDate = CountDate.Unix() | ||||
| dateRecord.Email = userRecord.Email | dateRecord.Email = userRecord.Email | ||||
| dateRecord.Phone = userRecord.PhoneNumber | |||||
| dateRecord.RegistDate = userRecord.CreatedUnix | dateRecord.RegistDate = userRecord.CreatedUnix | ||||
| dateRecord.Name = userRecord.Name | dateRecord.Name = userRecord.Name | ||||
| dateRecord.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) | dateRecord.GiteaAgeMonth = subMonth(currentTimeNow, userRecord.CreatedUnix.AsTime()) | ||||
| @@ -1028,12 +1035,12 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, | |||||
| setUserMetrics(userMetrics, userRecord, start_unix, end_unix, dateRecord) | setUserMetrics(userMetrics, userRecord, start_unix, end_unix, dateRecord) | ||||
| if getUserActivate(dateRecord) > 0 { | if getUserActivate(dateRecord) > 0 { | ||||
| log.Info("has activity." + userRecord.Name) | log.Info("has activity." + userRecord.Name) | ||||
| addUserToMap(userNewAddActivity, userRecord.CreatedUnix, dateRecord.ID) | |||||
| addUserToMap(userNewAddActivity, userRecord.CreatedUnix, dateRecord.ID, currentTimeNow) | |||||
| } | } | ||||
| if userRecord.IsActive { | if userRecord.IsActive { | ||||
| addUserToMap(userAcitvateJsonMap, userRecord.CreatedUnix, dateRecord.ID) | |||||
| addUserToMap(userAcitvateJsonMap, userRecord.CreatedUnix, dateRecord.ID, currentTimeNow) | |||||
| } | } | ||||
| addUserToMap(userCurrentDayRegistMap, userRecord.CreatedUnix, dateRecord.ID) | |||||
| addUserToMap(userCurrentDayRegistMap, userRecord.CreatedUnix, dateRecord.ID, currentTimeNow) | |||||
| } | } | ||||
| indexTotal += PAGE_SIZE | indexTotal += PAGE_SIZE | ||||
| @@ -1056,7 +1063,7 @@ func CounDataByDateAndReCount(wikiCountMap map[string]int, startTime time.Time, | |||||
| useMetrics.NotActivateRegistUser = getMapKeyStringValue("NotActivateRegistUser", userMetrics) | useMetrics.NotActivateRegistUser = getMapKeyStringValue("NotActivateRegistUser", userMetrics) | ||||
| useMetrics.TotalActivateRegistUser = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) | useMetrics.TotalActivateRegistUser = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) | ||||
| useMetrics.TotalHasActivityUser = getMapKeyStringValue("TotalHasActivityUser", userMetrics) | useMetrics.TotalHasActivityUser = getMapKeyStringValue("TotalHasActivityUser", userMetrics) | ||||
| useMetrics.CurrentDayRegistUser = getMapKeyStringValue("CurrentDayRegistUser", userMetrics) | |||||
| count, err = sess.Where("type=0").Count(new(User)) | count, err = sess.Where("type=0").Count(new(User)) | ||||
| if err != nil { | if err != nil { | ||||
| log.Info("query user error. return.") | log.Info("query user error. return.") | ||||
| @@ -1124,8 +1131,9 @@ func setUniqueUserId(jsonString string, value map[int64]int64) (string, int) { | |||||
| return userIdArray, len(value) | return userIdArray, len(value) | ||||
| } | } | ||||
| func addUserToMap(currentUserActivity map[int64]map[int64]int64, registDate timeutil.TimeStamp, userId int64) { | |||||
| CountDateTime := time.Date(registDate.Year(), registDate.AsTime().Month(), registDate.AsTime().Day(), 0, 1, 0, 0, registDate.AsTime().Location()) | |||||
| func addUserToMap(currentUserActivity map[int64]map[int64]int64, registDate timeutil.TimeStamp, userId int64, currentTimeNow time.Time) { | |||||
| registTime := registDate.AsTimeInLocation(currentTimeNow.Location()) | |||||
| CountDateTime := time.Date(registTime.Year(), registTime.Month(), registTime.Day(), 0, 1, 0, 0, currentTimeNow.Location()) | |||||
| CountDate := CountDateTime.Unix() | CountDate := CountDateTime.Unix() | ||||
| if _, ok := currentUserActivity[CountDate]; !ok { | if _, ok := currentUserActivity[CountDate]; !ok { | ||||
| userIdMap := make(map[int64]int64, 0) | userIdMap := make(map[int64]int64, 0) | ||||
| @@ -1149,6 +1157,7 @@ func setUserMetrics(userMetrics map[string]int, user *User, start_time int64, en | |||||
| } else { | } else { | ||||
| userMetrics["NotActivateRegistUser"] = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + 1 | userMetrics["NotActivateRegistUser"] = getMapKeyStringValue("NotActivateRegistUser", userMetrics) + 1 | ||||
| } | } | ||||
| userMetrics["CurrentDayRegistUser"] = getMapKeyStringValue("CurrentDayRegistUser", userMetrics) + 1 | |||||
| } | } | ||||
| if user.IsActive { | if user.IsActive { | ||||
| userMetrics["TotalActivateRegistUser"] = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + 1 | userMetrics["TotalActivateRegistUser"] = getMapKeyStringValue("TotalActivateRegistUser", userMetrics) + 1 | ||||
| @@ -65,6 +65,8 @@ type UserBusinessAnalysisCurrentYear struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisLast30Day struct { | type UserBusinessAnalysisLast30Day struct { | ||||
| @@ -130,6 +132,8 @@ type UserBusinessAnalysisLast30Day struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisLastMonth struct { | type UserBusinessAnalysisLastMonth struct { | ||||
| @@ -195,6 +199,8 @@ type UserBusinessAnalysisLastMonth struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisCurrentMonth struct { | type UserBusinessAnalysisCurrentMonth struct { | ||||
| @@ -260,6 +266,8 @@ type UserBusinessAnalysisCurrentMonth struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisCurrentWeek struct { | type UserBusinessAnalysisCurrentWeek struct { | ||||
| @@ -326,6 +334,8 @@ type UserBusinessAnalysisCurrentWeek struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisYesterday struct { | type UserBusinessAnalysisYesterday struct { | ||||
| @@ -392,6 +402,8 @@ type UserBusinessAnalysisYesterday struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserBusinessAnalysisLastWeek struct { | type UserBusinessAnalysisLastWeek struct { | ||||
| @@ -458,6 +470,8 @@ type UserBusinessAnalysisLastWeek struct { | |||||
| CollectImage int `xorm:"NOT NULL DEFAULT 0"` | CollectImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | CollectedImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | RecommendImage int `xorm:"NOT NULL DEFAULT 0"` | ||||
| Phone string `xorm:"NULL"` | |||||
| } | } | ||||
| type UserAnalysisPara struct { | type UserAnalysisPara struct { | ||||
| @@ -17,7 +17,7 @@ import ( | |||||
| ) | ) | ||||
| const ( | const ( | ||||
| Command = `pip3 install jupyterlab==2.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --LabApp.token="" --LabApp.allow_origin="self https://cloudbrain.pcl.ac.cn"` | |||||
| //Command = `pip3 install jupyterlab==2.2.5 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;jupyter lab --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --LabApp.token="" --LabApp.allow_origin="self https://cloudbrain.pcl.ac.cn"` | |||||
| //CommandBenchmark = `echo "start benchmark";python /code/test.py;echo "end benchmark"` | //CommandBenchmark = `echo "start benchmark";python /code/test.py;echo "end benchmark"` | ||||
| CommandBenchmark = `echo "start benchmark";cd /benchmark && bash run_bk.sh;echo "end benchmark"` | CommandBenchmark = `echo "start benchmark";cd /benchmark && bash run_bk.sh;echo "end benchmark"` | ||||
| CodeMountPath = "/code" | CodeMountPath = "/code" | ||||
| @@ -42,6 +42,7 @@ const ( | |||||
| var ( | var ( | ||||
| ResourceSpecs *models.ResourceSpecs | ResourceSpecs *models.ResourceSpecs | ||||
| TrainResourceSpecs *models.ResourceSpecs | TrainResourceSpecs *models.ResourceSpecs | ||||
| SpecialPools *models.SpecialPools | |||||
| ) | ) | ||||
| type GenerateCloudBrainTaskReq struct { | type GenerateCloudBrainTaskReq struct { | ||||
| @@ -70,6 +71,11 @@ type GenerateCloudBrainTaskReq struct { | |||||
| ResourceSpecId int | ResourceSpecId int | ||||
| } | } | ||||
// GetCloudbrainDebugCommand builds the shell command used to start the
// JupyterLab debug environment inside a cloudbrain container. The idle-culling
// timeouts and poll intervals are read from the configurable
// setting.CullIdleTimeout / setting.CullInterval values (strings, seconds),
// which is why the command is assembled at call time rather than kept as a
// package constant.
func GetCloudbrainDebugCommand() string {
	var command = `pip3 install jupyterlab==3 -i https://pypi.tuna.tsinghua.edu.cn/simple;service ssh stop;/usr/local/bin/python /usr/local/bin/jupyter-lab --ServerApp.shutdown_no_activity_timeout=` + setting.CullIdleTimeout + ` --TerminalManager.cull_inactive_timeout=` + setting.CullIdleTimeout + ` --TerminalManager.cull_interval=` + setting.CullInterval + ` --MappingKernelManager.cull_idle_timeout=` + setting.CullIdleTimeout + ` --MappingKernelManager.cull_interval=` + setting.CullInterval + ` --MappingKernelManager.cull_connected=True --MappingKernelManager.cull_busy=True --no-browser --ip=0.0.0.0 --allow-root --notebook-dir="/code" --port=80 --ServerApp.token="" --ServerApp.allow_origin="self https://cloudbrain.pcl.ac.cn" `
	return command
}
| func isAdminOrOwnerOrJobCreater(ctx *context.Context, job *models.Cloudbrain, err error) bool { | func isAdminOrOwnerOrJobCreater(ctx *context.Context, job *models.Cloudbrain, err error) bool { | ||||
| if !ctx.IsSigned { | if !ctx.IsSigned { | ||||
| return false | return false | ||||
| @@ -222,6 +228,7 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| for _, spec := range TrainResourceSpecs.ResourceSpec { | for _, spec := range TrainResourceSpecs.ResourceSpec { | ||||
| if req.ResourceSpecId == spec.Id { | if req.ResourceSpecId == spec.Id { | ||||
| resourceSpec = spec | resourceSpec = spec | ||||
| break | |||||
| } | } | ||||
| } | } | ||||
| } else { | } else { | ||||
| @@ -231,10 +238,29 @@ func GenerateTask(req GenerateCloudBrainTaskReq) error { | |||||
| for _, spec := range ResourceSpecs.ResourceSpec { | for _, spec := range ResourceSpecs.ResourceSpec { | ||||
| if req.ResourceSpecId == spec.Id { | if req.ResourceSpecId == spec.Id { | ||||
| resourceSpec = spec | resourceSpec = spec | ||||
| break | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| //如果没有匹配到spec信息,尝试从专属资源池获取 | |||||
| if resourceSpec == nil && SpecialPools != nil { | |||||
| for _, specialPool := range SpecialPools.Pools { | |||||
| if resourceSpec != nil { | |||||
| break | |||||
| } | |||||
| if specialPool.ResourceSpec != nil { | |||||
| if IsElementExist(specialPool.JobType, req.JobType) && IsQueueInSpecialtPool(specialPool.Pool, req.GpuQueue) { | |||||
| for _, spec := range specialPool.ResourceSpec { | |||||
| if req.ResourceSpecId == spec.Id { | |||||
| resourceSpec = spec | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| if resourceSpec == nil { | if resourceSpec == nil { | ||||
| log.Error("no such resourceSpecId(%d)", req.ResourceSpecId, req.Ctx.Data["MsgID"]) | log.Error("no such resourceSpecId(%d)", req.ResourceSpecId, req.Ctx.Data["MsgID"]) | ||||
| @@ -486,7 +512,7 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e | |||||
| GPUNumber: resourceSpec.GpuNum, | GPUNumber: resourceSpec.GpuNum, | ||||
| MemoryMB: resourceSpec.MemMiB, | MemoryMB: resourceSpec.MemMiB, | ||||
| ShmMB: resourceSpec.ShareMemMiB, | ShmMB: resourceSpec.ShareMemMiB, | ||||
| Command: Command, | |||||
| Command: GetCloudbrainDebugCommand(),//Command, | |||||
| NeedIBDevice: false, | NeedIBDevice: false, | ||||
| IsMainRole: false, | IsMainRole: false, | ||||
| UseNNI: false, | UseNNI: false, | ||||
| @@ -538,3 +564,39 @@ func RestartTask(ctx *context.Context, task *models.Cloudbrain, newID *string) e | |||||
| return nil | return nil | ||||
| } | } | ||||
| func InitSpecialPool() { | |||||
| if SpecialPools == nil && setting.SpecialPools != "" { | |||||
| json.Unmarshal([]byte(setting.SpecialPools), &SpecialPools) | |||||
| } | |||||
| } | |||||
| func IsResourceSpecInSpecialPool(resourceSpecs []*models.ResourceSpec, resourceSpecId int) bool { | |||||
| if resourceSpecs == nil || len(resourceSpecs) == 0 { | |||||
| return true | |||||
| } | |||||
| for _, v := range resourceSpecs { | |||||
| if v.Id == resourceSpecId { | |||||
| return true | |||||
| } | |||||
| } | |||||
| return false | |||||
| } | |||||
| func IsQueueInSpecialtPool(pool []*models.GpuInfo, queue string) bool { | |||||
| for _, v := range pool { | |||||
| if v.Queue == queue { | |||||
| return true | |||||
| } | |||||
| } | |||||
| return false | |||||
| } | |||||
// IsElementExist reports whether str occurs in the string slice s.
// A nil slice contains nothing.
func IsElementExist(s []string, str string) bool {
	for i := 0; i < len(s); i++ {
		if s[i] == str {
			return true
		}
	}
	return false
}
| @@ -470,12 +470,15 @@ var ( | |||||
| CBCodePathPrefix string | CBCodePathPrefix string | ||||
| JobType string | JobType string | ||||
| GpuTypes string | GpuTypes string | ||||
| SpecialPools string | |||||
| DebugServerHost string | DebugServerHost string | ||||
| ResourceSpecs string | ResourceSpecs string | ||||
| MaxDuration int64 | MaxDuration int64 | ||||
| TrainGpuTypes string | TrainGpuTypes string | ||||
| TrainResourceSpecs string | TrainResourceSpecs string | ||||
| MaxDatasetNum int | MaxDatasetNum int | ||||
| CullIdleTimeout string | |||||
| CullInterval string | |||||
| //benchmark config | //benchmark config | ||||
| IsBenchmarkEnabled bool | IsBenchmarkEnabled bool | ||||
| @@ -1324,7 +1327,11 @@ func NewContext() { | |||||
| MaxDuration = sec.Key("MAX_DURATION").MustInt64(14400) | MaxDuration = sec.Key("MAX_DURATION").MustInt64(14400) | ||||
| TrainGpuTypes = sec.Key("TRAIN_GPU_TYPES").MustString("") | TrainGpuTypes = sec.Key("TRAIN_GPU_TYPES").MustString("") | ||||
| TrainResourceSpecs = sec.Key("TRAIN_RESOURCE_SPECS").MustString("") | TrainResourceSpecs = sec.Key("TRAIN_RESOURCE_SPECS").MustString("") | ||||
| SpecialPools = sec.Key("SPECIAL_POOL").MustString("") | |||||
| MaxDatasetNum = sec.Key("MAX_DATASET_NUM").MustInt(5) | MaxDatasetNum = sec.Key("MAX_DATASET_NUM").MustInt(5) | ||||
| CullIdleTimeout = sec.Key("CULL_IDLE_TIMEOUT").MustString("900") | |||||
| CullInterval = sec.Key("CULL_INTERVAL").MustString("60") | |||||
| sec = Cfg.Section("benchmark") | sec = Cfg.Section("benchmark") | ||||
| IsBenchmarkEnabled = sec.Key("ENABLED").MustBool(false) | IsBenchmarkEnabled = sec.Key("ENABLED").MustBool(false) | ||||
| @@ -18,6 +18,7 @@ import ( | |||||
| "path/filepath" | "path/filepath" | ||||
| "regexp" | "regexp" | ||||
| "runtime" | "runtime" | ||||
| "strconv" | |||||
| "strings" | "strings" | ||||
| texttmpl "text/template" | texttmpl "text/template" | ||||
| "time" | "time" | ||||
| @@ -327,6 +328,7 @@ func NewFuncMap() []template.FuncMap { | |||||
| }, | }, | ||||
| "GetRefType": GetRefType, | "GetRefType": GetRefType, | ||||
| "GetRefName": GetRefName, | "GetRefName": GetRefName, | ||||
| "MB2GB": MB2GB, | |||||
| }} | }} | ||||
| } | } | ||||
| @@ -785,3 +787,14 @@ func GetRefName(ref string) string { | |||||
| reg := regexp.MustCompile(REF_TYPE_PATTERN) | reg := regexp.MustCompile(REF_TYPE_PATTERN) | ||||
| return reg.ReplaceAllString(ref, "") | return reg.ReplaceAllString(ref, "") | ||||
| } | } | ||||
// MB2GB converts a size in MiB to a human-readable GiB string with at most
// two decimal places, dropping trailing zeros and a dangling decimal point
// (e.g. 1536 -> "1.5", 1024 -> "1", 0 -> "0").
func MB2GB(size int64) string {
	s := strconv.FormatFloat(float64(size)/1024.0, 'f', 2, 64)
	s = strings.TrimRight(s, "0")
	return strings.TrimSuffix(s, ".")
}
| @@ -559,6 +559,7 @@ static.CollectImage=Collect Image Count | |||||
| static.CollectedImage=Collected Image Count | static.CollectedImage=Collected Image Count | ||||
| static.RecommendImage=Recommended Image Count | static.RecommendImage=Recommended Image Count | ||||
| static.email=Email | static.email=Email | ||||
| static.phone=Phone | |||||
| static.location=Location | static.location=Location | ||||
| static.all=All | static.all=All | ||||
| static.public.user_business_analysis_current_month=Current_Month | static.public.user_business_analysis_current_month=Current_Month | ||||
| @@ -564,6 +564,7 @@ static.CollectImage=收藏镜像数 | |||||
| static.CollectedImage=被收藏镜像数 | static.CollectedImage=被收藏镜像数 | ||||
| static.RecommendImage=被推荐镜像数 | static.RecommendImage=被推荐镜像数 | ||||
| static.email=Email | static.email=Email | ||||
| static.phone=电话 | |||||
| static.location=所在地区 | static.location=所在地区 | ||||
| static.all=所有 | static.all=所有 | ||||
| static.public.user_business_analysis_current_month=本月 | static.public.user_business_analysis_current_month=本月 | ||||
| @@ -570,6 +570,7 @@ func RegisterRoutes(m *macaron.Macaron) { | |||||
| m.Get("/query_user_last_month", operationReq, repo_ext.QueryUserStaticLastMonth) | m.Get("/query_user_last_month", operationReq, repo_ext.QueryUserStaticLastMonth) | ||||
| m.Get("/query_user_yesterday", operationReq, repo_ext.QueryUserStaticYesterday) | m.Get("/query_user_yesterday", operationReq, repo_ext.QueryUserStaticYesterday) | ||||
| m.Get("/query_user_all", operationReq, repo_ext.QueryUserStaticAll) | m.Get("/query_user_all", operationReq, repo_ext.QueryUserStaticAll) | ||||
| m.Get("/query_user_activity", operationReq, repo_ext.QueryUserActivity) | |||||
| //cloudbrain board | //cloudbrain board | ||||
| m.Group("/cloudbrainboard", func() { | m.Group("/cloudbrainboard", func() { | ||||
| m.Get("/downloadAll", repo.DownloadCloudBrainBoard) | m.Get("/downloadAll", repo.DownloadCloudBrainBoard) | ||||
| @@ -752,10 +752,26 @@ func GetCloudbrainsDetailData(ctx *context.Context) { | |||||
| taskDetail.RepoAlias = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Alias | taskDetail.RepoAlias = ciTasks[i].Repo.OwnerName + "/" + ciTasks[i].Repo.Alias | ||||
| } | } | ||||
| if ciTasks[i].Cloudbrain.Status == string(models.JobWaiting) { | if ciTasks[i].Cloudbrain.Status == string(models.JobWaiting) { | ||||
| WaitTimeInt := time.Now().Unix() - ciTasks[i].Cloudbrain.CreatedUnix.AsTime().Unix() | |||||
| taskDetail.WaitTime = models.ConvertDurationToStr(WaitTimeInt) | |||||
| if WaitTimeInt < 0 { | |||||
| taskDetail.WaitTime = "00:00:00" | |||||
| if ciTasks[i].Cloudbrain.DeletedAt != nilTime { | |||||
| WaitTimeInt := ciTasks[i].Cloudbrain.UpdatedUnix.AsTime().Unix() - ciTasks[i].Cloudbrain.CreatedUnix.AsTime().Unix() | |||||
| taskDetail.WaitTime = models.ConvertDurationToStr(WaitTimeInt) | |||||
| if WaitTimeInt < 0 { | |||||
| taskDetail.WaitTime = "00:00:00" | |||||
| } | |||||
| } else { | |||||
| if ciTasks[i].Cloudbrain.StartTime.AsTime().Unix() == 0 { | |||||
| WaitTimeInt := time.Now().Unix() - ciTasks[i].Cloudbrain.CreatedUnix.AsTime().Unix() | |||||
| taskDetail.WaitTime = models.ConvertDurationToStr(WaitTimeInt) | |||||
| if WaitTimeInt < 0 { | |||||
| taskDetail.WaitTime = "00:00:00" | |||||
| } | |||||
| } else { | |||||
| WaitTimeInt := ciTasks[i].Cloudbrain.StartTime.AsTime().Unix() - ciTasks[i].Cloudbrain.CreatedUnix.AsTime().Unix() | |||||
| taskDetail.WaitTime = models.ConvertDurationToStr(WaitTimeInt) | |||||
| if WaitTimeInt < 0 { | |||||
| taskDetail.WaitTime = "00:00:00" | |||||
| } | |||||
| } | |||||
| } | } | ||||
| } else if ciTasks[i].Cloudbrain.Status == string(models.JobStopped) && ciTasks[i].Cloudbrain.StartTime.AsTime().Unix() == 0 { | } else if ciTasks[i].Cloudbrain.Status == string(models.JobStopped) && ciTasks[i].Cloudbrain.StartTime.AsTime().Unix() == 0 { | ||||
| WaitTimeInt := ciTasks[i].Cloudbrain.EndTime.AsTime().Unix() - ciTasks[i].Cloudbrain.CreatedUnix.AsTime().Unix() | WaitTimeInt := ciTasks[i].Cloudbrain.EndTime.AsTime().Unix() - ciTasks[i].Cloudbrain.CreatedUnix.AsTime().Unix() | ||||
| @@ -7,8 +7,10 @@ package repo | |||||
| import ( | import ( | ||||
| "code.gitea.io/gitea/modules/grampus" | "code.gitea.io/gitea/modules/grampus" | ||||
| "code.gitea.io/gitea/modules/setting" | |||||
| "encoding/json" | "encoding/json" | ||||
| "net/http" | "net/http" | ||||
| "path" | |||||
| "strconv" | "strconv" | ||||
| "strings" | "strings" | ||||
| @@ -263,39 +265,49 @@ func TrainJobGetLog(ctx *context.APIContext) { | |||||
| return | return | ||||
| } | } | ||||
| resultLogFile, result, err := trainJobGetLogContent(jobID, versionName, baseLine, order, lines_int) | |||||
| task, err := models.GetCloudbrainByJobIDAndVersionName(jobID, versionName) | |||||
| if err != nil { | |||||
| log.Error("GetCloudbrainByJobID(%s) failed:%v", jobID, err.Error()) | |||||
| return | |||||
| } | |||||
| resultLogFile, result, err := trainJobGetLogContent(jobID, task.VersionID, baseLine, order, lines_int) | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("trainJobGetLog(%s) failed:%v", jobID, err.Error()) | log.Error("trainJobGetLog(%s) failed:%v", jobID, err.Error()) | ||||
| // ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobShow, nil) | // ctx.RenderWithErr(err.Error(), tplModelArtsTrainJobShow, nil) | ||||
| return | return | ||||
| } | } | ||||
| prefix := strings.TrimPrefix(path.Join(setting.TrainJobModelPath, task.JobName, modelarts.LogPath, versionName), "/") + "/job" | |||||
| _, err = storage.GetObsLogFileName(prefix) | |||||
| var canLogDownload bool | |||||
| if err != nil { | |||||
| canLogDownload = false | |||||
| } else { | |||||
| canLogDownload = true | |||||
| } | |||||
| ctx.Data["log_file_name"] = resultLogFile.LogFileList[0] | ctx.Data["log_file_name"] = resultLogFile.LogFileList[0] | ||||
| ctx.JSON(http.StatusOK, map[string]interface{}{ | ctx.JSON(http.StatusOK, map[string]interface{}{ | ||||
| "JobID": jobID, | |||||
| "LogFileName": resultLogFile.LogFileList[0], | |||||
| "StartLine": result.StartLine, | |||||
| "EndLine": result.EndLine, | |||||
| "Content": result.Content, | |||||
| "Lines": result.Lines, | |||||
| "JobID": jobID, | |||||
| "LogFileName": resultLogFile.LogFileList[0], | |||||
| "StartLine": result.StartLine, | |||||
| "EndLine": result.EndLine, | |||||
| "Content": result.Content, | |||||
| "Lines": result.Lines, | |||||
| "CanLogDownload": canLogDownload, | |||||
| }) | }) | ||||
| } | } | ||||
| func trainJobGetLogContent(jobID string, versionName string, baseLine string, order string, lines int) (*models.GetTrainJobLogFileNamesResult, *models.GetTrainJobLogResult, error) { | |||||
| task, err := models.GetCloudbrainByJobIDAndVersionName(jobID, versionName) | |||||
| if err != nil { | |||||
| log.Error("GetCloudbrainByJobID(%s) failed:%v", jobID, err.Error()) | |||||
| return nil, nil, err | |||||
| } | |||||
| func trainJobGetLogContent(jobID string, versionID int64, baseLine string, order string, lines int) (*models.GetTrainJobLogFileNamesResult, *models.GetTrainJobLogResult, error) { | |||||
| resultLogFile, err := modelarts.GetTrainJobLogFileNames(jobID, strconv.FormatInt(task.VersionID, 10)) | |||||
| resultLogFile, err := modelarts.GetTrainJobLogFileNames(jobID, strconv.FormatInt(versionID, 10)) | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("GetTrainJobLogFileNames(%s) failed:%v", jobID, err.Error()) | log.Error("GetTrainJobLogFileNames(%s) failed:%v", jobID, err.Error()) | ||||
| return nil, nil, err | return nil, nil, err | ||||
| } | } | ||||
| result, err := modelarts.GetTrainJobLog(jobID, strconv.FormatInt(task.VersionID, 10), baseLine, resultLogFile.LogFileList[0], order, lines) | |||||
| result, err := modelarts.GetTrainJobLog(jobID, strconv.FormatInt(versionID, 10), baseLine, resultLogFile.LogFileList[0], order, lines) | |||||
| if err != nil { | if err != nil { | ||||
| log.Error("GetTrainJobLog(%s) failed:%v", jobID, err.Error()) | log.Error("GetTrainJobLog(%s) failed:%v", jobID, err.Error()) | ||||
| return nil, nil, err | return nil, nil, err | ||||
| @@ -2,7 +2,6 @@ package repo | |||||
| import ( | import ( | ||||
| "bufio" | "bufio" | ||||
| "code.gitea.io/gitea/modules/grampus" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -16,6 +15,8 @@ import ( | |||||
| "time" | "time" | ||||
| "unicode/utf8" | "unicode/utf8" | ||||
| "code.gitea.io/gitea/modules/grampus" | |||||
| "code.gitea.io/gitea/modules/timeutil" | "code.gitea.io/gitea/modules/timeutil" | ||||
| "github.com/unknwon/i18n" | "github.com/unknwon/i18n" | ||||
| @@ -135,7 +136,7 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error { | |||||
| } | } | ||||
| ctx.Data["attachments"] = attachs | ctx.Data["attachments"] = attachs | ||||
| ctx.Data["command"] = cloudbrain.Command | |||||
| ctx.Data["command"] = cloudbrain.GetCloudbrainDebugCommand() | |||||
| ctx.Data["code_path"] = cloudbrain.CodeMountPath | ctx.Data["code_path"] = cloudbrain.CodeMountPath | ||||
| ctx.Data["dataset_path"] = cloudbrain.DataSetMountPath | ctx.Data["dataset_path"] = cloudbrain.DataSetMountPath | ||||
| ctx.Data["model_path"] = cloudbrain.ModelMountPath | ctx.Data["model_path"] = cloudbrain.ModelMountPath | ||||
| @@ -149,6 +150,8 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error { | |||||
| ctx.Data["benchmark_types"] = GetBenchmarkTypes(ctx).BenchmarkType | ctx.Data["benchmark_types"] = GetBenchmarkTypes(ctx).BenchmarkType | ||||
| cloudbrain.InitSpecialPool() | |||||
| if gpuInfos == nil { | if gpuInfos == nil { | ||||
| json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) | json.Unmarshal([]byte(setting.GpuTypes), &gpuInfos) | ||||
| } | } | ||||
| @@ -178,6 +181,45 @@ func cloudBrainNewDataPrepare(ctx *context.Context) error { | |||||
| json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) | json.Unmarshal([]byte(setting.TrainResourceSpecs), &cloudbrain.TrainResourceSpecs) | ||||
| } | } | ||||
| ctx.Data["train_resource_specs"] = cloudbrain.TrainResourceSpecs.ResourceSpec | ctx.Data["train_resource_specs"] = cloudbrain.TrainResourceSpecs.ResourceSpec | ||||
| if cloudbrain.SpecialPools != nil { | |||||
| var debugGpuTypes []*models.GpuInfo | |||||
| var trainGpuTypes []*models.GpuInfo | |||||
| for _, pool := range cloudbrain.SpecialPools.Pools { | |||||
| org, _ := models.GetOrgByName(pool.Org) | |||||
| if org != nil { | |||||
| isOrgMember, _ := models.IsOrganizationMember(org.ID, ctx.User.ID) | |||||
| if isOrgMember { | |||||
| for _, jobType := range pool.JobType { | |||||
| if jobType == string(models.JobTypeDebug) { | |||||
| debugGpuTypes = append(debugGpuTypes, pool.Pool...) | |||||
| if pool.ResourceSpec != nil { | |||||
| ctx.Data["resource_specs"] = pool.ResourceSpec | |||||
| } | |||||
| } else if jobType == string(models.JobTypeTrain) { | |||||
| trainGpuTypes = append(trainGpuTypes, pool.Pool...) | |||||
| if pool.ResourceSpec != nil { | |||||
| ctx.Data["train_resource_specs"] = pool.ResourceSpec | |||||
| } | |||||
| } | |||||
| } | |||||
| break | |||||
| } | |||||
| } | |||||
| } | |||||
| if len(debugGpuTypes) > 0 { | |||||
| ctx.Data["gpu_types"] = debugGpuTypes | |||||
| } | |||||
| if len(trainGpuTypes) > 0 { | |||||
| ctx.Data["train_gpu_types"] = trainGpuTypes | |||||
| } | |||||
| } | |||||
| ctx.Data["params"] = "" | ctx.Data["params"] = "" | ||||
| ctx.Data["branchName"] = ctx.Repo.BranchName | ctx.Data["branchName"] = ctx.Repo.BranchName | ||||
| @@ -217,6 +259,10 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| repo := ctx.Repo.Repository | repo := ctx.Repo.Repository | ||||
| tpl := tplCloudBrainNew | tpl := tplCloudBrainNew | ||||
| if jobType == string(models.JobTypeTrain) { | |||||
| tpl = tplCloudBrainTrainJobNew | |||||
| } | |||||
| tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName) | tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName) | ||||
| if err == nil { | if err == nil { | ||||
| if len(tasks) != 0 { | if len(tasks) != 0 { | ||||
| @@ -269,7 +315,7 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| return | return | ||||
| } | } | ||||
| command := cloudbrain.Command | |||||
| command := cloudbrain.GetCloudbrainDebugCommand() | |||||
| if jobType == string(models.JobTypeTrain) { | if jobType == string(models.JobTypeTrain) { | ||||
| tpl = tplCloudBrainTrainJobNew | tpl = tplCloudBrainTrainJobNew | ||||
| commandTrain, err := getTrainJobCommand(form) | commandTrain, err := getTrainJobCommand(form) | ||||
| @@ -282,6 +328,14 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| command = commandTrain | command = commandTrain | ||||
| } | } | ||||
| errStr := checkCloudBrainSpecialPool(ctx, jobType, gpuQueue, resourceSpecId) | |||||
| if errStr != "" { | |||||
| cloudBrainNewDataPrepare(ctx) | |||||
| ctx.RenderWithErr(errStr, tpl, &form) | |||||
| return | |||||
| } | |||||
| if branchName == "" { | if branchName == "" { | ||||
| branchName = cloudbrain.DefaultBranchName | branchName = cloudbrain.DefaultBranchName | ||||
| } | } | ||||
| @@ -334,6 +388,42 @@ func CloudBrainCreate(ctx *context.Context, form auth.CreateCloudBrainForm) { | |||||
| } | } | ||||
| } | } | ||||
| /** | |||||
| 检查用户传输的参数是否符合专属资源池 | |||||
| */ | |||||
| func checkCloudBrainSpecialPool(ctx *context.Context, jobType string, queue string, resourceSpecId int) string { | |||||
| if cloudbrain.SpecialPools != nil { | |||||
| var isInPoolOrg = false | |||||
| var matchSpecialPool = false | |||||
| for _, specialPool := range cloudbrain.SpecialPools.Pools { | |||||
| if cloudbrain.IsElementExist(specialPool.JobType, jobType) && cloudbrain.IsQueueInSpecialtPool(specialPool.Pool, queue) { | |||||
| if cloudbrain.IsResourceSpecInSpecialPool(specialPool.ResourceSpec, resourceSpecId) { | |||||
| matchSpecialPool = true | |||||
| org, _ := models.GetOrgByName(specialPool.Org) | |||||
| if org != nil { | |||||
| isInPoolOrg, _ = models.IsOrganizationMember(org.ID, ctx.User.ID) | |||||
| if isInPoolOrg { | |||||
| break //传入参数,和专属资源池匹配上了,检查通过 | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| //资源池有匹配上,但是用户不在相应的组织中,返回错误信息。界面已经过滤了选择,界面操作不会到这个逻辑 | |||||
| if matchSpecialPool && !isInPoolOrg { | |||||
| return ctx.Tr("repo.grampus.no_operate_right") | |||||
| } | |||||
| } | |||||
| //没有匹配到资源池或者没有设置专属资源池,检查通过; 获取和资源池完全匹配检查通过 | |||||
| return "" | |||||
| } | |||||
| func CloudBrainRestart(ctx *context.Context) { | func CloudBrainRestart(ctx *context.Context) { | ||||
| var ID = ctx.Params(":id") | var ID = ctx.Params(":id") | ||||
| var resultCode = "0" | var resultCode = "0" | ||||
| @@ -573,7 +663,9 @@ func cloudBrainShow(ctx *context.Context, tpName base.TplName, jobType models.Jo | |||||
| if task.TrainJobDuration == "" { | if task.TrainJobDuration == "" { | ||||
| if task.Duration == 0 { | if task.Duration == 0 { | ||||
| var duration int64 | var duration int64 | ||||
| if task.Status == string(models.JobRunning) { | |||||
| if task.Status == string(models.JobWaiting) { | |||||
| duration = 0 | |||||
| } else if task.Status == string(models.JobRunning) { | |||||
| duration = time.Now().Unix() - int64(task.CreatedUnix) | duration = time.Now().Unix() - int64(task.CreatedUnix) | ||||
| } else { | } else { | ||||
| duration = int64(task.UpdatedUnix) - int64(task.CreatedUnix) | duration = int64(task.UpdatedUnix) - int64(task.CreatedUnix) | ||||
| @@ -2094,7 +2186,7 @@ func ModelBenchmarkCreate(ctx *context.Context, form auth.CreateCloudBrainForm) | |||||
| repo := ctx.Repo.Repository | repo := ctx.Repo.Repository | ||||
| tpl := tplCloudBrainBenchmarkNew | tpl := tplCloudBrainBenchmarkNew | ||||
| command := cloudbrain.Command | |||||
| command := cloudbrain.GetCloudbrainDebugCommand() | |||||
| tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName) | tasks, err := models.GetCloudbrainsByDisplayJobName(repo.ID, jobType, displayJobName) | ||||
| if err == nil { | if err == nil { | ||||
| @@ -112,6 +112,7 @@ func getExcelHeader(ctx *context.Context) map[string]string { | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.RecommendImage")) | excelHeader = append(excelHeader, ctx.Tr("user.static.RecommendImage")) | ||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.email")) | excelHeader = append(excelHeader, ctx.Tr("user.static.email")) | ||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.phone")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.location")) | excelHeader = append(excelHeader, ctx.Tr("user.static.location")) | ||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.registdate")) | excelHeader = append(excelHeader, ctx.Tr("user.static.registdate")) | ||||
| @@ -193,6 +194,9 @@ func writeExcel(row int, xlsx *excelize.File, sheetName string, userRecord *mode | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Email) | xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Email) | ||||
| tmp = tmp + 1 | tmp = tmp + 1 | ||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Phone) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.UserLocation) | xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.UserLocation) | ||||
| tmp = tmp + 1 | tmp = tmp + 1 | ||||
| @@ -268,6 +272,9 @@ func writeExcelPage(row int, xlsx *excelize.File, sheetName string, userRecord * | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Email) | xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Email) | ||||
| tmp = tmp + 1 | tmp = tmp + 1 | ||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Phone) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.UserLocation) | xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.UserLocation) | ||||
| tmp = tmp + 1 | tmp = tmp + 1 | ||||
| @@ -601,7 +608,7 @@ func QueryUserStaticDataPage(ctx *context.Context) { | |||||
| filename := sheetName + "_" + startDate + "_" + endDate + ".xlsx" | filename := sheetName + "_" + startDate + "_" + endDate + ".xlsx" | ||||
| os.Remove(setting.AppDataPath + Excel_File_Path + filename) | os.Remove(setting.AppDataPath + Excel_File_Path + filename) | ||||
| go writeFileToDisk(ctx, count, re, filename) | go writeFileToDisk(ctx, count, re, filename) | ||||
| ctx.JSON(http.StatusOK, ctx.Tr("user.static.downloadinfo")+setting.AppURL+"api/v1/download_user_define_file?filename="+filename) | |||||
| ctx.JSON(http.StatusOK, ctx.Tr("user.static.downloadinfo")+"/api/v1/download_user_define_file?filename="+filename) | |||||
| } else { | } else { | ||||
| mapInterface := make(map[string]interface{}) | mapInterface := make(map[string]interface{}) | ||||
| re, count := models.QueryUserStaticDataPage(pageOpts) | re, count := models.QueryUserStaticDataPage(pageOpts) | ||||
| @@ -721,3 +728,114 @@ func TimingCountData() { | |||||
| startTime := currentTimeNow.AddDate(0, 0, -1).Format("2006-01-02") | startTime := currentTimeNow.AddDate(0, 0, -1).Format("2006-01-02") | ||||
| TimingCountDataByDateAndReCount(startTime, false) | TimingCountDataByDateAndReCount(startTime, false) | ||||
| } | } | ||||
// QueryUserActivity handles the admin request that exports user activity
// statistics for the [beginTime, endTime] date range (query parameters in
// "2006-01-02" format) to an xlsx file. The workbook is generated
// asynchronously; the JSON response carries a localized hint containing the
// download URL for the file once it is ready.
func QueryUserActivity(ctx *context.Context) {
	startDate := ctx.Query("beginTime")
	endDate := ctx.Query("endTime")
	// NOTE(review): parse errors are ignored; a malformed date yields the zero
	// time and therefore an epoch-based range — TODO confirm this is intended.
	t, _ := time.Parse("2006-01-02", startDate)
	// Expand the dates to full-day boundaries, then normalize to UTC.
	startTime := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
	startTime = startTime.UTC()
	t, _ = time.Parse("2006-01-02", endDate)
	endTime := time.Date(t.Year(), t.Month(), t.Day(), 23, 59, 59, 0, t.Location())
	endTime = endTime.UTC()
	sheetName := ctx.Tr("user.static.sheetname")
	filename := sheetName + "_" + startDate + "_" + endDate + ".xlsx"
	filePath := setting.AppDataPath + Excel_File_Path + filename
	// Drop any stale export with the same name before regenerating it.
	os.Remove(setting.AppDataPath + Excel_File_Path + filename)
	// Build the workbook in the background so this request returns immediately.
	go writeUserActivityToExcel(startTime, endTime, filePath, ctx)
	ctx.JSON(http.StatusOK, ctx.Tr("user.static.downloadinfo")+"/api/v1/download_user_define_file?filename="+filename)
}
| func writeUserActivityToExcel(startTime time.Time, endTime time.Time, filePath string, ctx *context.Context) { | |||||
| re := models.QueryDataForActivity(startTime, endTime) | |||||
| log.Info("return count=" + fmt.Sprint(len(re))) | |||||
| //writer exec file. | |||||
| xlsx := excelize.NewFile() | |||||
| sheetName := ctx.Tr("user.static.sheetname") | |||||
| index := xlsx.NewSheet(sheetName) | |||||
| xlsx.DeleteSheet("Sheet1") | |||||
| excelHeader := make([]string, 0) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.id")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.name")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.codemergecount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.commitcount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.issuecount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.commentcount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.watchedcount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.commitcodesize")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.solveissuecount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.CommitDatasetNum")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.CommitModelCount")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.email")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.phone")) | |||||
| excelHeader = append(excelHeader, ctx.Tr("user.static.registdate")) | |||||
| excelHeaderMap := make(map[string]string, 0) | |||||
| var j byte | |||||
| j = 0 | |||||
| for _, value := range excelHeader { | |||||
| excelColumn := getColumn(j) + fmt.Sprint(1) | |||||
| log.Info("excelColumn=" + excelColumn) | |||||
| excelHeaderMap[excelColumn] = value | |||||
| j++ | |||||
| } | |||||
| for k, v := range excelHeaderMap { | |||||
| //设置单元格的值 | |||||
| xlsx.SetCellValue(sheetName, k, v) | |||||
| } | |||||
| for i, userRecord := range re { | |||||
| row := i + 2 | |||||
| rows := fmt.Sprint(row) | |||||
| var tmp byte | |||||
| tmp = 0 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.ID) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Name) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.CodeMergeCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.CommitCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.IssueCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.CommentCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.WatchedCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.CommitCodeSize) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.SolveIssueCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.CommitDatasetNum) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.CommitModelCount) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Email) | |||||
| tmp = tmp + 1 | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, userRecord.Phone) | |||||
| tmp = tmp + 1 | |||||
| formatTime := userRecord.RegistDate.Format("2006-01-02 15:04:05") | |||||
| xlsx.SetCellValue(sheetName, getColumn(tmp)+rows, formatTime[0:len(formatTime)-3]) | |||||
| tmp = tmp + 1 | |||||
| } | |||||
| //设置默认打开的表单 | |||||
| xlsx.SetActiveSheet(index) | |||||
| os.Mkdir(setting.AppDataPath+Excel_File_Path, 0755) | |||||
| if err := xlsx.SaveAs(filePath); err != nil { | |||||
| log.Info("writer exel error." + err.Error()) | |||||
| } else { | |||||
| log.Info("write to file succeed, filepath=" + filePath) | |||||
| } | |||||
| } | |||||
| @@ -480,8 +480,13 @@ | |||||
| </div> | </div> | ||||
| <div id="dir_list{{.VersionName}}"> | <div id="dir_list{{.VersionName}}"> | ||||
| </div> | </div> | ||||
| {{if eq .ComputeResource "CPU/GPU"}} | |||||
| <div style="display:flex;align-items: center;justify-content: end;color: #f2711c;"> | |||||
| <i class="ri-error-warning-line" style="margin-right:0.5rem;"></i> | |||||
| <span>{{$.i18n.Tr "repo.file_limit_100"}}</span> | |||||
| </div> | |||||
| {{end}} | |||||
| </div> | </div> | ||||
| </div> | </div> | ||||
| @@ -488,7 +488,7 @@ | |||||
| <div class="ui tab" data-tab="second{{$k}}"> | <div class="ui tab" data-tab="second{{$k}}"> | ||||
| <div> | <div> | ||||
| <a id="{{.VersionName}}-log-down" | <a id="{{.VersionName}}-log-down" | ||||
| class='{{if and (.CanModify) (eq .Status "KILLED" "FAILED" "START_FAILED" "STOPPED" "COMPLETED") }}ti-download-file{{else}}disabled{{end}}' | |||||
| class='{{if and ($.CanLogDownload) (eq .Status "KILLED" "FAILED" "START_FAILED" "STOPPED" "COMPLETED") }}ti-download-file{{else}}disabled{{end}}' | |||||
| href="{{$.RepoLink}}/modelarts/train-job/{{.JobID}}/download_log_file?version_name={{.VersionName}}"> | href="{{$.RepoLink}}/modelarts/train-job/{{.JobID}}/download_log_file?version_name={{.VersionName}}"> | ||||
| <i class="ri-download-cloud-2-line"></i> | <i class="ri-download-cloud-2-line"></i> | ||||
| <span style="margin-left: 0.3rem;">{{$.i18n.Tr "repo.modelarts.download_log"}}</span> | <span style="margin-left: 0.3rem;">{{$.i18n.Tr "repo.modelarts.download_log"}}</span> | ||||
| @@ -446,24 +446,6 @@ | |||||
| ] | ] | ||||
| }, | }, | ||||
| work_server_number: { | |||||
| identifier : 'work_server_number', | |||||
| rules: [ | |||||
| { | |||||
| type : 'integer[1..25]', | |||||
| prompt : '计算节点需要在1-25之间,请您键入正确的值' | |||||
| } | |||||
| ] | |||||
| }, | |||||
| run_para_list:{ | |||||
| identifier : 'run_para_list', | |||||
| rules: [ | |||||
| { | |||||
| type: 'maxLength[255]', | |||||
| prompt : '所有字符最长不超过255个字符。' | |||||
| } | |||||
| ] | |||||
| }, | |||||
| }, | }, | ||||
| }) | }) | ||||
| @@ -512,24 +494,6 @@ | |||||
| ] | ] | ||||
| }, | }, | ||||
| work_server_number: { | |||||
| identifier : 'work_server_number', | |||||
| rules: [ | |||||
| { | |||||
| type : 'integer[1..25]', | |||||
| prompt : '计算节点需要在1-25之间,请您键入正确的值' | |||||
| } | |||||
| ] | |||||
| }, | |||||
| run_para_list:{ | |||||
| identifier : 'run_para_list', | |||||
| rules: [ | |||||
| { | |||||
| type: 'maxLength[255]', | |||||
| prompt : '所有字符最长不超过255个字符。' | |||||
| } | |||||
| ] | |||||
| }, | |||||
| }, | }, | ||||
| onSuccess: function(){ | onSuccess: function(){ | ||||
| // $('.ui.page.dimmer').dimmer('show') | // $('.ui.page.dimmer').dimmer('show') | ||||
| @@ -226,8 +226,11 @@ | |||||
| width="120px" | width="120px" | ||||
| align="center"> | align="center"> | ||||
| </el-table-column> | </el-table-column> | ||||
| <el-table-column prop="BindPhone" label="是否手机验证" width="120px" align="center"> | |||||
| <template slot-scope="scope"> {{scope.row.BindPhone ? '是' : '否'}} </template> | |||||
| <el-table-column | |||||
| prop="Phone" | |||||
| label="手机" | |||||
| width="120px" | |||||
| align="center"> | |||||
| </el-table-column> | </el-table-column> | ||||
| <el-table-column | <el-table-column | ||||
| prop="RegistDate" | prop="RegistDate" | ||||