| @@ -1861,6 +1861,7 @@ func CreateCloudbrain(cloudbrain *Cloudbrain) (err error) { | |||||
| session.Commit() | session.Commit() | ||||
| go IncreaseDatasetUseCount(cloudbrain.Uuid) | go IncreaseDatasetUseCount(cloudbrain.Uuid) | ||||
| go ResetRepoAITaskNum(cloudbrain.RepoID) | |||||
| return nil | return nil | ||||
| } | } | ||||
| @@ -2010,9 +2011,29 @@ func DeleteJob(job *Cloudbrain) error { | |||||
| func deleteJob(e Engine, job *Cloudbrain) error { | func deleteJob(e Engine, job *Cloudbrain) error { | ||||
| _, err := e.ID(job.ID).Delete(job) | _, err := e.ID(job.ID).Delete(job) | ||||
| if err == nil { | |||||
| go updateAITaskNumWhenDeleteJob(job) | |||||
| } | |||||
| return err | return err | ||||
| } | } | ||||
| func updateAITaskNumWhenDeleteJob(job *Cloudbrain) { | |||||
| repoId := job.RepoID | |||||
| if repoId == 0 { | |||||
| t := &Cloudbrain{} | |||||
| _, tempErr := x.ID(job.ID).Unscoped().Get(t) | |||||
| if tempErr != nil { | |||||
| log.Error("updateAITaskNumWhenDeleteJob error.%v", tempErr) | |||||
| return | |||||
| } | |||||
| repoId = t.RepoID | |||||
| } | |||||
| if repoId > 0 { | |||||
| ResetRepoAITaskNum(repoId) | |||||
| } | |||||
| } | |||||
| func GetCloudbrainByName(jobName string) (*Cloudbrain, error) { | func GetCloudbrainByName(jobName string) (*Cloudbrain, error) { | ||||
| cb := &Cloudbrain{JobName: jobName} | cb := &Cloudbrain{JobName: jobName} | ||||
| return getRepoCloudBrain(cb) | return getRepoCloudBrain(cb) | ||||
| @@ -2138,7 +2159,7 @@ func RestartCloudbrain(old *Cloudbrain, new *Cloudbrain) (err error) { | |||||
| } | } | ||||
| go IncreaseDatasetUseCount(new.Uuid) | go IncreaseDatasetUseCount(new.Uuid) | ||||
| go ResetRepoAITaskNum(new.RepoID) | |||||
| return nil | return nil | ||||
| } | } | ||||
| func CloudbrainAll(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { | func CloudbrainAll(opts *CloudbrainsOptions) ([]*CloudbrainInfo, int64, error) { | ||||
| @@ -2429,6 +2429,75 @@ func CheckRepoStats(ctx context.Context) error { | |||||
| } | } | ||||
| } | } | ||||
| // ***** END: Repository.NumForks ***** | // ***** END: Repository.NumForks ***** | ||||
| // ***** START: Repository.DatasetCnt ***** | |||||
| desc = "repository count 'dataset_cnt'" | |||||
| results, err = x.Query("SELECT repository.id FROM `repository` WHERE repository.dataset_cnt!=(select count(1) from attachment inner join dataset on attachment.dataset_id = dataset.id where dataset.repo_id = repository.id)") | |||||
| if err != nil { | |||||
| log.Error("Select %s: %v", desc, err) | |||||
| } else { | |||||
| for _, result := range results { | |||||
| id := com.StrTo(result["id"]).MustInt64() | |||||
| select { | |||||
| case <-ctx.Done(): | |||||
| log.Warn("CheckRepoStats: Cancelled") | |||||
| return ErrCancelledf("during %s for repo ID %d", desc, id) | |||||
| default: | |||||
| } | |||||
| log.Trace("Updating %s: %d", desc, id) | |||||
| err = ResetRepoDatasetNum(id) | |||||
| if err != nil { | |||||
| log.Error("Update %s[%d]: %v", desc, id, err) | |||||
| } | |||||
| } | |||||
| } | |||||
| // ***** END: Repository.DatasetCnt ***** | |||||
| // ***** START: Repository.ModelCnt ***** | |||||
| desc = "repository count 'model_cnt'" | |||||
| results, err = x.Query("SELECT repository.id FROM `repository` WHERE repository.model_cnt!=(select count(1) from ai_model_manage where repository.id = ai_model_manage.repo_id and ai_model_manage.size > 0 )") | |||||
| if err != nil { | |||||
| log.Error("Select %s: %v", desc, err) | |||||
| } else { | |||||
| for _, result := range results { | |||||
| id := com.StrTo(result["id"]).MustInt64() | |||||
| select { | |||||
| case <-ctx.Done(): | |||||
| log.Warn("CheckRepoStats: Cancelled") | |||||
| return ErrCancelledf("during %s for repo ID %d", desc, id) | |||||
| default: | |||||
| } | |||||
| log.Trace("Updating %s: %d", desc, id) | |||||
| err = ResetRepoModelNum(id) | |||||
| if err != nil { | |||||
| log.Error("Update %s[%d]: %v", desc, id, err) | |||||
| } | |||||
| } | |||||
| } | |||||
| // ***** END: Repository.ModelCnt ***** | |||||
| // ***** START: Repository.AiTaskCnt ***** | |||||
| desc = "repository count 'ai_task_cnt'" | |||||
| results, err = x.Query("SELECT repository.id FROM `repository` WHERE repository.ai_task_cnt!=(select count(1) from cloudbrain where repository.id = cloudbrain.repo_id and (cloudbrain.deleted_at is null or cloudbrain.deleted_at = '0001-01-01 00:00:00') )") | |||||
| if err != nil { | |||||
| log.Error("Select %s: %v", desc, err) | |||||
| } else { | |||||
| for _, result := range results { | |||||
| id := com.StrTo(result["id"]).MustInt64() | |||||
| select { | |||||
| case <-ctx.Done(): | |||||
| log.Warn("CheckRepoStats: Cancelled") | |||||
| return ErrCancelledf("during %s for repo ID %d", desc, id) | |||||
| default: | |||||
| } | |||||
| log.Trace("Updating %s: %d", desc, id) | |||||
| err = ResetRepoAITaskNum(id) | |||||
| if err != nil { | |||||
| log.Error("Update %s[%d]: %v", desc, id, err) | |||||
| } | |||||
| } | |||||
| } | |||||
| // ***** END: Repository.AiTaskCnt ***** | |||||
| return nil | return nil | ||||
| } | } | ||||
| @@ -2825,3 +2894,39 @@ func ReadLatestFileInRepo(userName, repoName, refName, treePath string) (*RepoFi | |||||
| } | } | ||||
| return &RepoFile{CommitId: commitId, Content: d}, nil | return &RepoFile{CommitId: commitId, Content: d}, nil | ||||
| } | } | ||||
| func ResetRepoAITaskNum(repoId int64) error { | |||||
| n, err := x.Where("repo_id = ? ", repoId).Count(&Cloudbrain{}) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| r := Repository{ | |||||
| AiTaskCnt: n, | |||||
| } | |||||
| _, err = x.Cols("ai_task_cnt").Where("id = ?", repoId).Update(&r) | |||||
| return err | |||||
| } | |||||
| func ResetRepoDatasetNum(repoId int64) error { | |||||
| n, err := x.Table("attachment").Join("inner", "dataset", "attachment.dataset_id = dataset.id").Where("dataset.repo_id = ?", repoId).Count() | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| r := Repository{ | |||||
| DatasetCnt: n, | |||||
| } | |||||
| _, err = x.Cols("dataset_cnt").Where("id = ?", repoId).Update(&r) | |||||
| return err | |||||
| } | |||||
| func ResetRepoModelNum(repoId int64) error { | |||||
| n, err := x.Where("repo_id = ? and size > 0 ", repoId).Count(&AiModelManage{}) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| r := Repository{ | |||||
| ModelCnt: n, | |||||
| } | |||||
| _, err = x.Cols("model_cnt").Where("id = ?", repoId).Update(&r) | |||||
| return err | |||||
| } | |||||
| @@ -2,6 +2,7 @@ package repo | |||||
| import ( | import ( | ||||
| "archive/zip" | "archive/zip" | ||||
| "code.gitea.io/gitea/services/repository" | |||||
| "encoding/json" | "encoding/json" | ||||
| "errors" | "errors" | ||||
| "fmt" | "fmt" | ||||
| @@ -120,6 +121,9 @@ func saveModelByParameters(jobId string, versionName string, name string, versio | |||||
| if err != nil { | if err != nil { | ||||
| return "", err | return "", err | ||||
| } | } | ||||
| if modelSize > 0 { | |||||
| go repository.ResetRepoModelNum(aiTask.RepoID) | |||||
| } | |||||
| if len(lastNewModelId) > 0 { | if len(lastNewModelId) > 0 { | ||||
| //update status and version count | //update status and version count | ||||
| models.ModifyModelNewProperty(lastNewModelId, MODEL_NOT_LATEST, 0) | models.ModifyModelNewProperty(lastNewModelId, MODEL_NOT_LATEST, 0) | ||||
| @@ -305,13 +309,14 @@ func getSize(files []storage.FileInfo) int64 { | |||||
| func UpdateModelSize(modeluuid string) { | func UpdateModelSize(modeluuid string) { | ||||
| model, err := models.QueryModelById(modeluuid) | model, err := models.QueryModelById(modeluuid) | ||||
| if err == nil { | if err == nil { | ||||
| var size int64 | |||||
| if model.Type == models.TypeCloudBrainOne { | if model.Type == models.TypeCloudBrainOne { | ||||
| if strings.HasPrefix(model.Path, setting.Attachment.Minio.Bucket+"/"+Model_prefix) { | if strings.HasPrefix(model.Path, setting.Attachment.Minio.Bucket+"/"+Model_prefix) { | ||||
| files, err := storage.GetAllObjectByBucketAndPrefixMinio(setting.Attachment.Minio.Bucket, model.Path[len(setting.Attachment.Minio.Bucket)+1:]) | files, err := storage.GetAllObjectByBucketAndPrefixMinio(setting.Attachment.Minio.Bucket, model.Path[len(setting.Attachment.Minio.Bucket)+1:]) | ||||
| if err != nil { | if err != nil { | ||||
| log.Info("Failed to query model size from minio. id=" + modeluuid) | log.Info("Failed to query model size from minio. id=" + modeluuid) | ||||
| } | } | ||||
| size := getSize(files) | |||||
| size = getSize(files) | |||||
| models.ModifyModelSize(modeluuid, size) | models.ModifyModelSize(modeluuid, size) | ||||
| } | } | ||||
| } else if model.Type == models.TypeCloudBrainTwo { | } else if model.Type == models.TypeCloudBrainTwo { | ||||
| @@ -320,10 +325,13 @@ func UpdateModelSize(modeluuid string) { | |||||
| if err != nil { | if err != nil { | ||||
| log.Info("Failed to query model size from obs. id=" + modeluuid) | log.Info("Failed to query model size from obs. id=" + modeluuid) | ||||
| } | } | ||||
| size := getSize(files) | |||||
| size = getSize(files) | |||||
| models.ModifyModelSize(modeluuid, size) | models.ModifyModelSize(modeluuid, size) | ||||
| } | } | ||||
| } | } | ||||
| if model.Size == 0 && size > 0 { | |||||
| repository.ResetRepoModelNum(model.RepoId) | |||||
| } | |||||
| } else { | } else { | ||||
| log.Info("not found model,uuid=" + modeluuid) | log.Info("not found model,uuid=" + modeluuid) | ||||
| } | } | ||||
| @@ -438,13 +446,14 @@ func DeleteModelFile(ctx *context.Context) { | |||||
| fileName := ctx.Query("fileName") | fileName := ctx.Query("fileName") | ||||
| model, err := models.QueryModelById(id) | model, err := models.QueryModelById(id) | ||||
| if err == nil { | if err == nil { | ||||
| var totalSize int64 | |||||
| if model.ModelType == MODEL_LOCAL_TYPE { | if model.ModelType == MODEL_LOCAL_TYPE { | ||||
| if model.Type == models.TypeCloudBrainOne { | if model.Type == models.TypeCloudBrainOne { | ||||
| bucketName := setting.Attachment.Minio.Bucket | bucketName := setting.Attachment.Minio.Bucket | ||||
| objectName := model.Path[len(bucketName)+1:] + fileName | objectName := model.Path[len(bucketName)+1:] + fileName | ||||
| log.Info("delete bucket=" + bucketName + " path=" + objectName) | log.Info("delete bucket=" + bucketName + " path=" + objectName) | ||||
| if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) { | if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) { | ||||
| totalSize := storage.MinioGetFilesSize(bucketName, []string{objectName}) | |||||
| totalSize = storage.MinioGetFilesSize(bucketName, []string{objectName}) | |||||
| err := storage.Attachments.DeleteDir(objectName) | err := storage.Attachments.DeleteDir(objectName) | ||||
| if err != nil { | if err != nil { | ||||
| log.Info("Failed to delete model. id=" + id) | log.Info("Failed to delete model. id=" + id) | ||||
| @@ -464,7 +473,7 @@ func DeleteModelFile(ctx *context.Context) { | |||||
| objectName := model.Path[len(setting.Bucket)+1:] + fileName | objectName := model.Path[len(setting.Bucket)+1:] + fileName | ||||
| log.Info("delete bucket=" + setting.Bucket + " path=" + objectName) | log.Info("delete bucket=" + setting.Bucket + " path=" + objectName) | ||||
| if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) { | if strings.HasPrefix(model.Path, bucketName+"/"+Model_prefix) { | ||||
| totalSize := storage.ObsGetFilesSize(bucketName, []string{objectName}) | |||||
| totalSize = storage.ObsGetFilesSize(bucketName, []string{objectName}) | |||||
| err := storage.ObsRemoveObject(bucketName, objectName) | err := storage.ObsRemoveObject(bucketName, objectName) | ||||
| if err != nil { | if err != nil { | ||||
| log.Info("Failed to delete model. id=" + id) | log.Info("Failed to delete model. id=" + id) | ||||
| @@ -481,6 +490,9 @@ func DeleteModelFile(ctx *context.Context) { | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| if (model.Size - totalSize) <= 0 { | |||||
| repository.ResetRepoModelNum(model.RepoId) | |||||
| } | |||||
| } | } | ||||
| ctx.JSON(200, map[string]string{ | ctx.JSON(200, map[string]string{ | ||||
| "code": "0", | "code": "0", | ||||
| @@ -549,6 +561,9 @@ func deleteModelByID(ctx *context.Context, id string) error { | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| if model.Size > 0 { | |||||
| go repository.ResetRepoModelNum(model.RepoId) | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| return err | return err | ||||
| @@ -29,6 +29,7 @@ import ( | |||||
| "code.gitea.io/gitea/modules/storage" | "code.gitea.io/gitea/modules/storage" | ||||
| "code.gitea.io/gitea/modules/upload" | "code.gitea.io/gitea/modules/upload" | ||||
| "code.gitea.io/gitea/modules/worker" | "code.gitea.io/gitea/modules/worker" | ||||
| repo_service "code.gitea.io/gitea/services/repository" | |||||
| gouuid "github.com/satori/go.uuid" | gouuid "github.com/satori/go.uuid" | ||||
| ) | ) | ||||
| @@ -180,6 +181,7 @@ func DeleteAttachment(ctx *context.Context) { | |||||
| ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err)) | ctx.Error(500, fmt.Sprintf("DeleteAttachment: %v", err)) | ||||
| return | return | ||||
| } | } | ||||
| go repo_service.ResetRepoDatasetNumByDatasetId(attach.DatasetID) | |||||
| attachjson, _ := json.Marshal(attach) | attachjson, _ := json.Marshal(attach) | ||||
| labelmsg.SendDeleteAttachToLabelSys(string(attachjson)) | labelmsg.SendDeleteAttachToLabelSys(string(attachjson)) | ||||
| @@ -894,6 +896,7 @@ func CompleteMultipart(ctx *context.Context) { | |||||
| return | return | ||||
| } | } | ||||
| attachment.UpdateDatasetUpdateUnix() | attachment.UpdateDatasetUpdateUnix() | ||||
| go repo_service.ResetRepoDatasetNumByDatasetId(dataset.ID) | |||||
| repository, _ := models.GetRepositoryByID(dataset.RepoID) | repository, _ := models.GetRepositoryByID(dataset.RepoID) | ||||
| notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment) | notification.NotifyOtherTask(ctx.User, repository, fmt.Sprint(repository.IsPrivate, attachment.IsPrivate), attachment.Name, models.ActionUploadAttachment) | ||||
| if attachment.DatasetID != 0 { | if attachment.DatasetID != 0 { | ||||
| @@ -5,18 +5,17 @@ | |||||
| package repository | package repository | ||||
| import ( | import ( | ||||
| "fmt" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "os" | |||||
| "strings" | |||||
| "code.gitea.io/gitea/models" | "code.gitea.io/gitea/models" | ||||
| "code.gitea.io/gitea/modules/log" | "code.gitea.io/gitea/modules/log" | ||||
| "code.gitea.io/gitea/modules/notification" | "code.gitea.io/gitea/modules/notification" | ||||
| repo_module "code.gitea.io/gitea/modules/repository" | repo_module "code.gitea.io/gitea/modules/repository" | ||||
| "code.gitea.io/gitea/modules/setting" | "code.gitea.io/gitea/modules/setting" | ||||
| pull_service "code.gitea.io/gitea/services/pull" | pull_service "code.gitea.io/gitea/services/pull" | ||||
| "fmt" | |||||
| "io/ioutil" | |||||
| "net/http" | |||||
| "os" | |||||
| "strings" | |||||
| ) | ) | ||||
| const SHELL_FLAG_ON = 1 | const SHELL_FLAG_ON = 1 | ||||
| @@ -332,3 +331,19 @@ func IsUploadFileInvalidErr(err error) bool { | |||||
| _, ok := err.(UploadFileInvalidErr) | _, ok := err.(UploadFileInvalidErr) | ||||
| return ok | return ok | ||||
| } | } | ||||
| func ResetRepoDatasetNumByDatasetId(datasetID int64) error { | |||||
| dataset, err := models.GetDatasetByID(datasetID) | |||||
| if err != nil { | |||||
| return err | |||||
| } | |||||
| return models.ResetRepoDatasetNum(dataset.RepoID) | |||||
| } | |||||
| func ResetRepoModelNum(repoId int64) error { | |||||
| return models.ResetRepoModelNum(repoId) | |||||
| } | |||||
| func ResetRepoAITaskNum(repoId int64) error { | |||||
| return models.ResetRepoAITaskNum(repoId) | |||||
| } | |||||