| @@ -14,7 +14,17 @@ create table Node ( | |||
| LastReportTime timestamp comment '节点上次上报时间' | |||
| ) comment = '节点表'; | |||
| insert into Node (NodeID, Name, LocalIP, ExternalIP, LocationID, State) values (0, "LocalNode", "localhost", "localhost", 0, 1); | |||
| insert into | |||
| Node ( | |||
| NodeID, | |||
| Name, | |||
| LocalIP, | |||
| ExternalIP, | |||
| LocationID, | |||
| State | |||
| ) | |||
| values | |||
| (0, "LocalNode", "localhost", "localhost", 0, 1); | |||
| create table Storage ( | |||
| StorageID int not null auto_increment primary key comment '存储服务ID', | |||
| @@ -24,7 +34,10 @@ create table Storage ( | |||
| State varchar(100) comment '状态' | |||
| ) comment = "存储服务表"; | |||
| insert into Storage (StorageID, Name, NodeID, Directory, State) values (1, "HuaWei-Cloud", 1,"/" ,"Online"); | |||
| insert into | |||
| Storage (StorageID, Name, NodeID, Directory, State) | |||
| values | |||
| (1, "HuaWei-Cloud", 1, "/", "Online"); | |||
| create table NodeDelay ( | |||
| SourceNodeID int not null comment '发起检测的节点ID', | |||
| @@ -43,14 +56,22 @@ create table UserBucket ( | |||
| BucketID int not null comment '用户可访问的桶ID', | |||
| primary key(UserID, BucketID) | |||
| ) comment = '用户桶权限表'; | |||
| insert into UserBucket (UserID, BucketID) values (0, 1); | |||
| insert into | |||
| UserBucket (UserID, BucketID) | |||
| values | |||
| (0, 1); | |||
| create table UserNode ( | |||
| UserID int not null comment '用户ID', | |||
| NodeID int not null comment '用户可使用的节点ID', | |||
| primary key(UserID, NodeID) | |||
| ) comment = '用户节点权限表'; | |||
| insert into UserNode (UserID, NodeID) values (0, 1); | |||
| insert into | |||
| UserNode (UserID, NodeID) | |||
| values | |||
| (0, 1); | |||
| create table UserStorage ( | |||
| UserID int not null comment "用户ID", | |||
| @@ -58,7 +79,10 @@ create table UserStorage ( | |||
| primary key(UserID, StorageID) | |||
| ); | |||
| insert into UserStorage (UserID, StorageID) values (0, 1); | |||
| insert into | |||
| UserStorage (UserID, StorageID) | |||
| values | |||
| (0, 1); | |||
| create table Bucket ( | |||
| BucketID int not null auto_increment primary key comment '桶ID', | |||
| @@ -66,29 +90,37 @@ create table Bucket ( | |||
| CreatorID int not null comment '创建者ID' | |||
| ) comment = '桶表'; | |||
| insert into Bucket (BucketID, Name, CreatorID) values (0, "bucket01", 0); | |||
| insert into | |||
| Bucket (BucketID, Name, CreatorID) | |||
| values | |||
| (0, "bucket01", 0); | |||
| create table Object ( | |||
| ObjectID int not null auto_increment primary key comment '对象ID', | |||
| create table Package ( | |||
| PackageID int not null auto_increment primary key comment '包ID', | |||
| Name varchar(100) not null comment '对象名', | |||
| BucketID int not null comment '桶ID', | |||
| State varchar(100) not null comment '对象状态', | |||
| FileSize bigint not null comment '对象大小(Byte)', | |||
| Redundancy varchar(100) not null comment '对象冗余策略' | |||
| DirName varchar(100) not null comment '对象所属文件夹' | |||
| State varchar(100) not null comment '状态', | |||
| Redundancy JSON not null comment '冗余策略' | |||
| ); | |||
| create table Object ( | |||
| ObjectID int not null auto_increment primary key comment '对象ID', | |||
| PackageID int not null comment '包ID', | |||
| Path varchar(1000) not null comment '对象路径', | |||
| Size bigint not null comment '对象大小(Byte)', | |||
| UNIQUE KEY PackagePath (PackageID, Path) | |||
| ) comment = '对象表'; | |||
| create table ObjectRep ( | |||
| ObjectID int not null primary key comment '对象ID', | |||
| RepCount int not null comment '对象的副本数', | |||
| FileHash varchar(100) not null comment '副本哈希值' | |||
| ) comment = '对象副本表'; | |||
| create table ObjectBlock ( | |||
| BlockID int not null auto_increment primary key comment '编码块块ID', | |||
| ObjectID int not null comment '对象ID', | |||
| InnerID int not null comment '编码块在条带内的排序', | |||
| BlockHash varchar(100) not null comment '编码块哈希值' | |||
| Index int not null comment '编码块在条带内的排序', | |||
| FileHash varchar(100) not null comment '编码块哈希值', | |||
| primary key(ObjectID, Index) | |||
| ) comment = '对象编码块表'; | |||
| create table Cache ( | |||
| @@ -100,19 +132,23 @@ create table Cache ( | |||
| primary key(FileHash, NodeID) | |||
| ) comment = '缓存表'; | |||
| create table StorageObject ( | |||
| ObjectID int not null comment '对象ID', | |||
| create table StoragePackage ( | |||
| PackageID int not null comment '包ID', | |||
| StorageID int not null comment '存储服务ID', | |||
| UserID int not null comment '调度了此文件的用户ID', | |||
| State varchar(100) not null comment '对象状态', | |||
| primary key(ObjectID, StorageID, UserID) | |||
| State varchar(100) not null comment '包状态', | |||
| primary key(PackageID, StorageID, UserID) | |||
| ); | |||
| create table Location ( | |||
| LocationID int not null auto_increment primary key comment 'ID', | |||
| Name varchar(128) not null comment '名称' | |||
| ) comment = '地域表'; | |||
| insert into Location (LocationID, Name) values (1, "Local"); | |||
| insert into | |||
| Location (LocationID, Name) | |||
| values | |||
| (1, "Local"); | |||
| create table Ec ( | |||
| EcID int not null comment '纠删码ID', | |||
| @@ -121,5 +157,12 @@ create table Ec ( | |||
| EcN int not null comment 'ecN' | |||
| ) comment = '纠删码表'; | |||
| insert into Ec (EcID, Name, EcK, EcN) values (1, "rs_9_6", 6, 9); | |||
| insert into Ec (EcID, Name, EcK, EcN) values (2, "rs_5_3", 3, 5); | |||
| insert into | |||
| Ec (EcID, Name, EcK, EcN) | |||
| values | |||
| (1, "rs_9_6", 6, 9); | |||
| insert into | |||
| Ec (EcID, Name, EcK, EcN) | |||
| values | |||
| (2, "rs_5_3", 3, 5); | |||
| @@ -10,14 +10,14 @@ const ( | |||
| ) | |||
| const ( | |||
| ObjectStateNormal = "Normal" | |||
| ObjectStateDeleted = "Deleted" | |||
| PackageStateNormal = "Normal" | |||
| PackageStateDeleted = "Deleted" | |||
| ) | |||
| const ( | |||
| StorageObjectStateNormal = "Normal" | |||
| StorageObjectStateDeleted = "Deleted" | |||
| StorageObjectStateOutdated = "Outdated" | |||
| StoragePackageStateNormal = "Normal" | |||
| StoragePackageStateDeleted = "Deleted" | |||
| StoragePackageStateOutdated = "Outdated" | |||
| ) | |||
| const ( | |||
| @@ -15,27 +15,60 @@ require ( | |||
| require ( | |||
| github.com/antonfisher/nested-logrus-formatter v1.3.1 // indirect | |||
| github.com/benbjohnson/clock v1.3.0 // indirect | |||
| github.com/coreos/go-semver v0.3.0 // indirect | |||
| github.com/coreos/go-systemd/v22 v22.5.0 // indirect | |||
| github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect | |||
| github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect | |||
| github.com/gogo/protobuf v1.3.2 // indirect | |||
| github.com/golang/protobuf v1.5.3 // indirect | |||
| github.com/google/uuid v1.3.0 // indirect | |||
| github.com/gopherjs/gopherjs v1.17.2 // indirect | |||
| github.com/hashicorp/errwrap v1.1.0 // indirect | |||
| github.com/hashicorp/go-multierror v1.1.1 // indirect | |||
| github.com/ipfs/boxo v0.8.0 // indirect | |||
| github.com/ipfs/go-cid v0.4.0 // indirect | |||
| github.com/ipfs/go-ipfs-api v0.6.0 // indirect | |||
| github.com/json-iterator/go v1.1.12 // indirect | |||
| github.com/jtolds/gls v4.20.0+incompatible // indirect | |||
| github.com/klauspost/cpuid/v2 v2.2.3 // indirect | |||
| github.com/libp2p/go-buffer-pool v0.1.0 // indirect | |||
| github.com/libp2p/go-flow-metrics v0.1.0 // indirect | |||
| github.com/libp2p/go-libp2p v0.26.3 // indirect | |||
| github.com/minio/sha256-simd v1.0.0 // indirect | |||
| github.com/mitchellh/go-homedir v1.1.0 // indirect | |||
| github.com/mitchellh/mapstructure v1.5.0 // indirect | |||
| github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect | |||
| github.com/modern-go/reflect2 v1.0.2 // indirect | |||
| github.com/mr-tron/base58 v1.2.0 // indirect | |||
| github.com/multiformats/go-base32 v0.1.0 // indirect | |||
| github.com/multiformats/go-base36 v0.2.0 // indirect | |||
| github.com/multiformats/go-multiaddr v0.8.0 // indirect | |||
| github.com/multiformats/go-multibase v0.1.1 // indirect | |||
| github.com/multiformats/go-multicodec v0.8.1 // indirect | |||
| github.com/multiformats/go-multihash v0.2.1 // indirect | |||
| github.com/multiformats/go-multistream v0.4.1 // indirect | |||
| github.com/multiformats/go-varint v0.0.7 // indirect | |||
| github.com/sirupsen/logrus v1.9.2 // indirect | |||
| github.com/smartystreets/assertions v1.13.1 // indirect | |||
| github.com/spaolacci/murmur3 v1.1.0 // indirect | |||
| github.com/streadway/amqp v1.1.0 // indirect | |||
| github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c // indirect | |||
| github.com/zyedidia/generic v1.2.1 // indirect | |||
| go.etcd.io/etcd/api/v3 v3.5.9 // indirect | |||
| go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect | |||
| go.etcd.io/etcd/client/v3 v3.5.9 // indirect | |||
| go.uber.org/atomic v1.10.0 // indirect | |||
| go.uber.org/multierr v1.9.0 // indirect | |||
| go.uber.org/zap v1.24.0 // indirect | |||
| golang.org/x/crypto v0.6.0 // indirect | |||
| golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect | |||
| golang.org/x/net v0.8.0 // indirect | |||
| golang.org/x/sync v0.1.0 // indirect | |||
| golang.org/x/sys v0.6.0 // indirect | |||
| golang.org/x/text v0.8.0 // indirect | |||
| google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd // indirect | |||
| lukechampine.com/blake3 v1.1.7 // indirect | |||
| ) | |||
| go 1.20 | |||
| @@ -4,14 +4,29 @@ github.com/baohan10/reedsolomon v0.0.0-20230406042632-43574cac9fa7 h1:wcvD6enR// | |||
| github.com/baohan10/reedsolomon v0.0.0-20230406042632-43574cac9fa7/go.mod h1:rAxMF6pVaFK/s6T4gGczvloccNbtwzuYaP2Y7W6flE8= | |||
| github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw= | |||
| github.com/beevik/etree v1.2.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= | |||
| github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= | |||
| github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= | |||
| github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= | |||
| github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= | |||
| github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= | |||
| github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= | |||
| github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= | |||
| github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= | |||
| github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= | |||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||
| github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= | |||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |||
| github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= | |||
| github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= | |||
| github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= | |||
| github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw= | |||
| github.com/go-ping/ping v1.1.0/go.mod h1:xIFjORFzTxqIV/tDVGO4eDy/bLuSyawEeojSm3GfRGk= | |||
| github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= | |||
| github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= | |||
| github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= | |||
| github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= | |||
| github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= | |||
| github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= | |||
| github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= | |||
| github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= | |||
| github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= | |||
| @@ -28,24 +43,63 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY | |||
| github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= | |||
| github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= | |||
| github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= | |||
| github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs= | |||
| github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA= | |||
| github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA= | |||
| github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= | |||
| github.com/ipfs/go-ipfs-api v0.6.0 h1:JARgG0VTbjyVhO5ZfesnbXv9wTcMvoKRBLF1SzJqzmg= | |||
| github.com/ipfs/go-ipfs-api v0.6.0/go.mod h1:iDC2VMwN9LUpQV/GzEeZ2zNqd8NUdRmWcFM+K/6odf0= | |||
| github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= | |||
| github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= | |||
| github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= | |||
| github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= | |||
| github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= | |||
| github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= | |||
| github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= | |||
| github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= | |||
| github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= | |||
| github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= | |||
| github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= | |||
| github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= | |||
| github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= | |||
| github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= | |||
| github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= | |||
| github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= | |||
| github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= | |||
| github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= | |||
| github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM= | |||
| github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8= | |||
| github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= | |||
| github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= | |||
| github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= | |||
| github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= | |||
| github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= | |||
| github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= | |||
| github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= | |||
| github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= | |||
| github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= | |||
| github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= | |||
| github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= | |||
| github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= | |||
| github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= | |||
| github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= | |||
| github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= | |||
| github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= | |||
| github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= | |||
| github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= | |||
| github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU= | |||
| github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs= | |||
| github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI= | |||
| github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8= | |||
| github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8= | |||
| github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= | |||
| github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108= | |||
| github.com/multiformats/go-multihash v0.2.1/go.mod h1:WxoMcYG85AZVQUyRyo9s4wULvW5qrI9vb2Lt6evduFc= | |||
| github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= | |||
| github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= | |||
| github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= | |||
| github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= | |||
| github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= | |||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | |||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | |||
| github.com/samber/lo v1.36.0 h1:4LaOxH1mHnbDGhTVE0i1z8v/lWaQW8AIfOD3HU4mSaw= | |||
| @@ -56,23 +110,59 @@ github.com/smartystreets/assertions v1.13.1 h1:Ef7KhSmjZcK6AVf9YbJdvPYG9avaF0Zxu | |||
| github.com/smartystreets/assertions v1.13.1/go.mod h1:cXr/IwVfSo/RbCSPhoAPv73p3hlSdrBH/b3SdnW/LMY= | |||
| github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL+JXWq3w= | |||
| github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg= | |||
| github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= | |||
| github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= | |||
| github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM= | |||
| github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg= | |||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | |||
| github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= | |||
| github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | |||
| github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= | |||
| github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= | |||
| github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= | |||
| github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c h1:GGsyl0dZ2jJgVT+VvWBf/cNijrHRhkrTjkmp5wg7li0= | |||
| github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs= | |||
| github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||
| github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | |||
| github.com/zyedidia/generic v1.2.1 h1:Zv5KS/N2m0XZZiuLS82qheRG4X1o5gsWreGb0hR7XDc= | |||
| github.com/zyedidia/generic v1.2.1/go.mod h1:ly2RBz4mnz1yeuVbQA/VFwGjK3mnHGRj1JuoG336Bis= | |||
| go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= | |||
| go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= | |||
| go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= | |||
| go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= | |||
| go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= | |||
| go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= | |||
| go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= | |||
| go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= | |||
| go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= | |||
| go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= | |||
| go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= | |||
| go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= | |||
| go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= | |||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | |||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | |||
| golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= | |||
| golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= | |||
| golang.org/x/exp v0.0.0-20230519143937-03e91628a987 h1:3xJIFvzUFbu4ls0BTBYcgbCGhA63eAOEMxIHugyXJqA= | |||
| golang.org/x/exp v0.0.0-20230519143937-03e91628a987/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= | |||
| golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |||
| golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
| golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | |||
| golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= | |||
| golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= | |||
| golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= | |||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= | |||
| golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
| golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
| golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |||
| golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | |||
| @@ -80,11 +170,18 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc | |||
| golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= | |||
| golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | |||
| golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | |||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | |||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | |||
| golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= | |||
| golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= | |||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | |||
| golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |||
| golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= | |||
| golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | |||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
| golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |||
| google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd h1:sLpv7bNL1AsX3fdnWh9WVh7ejIzXdOc1RRHGeAmeStU= | |||
| google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= | |||
| google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= | |||
| @@ -94,5 +191,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ | |||
| google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= | |||
| google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= | |||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | |||
| gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= | |||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |||
| gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | |||
| lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= | |||
| lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= | |||
| @@ -17,11 +17,11 @@ func NewRedundancyRepData(fileHash string) RepRedundancyData { | |||
| } | |||
| type ECRedundancyData struct { | |||
| Ec EC `json:"ec"` | |||
| Blocks []ObjectBlock `json:"blocks"` | |||
| Ec EC `json:"ec"` | |||
| Blocks []ObjectBlockData `json:"blocks"` | |||
| } | |||
| func NewRedundancyEcData(ec EC, blocks []ObjectBlock) ECRedundancyData { | |||
| func NewRedundancyEcData(ec EC, blocks []ObjectBlockData) ECRedundancyData { | |||
| return ECRedundancyData{ | |||
| Ec: ec, | |||
| Blocks: blocks, | |||
| @@ -35,15 +35,17 @@ type EC struct { | |||
| EcN int `json:"ecN"` | |||
| } | |||
| type ObjectBlock struct { | |||
| Index int `json:"index"` | |||
| FileHash string `json:"fileHash"` | |||
| type ObjectBlockData struct { | |||
| Index int `json:"index"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewObjectBlock(index int, fileHash string) ObjectBlock { | |||
| return ObjectBlock{ | |||
| func NewObjectBlockData(index int, fileHash string, nodeIDs []int64) ObjectBlockData { | |||
| return ObjectBlockData{ | |||
| Index: index, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| @@ -55,3 +57,27 @@ func NewEc(id int, name string, ecK int, ecN int) EC { | |||
| EcN: ecN, | |||
| } | |||
| } | |||
| type ObjectRepData struct { | |||
| ObjectID int64 `json:"objectID"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewObjectRepData(objectID int64, fileHash string, nodeIDs []int64) ObjectRepData { | |||
| return ObjectRepData{ | |||
| ObjectID: objectID, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| type ObjectECData struct { | |||
| Blocks []ObjectBlockData `json:"blocks"` | |||
| } | |||
| func NewObjectECData(blocks []ObjectBlockData) ObjectECData { | |||
| return ObjectECData{ | |||
| Blocks: blocks, | |||
| } | |||
| } | |||
| @@ -0,0 +1,447 @@ | |||
| package cmd | |||
| import ( | |||
| "bytes" | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "os" | |||
| "path/filepath" | |||
| "sync" | |||
| "time" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| mygrpc "gitlink.org.cn/cloudream/storage-common/utils/grpc" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/ec" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/iterator" | |||
| agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| agentcaller "gitlink.org.cn/cloudream/storage-common/pkgs/proto" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/credentials/insecure" | |||
| ) | |||
| type CreateECPackage struct { | |||
| userID int64 | |||
| bucketID int64 | |||
| name string | |||
| objectIter iterator.UploadingObjectIterator | |||
| redundancy models.ECRedundancyInfo | |||
| ecPacketSize int64 | |||
| uploadConfig UploadConfig | |||
| Result CreateECPackageResult | |||
| } | |||
| type CreateECPackageResult struct { | |||
| PackageID int64 | |||
| ObjectResults []ECObjectUploadResult | |||
| } | |||
| type ECObjectUploadResult struct { | |||
| Info *iterator.IterUploadingObject | |||
| Error error | |||
| ObjectID int64 | |||
| } | |||
| func NewCreateECPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy models.ECRedundancyInfo, ecPacketSize int64, uploadConfig UploadConfig) *CreateECPackage { | |||
| return &CreateECPackage{ | |||
| userID: userID, | |||
| bucketID: bucketID, | |||
| name: name, | |||
| objectIter: objIter, | |||
| redundancy: redundancy, | |||
| ecPacketSize: ecPacketSize, | |||
| uploadConfig: uploadConfig, | |||
| } | |||
| } | |||
| func (t *CreateECPackage) Execute(ctx TaskContext, complete CompleteFn) { | |||
| err := t.do(ctx) | |||
| t.objectIter.Close() | |||
| complete(err, CompleteOption{ | |||
| RemovingDelay: time.Minute, | |||
| }) | |||
| } | |||
| func (t *CreateECPackage) do(ctx TaskContext) error { | |||
| // TODO2 | |||
| /* | |||
| reqBlder := reqbuilder.NewBuilder() | |||
| for _, uploadObject := range t.Objects { | |||
| reqBlder.Metadata(). | |||
| // 用于防止创建了多个同名对象 | |||
| Object().CreateOne(t.bucketID, uploadObject.ObjectName) | |||
| } | |||
| // 如果本地的IPFS也是存储系统的一个节点,那么从本地上传时,需要加锁 | |||
| if t.uploadConfig.LocalNodeID != nil { | |||
| reqBlder.IPFS().CreateAnyRep(*t.uploadConfig.LocalNodeID) | |||
| } | |||
| mutex, err := reqBlder. | |||
| Metadata(). | |||
| // 用于判断用户是否有桶的权限 | |||
| UserBucket().ReadOne(t.userID, t.bucketID). | |||
| // 用于查询可用的上传节点 | |||
| Node().ReadAny(). | |||
| // 用于设置Rep配置 | |||
| ObjectRep().CreateAny(). | |||
| // 用于创建Cache记录 | |||
| Cache().CreateAny(). | |||
| MutexLock(ctx.DistLock()) | |||
| if err != nil { | |||
| return fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer mutex.Unlock() | |||
| */ | |||
| createPkgResp, err := ctx.Coordinator().CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name, | |||
| models.NewTypedRedundancyInfo(models.RedundancyRep, t.redundancy))) | |||
| if err != nil { | |||
| return fmt.Errorf("creating package: %w", err) | |||
| } | |||
| getUserNodesResp, err := ctx.Coordinator().GetUserNodes(coormq.NewGetUserNodes(t.userID)) | |||
| if err != nil { | |||
| return fmt.Errorf("getting user nodes: %w", err) | |||
| } | |||
| findCliLocResp, err := ctx.Coordinator().FindClientLocation(coormq.NewFindClientLocation(t.uploadConfig.ExternalIP)) | |||
| if err != nil { | |||
| return fmt.Errorf("finding client location: %w", err) | |||
| } | |||
| uploadNodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo { | |||
| return UploadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID, | |||
| } | |||
| }) | |||
| getECResp, err := ctx.Coordinator().GetECConfig(coormq.NewGetECConfig(t.redundancy.ECName)) | |||
| if err != nil { | |||
| return fmt.Errorf("getting ec: %w", err) | |||
| } | |||
| /* | |||
| TODO2 | |||
| // 防止上传的副本被清除 | |||
| mutex2, err := reqbuilder.NewBuilder(). | |||
| IPFS().CreateAnyRep(uploadNode.Node.NodeID). | |||
| MutexLock(ctx.DistLock()) | |||
| if err != nil { | |||
| return fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer mutex2.Unlock() | |||
| */ | |||
| rets, err := uploadAndUpdateECPackage(ctx, createPkgResp.PackageID, t.objectIter, uploadNodeInfos, getECResp.Config, t.ecPacketSize, t.uploadConfig) | |||
| if err != nil { | |||
| return err | |||
| } | |||
| t.Result.PackageID = createPkgResp.PackageID | |||
| t.Result.ObjectResults = rets | |||
| return nil | |||
| } | |||
| func uploadAndUpdateECPackage(ctx TaskContext, packageID int64, objectIter iterator.UploadingObjectIterator, uploadNodes []UploadNodeInfo, ec model.Ec, ecPacketSize int64, uploadConfig UploadConfig) ([]ECObjectUploadResult, error) { | |||
| var uploadRets []ECObjectUploadResult | |||
| //上传文件夹 | |||
| var adds []coormq.AddECObjectInfo | |||
| for { | |||
| objInfo, err := objectIter.MoveNext() | |||
| if err == iterator.ErrNoMoreItem { | |||
| break | |||
| } | |||
| if err != nil { | |||
| return nil, fmt.Errorf("reading object: %w", err) | |||
| } | |||
| fileHashes, uploadedNodeIDs, err := uploadECObject(ctx, objInfo, uploadNodes, ec, ecPacketSize, uploadConfig) | |||
| uploadRets = append(uploadRets, ECObjectUploadResult{ | |||
| Info: objInfo, | |||
| Error: err, | |||
| }) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("uploading object: %w", err) | |||
| } | |||
| adds = append(adds, coormq.NewAddECObjectInfo(objInfo.Path, objInfo.Size, fileHashes, uploadedNodeIDs)) | |||
| } | |||
| _, err := ctx.Coordinator().UpdateECPackage(coormq.NewUpdateECPackage(packageID, adds, nil)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("updating package: %w", err) | |||
| } | |||
| return uploadRets, nil | |||
| } | |||
| // 上传文件 | |||
| func uploadECObject(ctx TaskContext, obj *iterator.IterUploadingObject, uploadNodes []UploadNodeInfo, ec model.Ec, ecPacketSize int64, uploadConfig UploadConfig) ([]string, []int64, error) { | |||
| //生成纠删码的写入节点序列 | |||
| nodes := make([]UploadNodeInfo, ec.EcN) | |||
| numNodes := len(uploadNodes) | |||
| startWriteNodeID := rand.Intn(numNodes) | |||
| for i := 0; i < ec.EcN; i++ { | |||
| nodes[i] = uploadNodes[(startWriteNodeID+i)%numNodes] | |||
| } | |||
| hashs, err := ecWrite(obj.File, obj.Size, ec.EcK, ec.EcN, nodes, ecPacketSize, uploadConfig) | |||
| if err != nil { | |||
| return nil, nil, fmt.Errorf("EcWrite failed, err: %w", err) | |||
| } | |||
| nodeIDs := make([]int64, len(nodes)) | |||
| for i := 0; i < len(nodes); i++ { | |||
| nodeIDs[i] = nodes[i].Node.NodeID | |||
| } | |||
| return hashs, nodeIDs, nil | |||
| } | |||
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (t *CreateECPackage) chooseUploadNode(nodes []UploadNodeInfo) UploadNodeInfo { | |||
| sameLocationNodes := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationNodes) > 0 { | |||
| return sameLocationNodes[rand.Intn(len(sameLocationNodes))] | |||
| } | |||
| return nodes[rand.Intn(len(nodes))] | |||
| } | |||
// ecWrite drives the EC upload pipeline for one object: a loader goroutine
// splits the file into ecK data streams, an encoder goroutine produces the
// ecN encoded streams, and one sender goroutine per block streams its
// packets to its node. It returns the per-block file hashes filled in by
// the senders.
//
// NOTE(review): the senders' error returns are discarded (`go send(...)`),
// so a failed block leaves an empty entry in the returned slice instead of
// failing the call — the TODO below already flags the missing error
// handling.
func ecWrite(file io.ReadCloser, fileSize int64, ecK int, ecN int, nodes []UploadNodeInfo, ecPacketSize int64, uploadConfig UploadConfig) ([]string, error) {
	// TODO error handling should follow the logic of the RepWrite function
	// (original note: 需要参考RepWrite函数的代码逻辑,做好错误处理)
	// TODO(original): in coefs, 2 should be ecK and 3 should be ecN — the
	// matrix is currently hard-coded.
	var coefs = [][]int64{{1, 1, 1}, {1, 2, 3}}
	// Number of packets per data block (ceiling division).
	numPacket := (fileSize + int64(ecK)*ecPacketSize - 1) / (int64(ecK) * ecPacketSize)
	//fmt.Println(numPacket)
	// Create the unbuffered pipeline channels: load -> encode -> send.
	loadBufs := make([]chan []byte, ecN)
	encodeBufs := make([]chan []byte, ecN)
	for i := 0; i < ecN; i++ {
		loadBufs[i] = make(chan []byte)
	}
	for i := 0; i < ecN; i++ {
		encodeBufs[i] = make(chan []byte)
	}
	hashs := make([]string, ecN)
	// Start the pipeline.
	go load(file, loadBufs[:ecN], ecK, numPacket*int64(ecK), ecPacketSize) // read from the local file system
	go encode(loadBufs[:ecN], encodeBufs[:ecN], ecK, coefs, numPacket)
	var wg sync.WaitGroup
	wg.Add(ecN)
	/*mutex, err := reqbuilder.NewBuilder().
		// prevent the uploaded replica from being purged
		IPFS().CreateAnyRep(node.ID).
		MutexLock(svc.distlock)
	if err != nil {
		return fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex.Unlock()
	*/
	for i := 0; i < ecN; i++ {
		go send(nodes[i], encodeBufs[i], numPacket, &wg, hashs, i, uploadConfig)
	}
	wg.Wait()
	return hashs, nil
}
| func load(file io.ReadCloser, loadBufs []chan []byte, ecK int, totalNumPacket int64, ecPacketSize int64) error { | |||
| for i := 0; int64(i) < totalNumPacket; i++ { | |||
| buf := make([]byte, ecPacketSize) | |||
| idx := i % ecK | |||
| _, err := file.Read(buf) | |||
| if err != nil { | |||
| return fmt.Errorf("read file falied, err:%w", err) | |||
| } | |||
| loadBufs[idx] <- buf | |||
| if idx == ecK-1 { | |||
| for j := ecK; j < len(loadBufs); j++ { | |||
| zeroPkt := make([]byte, ecPacketSize) | |||
| loadBufs[j] <- zeroPkt | |||
| } | |||
| } | |||
| if err != nil && err != io.EOF { | |||
| return fmt.Errorf("load file to buf failed, err:%w", err) | |||
| } | |||
| } | |||
| for i := 0; i < len(loadBufs); i++ { | |||
| close(loadBufs[i]) | |||
| } | |||
| file.Close() | |||
| return nil | |||
| } | |||
| func encode(inBufs []chan []byte, outBufs []chan []byte, ecK int, coefs [][]int64, numPacket int64) { | |||
| var tmpIn [][]byte | |||
| tmpIn = make([][]byte, len(outBufs)) | |||
| enc := ec.NewRsEnc(ecK, len(outBufs)) | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| for j := 0; j < len(outBufs); j++ { | |||
| tmpIn[j] = <-inBufs[j] | |||
| } | |||
| enc.Encode(tmpIn) | |||
| for j := 0; j < len(outBufs); j++ { | |||
| outBufs[j] <- tmpIn[j] | |||
| } | |||
| } | |||
| for i := 0; i < len(outBufs); i++ { | |||
| close(outBufs[i]) | |||
| } | |||
| } | |||
| func send(node UploadNodeInfo, inBuf chan []byte, numPacket int64, wg *sync.WaitGroup, hashs []string, idx int, uploadConfig UploadConfig) error { | |||
| // TODO zkx 先直接复制client\internal\task\upload_rep_objects.go中的uploadToNode和uploadToLocalIPFS来替代这部分逻辑 | |||
| // 方便之后异步化处理 | |||
| // uploadToAgent的逻辑反了,而且中间步骤失败,就必须打印日志后停止后续操作 | |||
| uploadToAgent := true | |||
| if uploadConfig.LocalIPFS != nil { //使用IPFS传输 | |||
| //创建IPFS文件 | |||
| logger.Infof("try to use local IPFS to upload block") | |||
| writer, err := uploadConfig.LocalIPFS.CreateFile() | |||
| if err != nil { | |||
| uploadToAgent = false | |||
| fmt.Errorf("create IPFS file failed, err: %w", err) | |||
| } | |||
| //逐packet写进ipfs | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| buf := <-inBuf | |||
| reader := bytes.NewReader(buf) | |||
| _, err = io.Copy(writer, reader) | |||
| if err != nil { | |||
| uploadToAgent = false | |||
| fmt.Errorf("copying block data to IPFS file failed, err: %w", err) | |||
| } | |||
| } | |||
| //finish, 获取哈希 | |||
| fileHash, err := writer.Finish() | |||
| if err != nil { | |||
| logger.Warnf("upload block to local IPFS failed, so try to upload by agent, err: %s", err.Error()) | |||
| uploadToAgent = false | |||
| fmt.Errorf("finish writing blcok to IPFS failed, err: %w", err) | |||
| } | |||
| hashs[idx] = fileHash | |||
| if err != nil { | |||
| } | |||
| nodeID := node.Node.NodeID | |||
| // 然后让最近节点pin本地上传的文件 | |||
| agentClient, err := agtmq.NewClient(nodeID, uploadConfig.MQ) | |||
| if err != nil { | |||
| uploadToAgent = false | |||
| fmt.Errorf("create agent client to %d failed, err: %w", nodeID, err) | |||
| } | |||
| defer agentClient.Close() | |||
| pinObjResp, err := agentClient.StartPinningObject(agtmq.NewStartPinningObject(fileHash)) | |||
| if err != nil { | |||
| uploadToAgent = false | |||
| fmt.Errorf("start pinning object: %w", err) | |||
| } | |||
| for { | |||
| waitResp, err := agentClient.WaitPinningObject(agtmq.NewWaitPinningObject(pinObjResp.TaskID, int64(time.Second)*5)) | |||
| if err != nil { | |||
| uploadToAgent = false | |||
| fmt.Errorf("waitting pinning object: %w", err) | |||
| } | |||
| if waitResp.IsComplete { | |||
| if waitResp.Error != "" { | |||
| uploadToAgent = false | |||
| fmt.Errorf("agent pinning object: %s", waitResp.Error) | |||
| } | |||
| break | |||
| } | |||
| } | |||
| if uploadToAgent == false { | |||
| return nil | |||
| } | |||
| } | |||
| //////////////////////////////通过Agent上传 | |||
| if uploadToAgent == true { | |||
| // 如果客户端与节点在同一个地域,则使用内网地址连接节点 | |||
| nodeIP := node.Node.ExternalIP | |||
| if node.IsSameLocation { | |||
| nodeIP = node.Node.LocalIP | |||
| logger.Infof("client and node %d are at the same location, use local ip\n", node.Node.NodeID) | |||
| } | |||
| grpcAddr := fmt.Sprintf("%s:%d", nodeIP, uploadConfig.GRPCPort) | |||
| grpcCon, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) | |||
| if err != nil { | |||
| return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err) | |||
| } | |||
| defer grpcCon.Close() | |||
| client := agentcaller.NewFileTransportClient(grpcCon) | |||
| upload, err := mygrpc.SendFileAsStream(client) | |||
| if err != nil { | |||
| return fmt.Errorf("request to send file failed, err: %w", err) | |||
| } | |||
| // 发送文件数据 | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| buf := <-inBuf | |||
| reader := bytes.NewReader(buf) | |||
| _, err = io.Copy(upload, reader) | |||
| if err != nil { | |||
| // 发生错误则关闭连接 | |||
| upload.Abort(io.ErrClosedPipe) | |||
| return fmt.Errorf("copy block date to upload stream failed, err: %w", err) | |||
| } | |||
| } | |||
| // 发送EOF消息,并获得FileHash | |||
| fileHash, err := upload.Finish() | |||
| if err != nil { | |||
| upload.Abort(io.ErrClosedPipe) | |||
| return fmt.Errorf("send EOF failed, err: %w", err) | |||
| } | |||
| hashs[idx] = fileHash | |||
| wg.Done() | |||
| } | |||
| return nil | |||
| } | |||
| func persist(inBuf []chan []byte, numPacket int64, localFilePath string, wg *sync.WaitGroup) { | |||
| fDir, err := os.Executable() | |||
| if err != nil { | |||
| panic(err) | |||
| } | |||
| fURL := filepath.Join(filepath.Dir(fDir), "assets") | |||
| _, err = os.Stat(fURL) | |||
| if os.IsNotExist(err) { | |||
| os.MkdirAll(fURL, os.ModePerm) | |||
| } | |||
| file, err := os.Create(filepath.Join(fURL, localFilePath)) | |||
| if err != nil { | |||
| return | |||
| } | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| for j := 0; j < len(inBuf); j++ { | |||
| tmp := <-inBuf[j] | |||
| fmt.Println(tmp) | |||
| file.Write(tmp) | |||
| } | |||
| } | |||
| file.Close() | |||
| wg.Done() | |||
| } | |||
| @@ -0,0 +1,317 @@ | |||
| package cmd | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "time" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock/reqbuilder" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/utils/ipfs" | |||
| mygrpc "gitlink.org.cn/cloudream/storage-common/utils/grpc" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/iterator" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| agtmq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/agent" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| agentcaller "gitlink.org.cn/cloudream/storage-common/pkgs/proto" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/credentials/insecure" | |||
| ) | |||
// UploadNodeInfo pairs a candidate upload node with whether it sits in the
// same location as the current client.
type UploadNodeInfo struct {
	Node           model.Node
	IsSameLocation bool // true when the node's LocationID matches the client's
}

// CreateRepPackage is a task that creates a new replication-redundancy
// package in a bucket and uploads a batch of objects into it.
type CreateRepPackage struct {
	userID       int64                            // ID of the user performing the upload
	bucketID     int64                            // bucket the new package is created in
	name         string                           // name of the new package
	objectIter   iterator.UploadingObjectIterator // source of the objects to upload; closed by Execute
	redundancy   models.RepRedundancyInfo         // replication parameters for the package
	uploadConfig UploadConfig                     // local IPFS / network settings used while uploading
	Result       CreateRepPackageResult           // filled in by Execute, read by the task's creator
}

// UploadConfig bundles the client-side settings needed to upload data.
type UploadConfig struct {
	LocalIPFS *ipfs.IPFS // local IPFS daemon, or nil when none is available
	// LocalNodeID is the storage-node ID of the local IPFS daemon; nil when
	// the local daemon is not itself a node of the storage system.
	LocalNodeID *int64
	ExternalIP  string       // client's external IP, used to locate the client
	GRPCPort    int          // port of the agents' gRPC file-transport service
	MQ          *mymq.Config // message-queue configuration for talking to agents
}

// CreateRepPackageResult carries the outcome of a CreateRepPackage task.
type CreateRepPackageResult struct {
	PackageID     int64                   // ID of the package created at the coordinator
	ObjectResults []RepObjectUploadResult // per-object upload outcomes, in iteration order
}

// RepObjectUploadResult describes the upload outcome of a single object.
type RepObjectUploadResult struct {
	Info     *iterator.IterUploadingObject // the object that was uploaded
	Error    error                         // non-nil if uploading this object failed
	FileHash string                        // content hash reported by IPFS / the agent
	// ObjectID is never assigned anywhere in this file — NOTE(review):
	// confirm who is supposed to fill it in.
	ObjectID int64
}
| func NewCreateRepPackage(userID int64, bucketID int64, name string, objIter iterator.UploadingObjectIterator, redundancy models.RepRedundancyInfo, uploadConfig UploadConfig) *CreateRepPackage { | |||
| return &CreateRepPackage{ | |||
| userID: userID, | |||
| bucketID: bucketID, | |||
| name: name, | |||
| objectIter: objIter, | |||
| redundancy: redundancy, | |||
| uploadConfig: uploadConfig, | |||
| } | |||
| } | |||
// Execute runs the task, releases the object iterator, and reports the
// outcome through complete, passing a one-minute RemovingDelay to the task
// framework.
func (t *CreateRepPackage) Execute(ctx TaskContext, complete CompleteFn) {
	err := t.do(ctx)
	// Always release the iterator once the upload attempt finishes, whether
	// or not it succeeded.
	t.objectIter.Close()
	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}
// do creates the package at the coordinator, picks an upload node (preferring
// same-location nodes), locks the uploaded replica against cleanup, uploads
// all objects, and stores the outcome in t.Result.
func (t *CreateRepPackage) do(ctx TaskContext) error {
	/*
		// TODO2
		reqBlder := reqbuilder.NewBuilder()
		for _, uploadObject := range t.Objects {
			reqBlder.Metadata().
				// prevent creating multiple objects with the same name
				Object().CreateOne(t.bucketID, uploadObject.ObjectName)
		}
		// if the local IPFS daemon is itself a node of the storage system,
		// uploading from it requires a lock
		if t.uploadConfig.LocalNodeID != nil {
			reqBlder.IPFS().CreateAnyRep(*t.uploadConfig.LocalNodeID)
		}
		mutex, err := reqBlder.
			Metadata().
			// check that the user has permission on the bucket
			UserBucket().ReadOne(t.userID, t.bucketID).
			// query the available upload nodes
			Node().ReadAny().
			// write the Rep configuration
			ObjectRep().CreateAny().
			// create Cache records
			Cache().CreateAny().
			MutexLock(ctx.DistLock())
		if err != nil {
			return fmt.Errorf("acquire locks failed, err: %w", err)
		}
		defer mutex.Unlock()
	*/

	// Register the new package with the coordinator.
	createPkgResp, err := ctx.Coordinator().CreatePackage(coormq.NewCreatePackage(t.userID, t.bucketID, t.name,
		models.NewTypedRedundancyInfo(models.RedundancyRep, t.redundancy)))
	if err != nil {
		return fmt.Errorf("creating package: %w", err)
	}

	getUserNodesResp, err := ctx.Coordinator().GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return fmt.Errorf("getting user nodes: %w", err)
	}

	// Locate the client so same-location nodes can be preferred.
	findCliLocResp, err := ctx.Coordinator().FindClientLocation(coormq.NewFindClientLocation(t.uploadConfig.ExternalIP))
	if err != nil {
		return fmt.Errorf("finding client location: %w", err)
	}

	nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo {
		return UploadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
		}
	})
	uploadNode := t.chooseUploadNode(nodeInfos)

	// Prevent the uploaded replica from being purged while the task runs.
	mutex2, err := reqbuilder.NewBuilder().
		IPFS().CreateAnyRep(uploadNode.Node.NodeID).
		MutexLock(ctx.DistLock())
	if err != nil {
		return fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex2.Unlock()

	rets, err := uploadAndUpdateRepPackage(ctx, createPkgResp.PackageID, t.objectIter, uploadNode, t.uploadConfig)
	if err != nil {
		return err
	}
	t.Result.PackageID = createPkgResp.PackageID
	t.Result.ObjectResults = rets
	return nil
}
| func uploadAndUpdateRepPackage(ctx TaskContext, packageID int64, objectIter iterator.UploadingObjectIterator, uploadNode UploadNodeInfo, uploadConfig UploadConfig) ([]RepObjectUploadResult, error) { | |||
| var uploadRets []RepObjectUploadResult | |||
| //上传文件夹 | |||
| var adds []coormq.AddRepObjectInfo | |||
| for { | |||
| objInfo, err := objectIter.MoveNext() | |||
| if err == iterator.ErrNoMoreItem { | |||
| break | |||
| } | |||
| if err != nil { | |||
| return nil, fmt.Errorf("reading object: %w", err) | |||
| } | |||
| fileHash, uploadedNodeIDs, err := uploadObject(ctx, objInfo, uploadNode, uploadConfig) | |||
| uploadRets = append(uploadRets, RepObjectUploadResult{ | |||
| Info: objInfo, | |||
| Error: err, | |||
| FileHash: fileHash, | |||
| }) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("uploading object: %w", err) | |||
| } | |||
| adds = append(adds, coormq.NewAddRepObjectInfo(objInfo.Path, objInfo.Size, fileHash, uploadedNodeIDs)) | |||
| } | |||
| _, err := ctx.Coordinator().UpdateRepPackage(coormq.NewUpdateRepPackage(packageID, adds, nil)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("updating package: %w", err) | |||
| } | |||
| return uploadRets, nil | |||
| } | |||
| // 上传文件 | |||
| func uploadObject(ctx TaskContext, obj *iterator.IterUploadingObject, uploadNode UploadNodeInfo, uploadConfig UploadConfig) (string, []int64, error) { | |||
| // 本地有IPFS,则直接从本地IPFS上传 | |||
| if uploadConfig.LocalIPFS != nil { | |||
| logger.Infof("try to use local IPFS to upload file") | |||
| // 只有本地IPFS不是存储系统中的一个节点,才需要Pin文件 | |||
| fileHash, err := uploadToLocalIPFS(uploadConfig.LocalIPFS, obj.File, uploadNode.Node.NodeID, uploadConfig.LocalNodeID == nil, uploadConfig) | |||
| if err == nil { | |||
| return fileHash, []int64{*uploadConfig.LocalNodeID}, nil | |||
| } else { | |||
| logger.Warnf("upload to local IPFS failed, so try to upload to node %d, err: %s", uploadNode.Node.NodeID, err.Error()) | |||
| } | |||
| } | |||
| // 否则发送到agent上传 | |||
| // 如果客户端与节点在同一个地域,则使用内网地址连接节点 | |||
| nodeIP := uploadNode.Node.ExternalIP | |||
| if uploadNode.IsSameLocation { | |||
| nodeIP = uploadNode.Node.LocalIP | |||
| logger.Infof("client and node %d are at the same location, use local ip\n", uploadNode.Node.NodeID) | |||
| } | |||
| fileHash, err := uploadToNode(obj.File, nodeIP, uploadConfig) | |||
| if err != nil { | |||
| return "", nil, fmt.Errorf("upload to node %s failed, err: %w", nodeIP, err) | |||
| } | |||
| return fileHash, []int64{uploadNode.Node.NodeID}, nil | |||
| } | |||
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (t *CreateRepPackage) chooseUploadNode(nodes []UploadNodeInfo) UploadNodeInfo { | |||
| sameLocationNodes := lo.Filter(nodes, func(e UploadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationNodes) > 0 { | |||
| return sameLocationNodes[rand.Intn(len(sameLocationNodes))] | |||
| } | |||
| return nodes[rand.Intn(len(nodes))] | |||
| } | |||
| func uploadToNode(file io.ReadCloser, nodeIP string, uploadConfig UploadConfig) (string, error) { | |||
| // 建立grpc连接,发送请求 | |||
| grpcAddr := fmt.Sprintf("%s:%d", nodeIP, uploadConfig.GRPCPort) | |||
| grpcCon, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) | |||
| if err != nil { | |||
| return "", fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err) | |||
| } | |||
| defer grpcCon.Close() | |||
| client := agentcaller.NewFileTransportClient(grpcCon) | |||
| upload, err := mygrpc.SendFileAsStream(client) | |||
| if err != nil { | |||
| return "", fmt.Errorf("request to send file failed, err: %w", err) | |||
| } | |||
| // 发送文件数据 | |||
| _, err = io.Copy(upload, file) | |||
| if err != nil { | |||
| // 发生错误则关闭连接 | |||
| upload.Abort(io.ErrClosedPipe) | |||
| return "", fmt.Errorf("copy file date to upload stream failed, err: %w", err) | |||
| } | |||
| // 发送EOF消息,并获得FileHash | |||
| fileHash, err := upload.Finish() | |||
| if err != nil { | |||
| upload.Abort(io.ErrClosedPipe) | |||
| return "", fmt.Errorf("send EOF failed, err: %w", err) | |||
| } | |||
| return fileHash, nil | |||
| } | |||
| func uploadToLocalIPFS(ipfs *ipfs.IPFS, file io.ReadCloser, nodeID int64, shouldPin bool, uploadConfig UploadConfig) (string, error) { | |||
| // 从本地IPFS上传文件 | |||
| writer, err := ipfs.CreateFile() | |||
| if err != nil { | |||
| return "", fmt.Errorf("create IPFS file failed, err: %w", err) | |||
| } | |||
| _, err = io.Copy(writer, file) | |||
| if err != nil { | |||
| return "", fmt.Errorf("copy file data to IPFS failed, err: %w", err) | |||
| } | |||
| fileHash, err := writer.Finish() | |||
| if err != nil { | |||
| return "", fmt.Errorf("finish writing IPFS failed, err: %w", err) | |||
| } | |||
| if !shouldPin { | |||
| return fileHash, nil | |||
| } | |||
| // 然后让最近节点pin本地上传的文件 | |||
| agentClient, err := agtmq.NewClient(nodeID, uploadConfig.MQ) | |||
| if err != nil { | |||
| return "", fmt.Errorf("create agent client to %d failed, err: %w", nodeID, err) | |||
| } | |||
| defer agentClient.Close() | |||
| pinObjResp, err := agentClient.StartPinningObject(agtmq.NewStartPinningObject(fileHash)) | |||
| if err != nil { | |||
| return "", fmt.Errorf("start pinning object: %w", err) | |||
| } | |||
| for { | |||
| waitResp, err := agentClient.WaitPinningObject(agtmq.NewWaitPinningObject(pinObjResp.TaskID, int64(time.Second)*5)) | |||
| if err != nil { | |||
| return "", fmt.Errorf("waitting pinning object: %w", err) | |||
| } | |||
| if waitResp.IsComplete { | |||
| if waitResp.Error != "" { | |||
| return "", fmt.Errorf("agent pinning object: %s", waitResp.Error) | |||
| } | |||
| break | |||
| } | |||
| } | |||
| return fileHash, nil | |||
| } | |||
| @@ -0,0 +1,135 @@ | |||
| package cmd | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "os" | |||
| "path/filepath" | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| ) | |||
// DownloadPackage is a task that downloads every object of a package into a
// local directory.
type DownloadPackage struct {
	userID     int64  // ID of the user requesting the download
	packageID  int64  // package to download
	outputPath string // local directory the objects are written into
}
| func NewDownloadPackage(userID int64, packageID int64, outputPath string) *DownloadPackage { | |||
| return &DownloadPackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| outputPath: outputPath, | |||
| } | |||
| } | |||
// Execute runs the task and reports the outcome through complete, passing a
// one-minute RemovingDelay to the task framework.
func (t *DownloadPackage) Execute(ctx TaskContext, complete CompleteFn) {
	err := t.do(ctx)
	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}
// do fetches the package metadata, selects the replication or EC download
// path based on the package's redundancy type, and writes every object to
// disk under t.outputPath.
func (t *DownloadPackage) do(ctx TaskContext) error {
	getPkgResp, err := ctx.Coordinator().GetPackage(coormq.NewGetPackage(t.userID, t.packageID))
	if err != nil {
		return fmt.Errorf("getting package: %w", err)
	}

	// Pick the iterator implementation matching the package's redundancy.
	var objIter iterator.DownloadingObjectIterator
	if getPkgResp.Redundancy.Type == models.RedundancyRep {
		objIter, err = t.downloadRep(ctx)
	} else {
		objIter, err = t.downloadEC(ctx, getPkgResp.Package)
	}
	if err != nil {
		return err
	}
	defer objIter.Close()

	return t.writeObject(objIter)
}
// downloadRep builds an object iterator for a replication-redundancy package.
//
// NOTE(review): this body references svc, config and myos, none of which are
// defined or imported in this file (it appears pasted from the service
// layer), so it cannot compile as-is. The distlock and download settings
// likely need to come from ctx / task fields — confirm and fix.
func (t *DownloadPackage) downloadRep(ctx TaskContext) (iterator.DownloadingObjectIterator, error) {
	getObjsResp, err := ctx.Coordinator().GetPackageObjects(coormq.NewGetPackageObjects(t.userID, t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package objects: %w", err)
	}
	getObjRepDataResp, err := ctx.Coordinator().GetPackageObjectRepData(coormq.NewGetPackageObjectRepData(t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package object rep data: %w", err)
	}
	iter := iterator.NewRepObjectIterator(getObjsResp.Objects, getObjRepDataResp.Data, ctx.Coordinator(), svc.distlock, myos.DownloadConfig{
		LocalIPFS:  svc.ipfs,
		ExternalIP: config.Cfg().ExternalIP,
		GRPCPort:   config.Cfg().GRPCPort,
		MQ:         &config.Cfg().RabbitMQ,
	})
	return iter, nil
}
// downloadEC builds an object iterator for an erasure-coded package, reading
// the EC parameters back out of the package's stored redundancy info.
//
// NOTE(review): like downloadRep, this references svc, config and myos which
// are not defined or imported in this file, so it cannot compile as-is —
// confirm where the distlock and download settings should come from.
func (t *DownloadPackage) downloadEC(ctx TaskContext, pkg model.Package) (iterator.DownloadingObjectIterator, error) {
	getObjsResp, err := ctx.Coordinator().GetPackageObjects(coormq.NewGetPackageObjects(t.userID, t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package objects: %w", err)
	}
	getObjECDataResp, err := ctx.Coordinator().GetPackageObjectECData(coormq.NewGetPackageObjectECData(t.packageID))
	if err != nil {
		return nil, fmt.Errorf("getting package object ec data: %w", err)
	}

	// Recover the EC parameters stored on the package.
	var ecRed models.ECRedundancyInfo
	if err := serder.AnyToAny(pkg.Redundancy.Info, &ecRed); err != nil {
		return nil, fmt.Errorf("get ec redundancy info: %w", err)
	}
	getECResp, err := ctx.Coordinator().GetECConfig(coormq.NewGetECConfig(ecRed.ECName))
	if err != nil {
		return nil, fmt.Errorf("getting ec: %w", err)
	}

	iter := iterator.NewECObjectIterator(getObjsResp.Objects, getObjECDataResp.Data, ctx.Coordinator(), svc.distlock, getECResp.Config, config.Cfg().ECPacketSize, myos.DownloadConfig{
		LocalIPFS:  svc.ipfs,
		ExternalIP: config.Cfg().ExternalIP,
		GRPCPort:   config.Cfg().GRPCPort,
		MQ:         &config.Cfg().RabbitMQ,
	})
	return iter, nil
}
| func (t *DownloadPackage) writeObject(objIter iterator.DownloadingObjectIterator) error { | |||
| for { | |||
| objInfo, err := objIter.MoveNext() | |||
| if err == iterator.ErrNoMoreItem { | |||
| break | |||
| } | |||
| if err != nil { | |||
| return err | |||
| } | |||
| defer objInfo.File.Close() | |||
| outputFile, err := os.Create(filepath.Join(t.outputPath, objInfo.Object.Path)) | |||
| if err != nil { | |||
| return fmt.Errorf("creating object file: %w", err) | |||
| } | |||
| defer outputFile.Close() | |||
| _, err = io.Copy(outputFile, objInfo.File) | |||
| if err != nil { | |||
| return fmt.Errorf("copy object data to local file failed, err: %w", err) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,148 @@ | |||
| package cmd | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| mysort "gitlink.org.cn/cloudream/common/utils/sort" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| ) | |||
// UpdateECPackage is a task that uploads additional objects into an existing
// erasure-coded package.
type UpdateECPackage struct {
	userID       int64                            // ID of the user performing the update
	packageID    int64                            // package being updated
	objectIter   iterator.UploadingObjectIterator // source of the objects to upload; closed by Execute
	ecPacketSize int64                            // packet size used when splitting objects into EC blocks
	uploadConfig UploadConfig                     // local IPFS / network settings used while uploading
	Result       UpdateECPackageResult            // filled in by Execute, read by the task's creator
}

// UpdateECPackageResult carries the outcome of an UpdateECPackage task.
type UpdateECPackageResult struct {
	ObjectResults []ECObjectUploadResult // per-object upload outcomes, in iteration order
}
| func NewUpdateECPackage(userID int64, packageID int64, objIter iterator.UploadingObjectIterator, ecPacketSize int64, uploadConfig UploadConfig) *UpdateECPackage { | |||
| return &UpdateECPackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| objectIter: objIter, | |||
| ecPacketSize: ecPacketSize, | |||
| uploadConfig: uploadConfig, | |||
| } | |||
| } | |||
// Execute runs the task, releases the object iterator, and reports the
// outcome through complete, passing a one-minute RemovingDelay to the task
// framework.
func (t *UpdateECPackage) Execute(ctx TaskContext, complete CompleteFn) {
	err := t.do(ctx)
	// Always release the iterator once the upload attempt finishes.
	t.objectIter.Close()
	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}
// do uploads additional objects into an existing EC package: it re-reads the
// package's EC parameters from the coordinator, maps the user's nodes, and
// delegates the actual upload to uploadAndUpdateECPackage.
func (t *UpdateECPackage) do(ctx TaskContext) error {
	/*
		TODO2
		reqBlder := reqbuilder.NewBuilder()
		// if the local IPFS daemon is itself a node of the storage system,
		// uploading from it requires a lock
		if t.uploadConfig.LocalNodeID != nil {
			reqBlder.IPFS().CreateAnyRep(*t.uploadConfig.LocalNodeID)
		}
		// TODO2
		mutex, err := reqBlder.
			Metadata().
			// check that the user has permission on the object
			UserBucket().ReadAny().
			// read and modify the object info
			Object().WriteOne(t.objectID).
			// update the Rep configuration
			ObjectRep().WriteOne(t.objectID).
			// query the available upload nodes
			Node().ReadAny().
			// create Cache records
			Cache().CreateAny().
			// update the state of records that moved this object
			StorageObject().WriteAny().
			MutexLock(ctx.DistLock())
		if err != nil {
			return fmt.Errorf("acquire locks failed, err: %w", err)
		}
		defer mutex.Unlock()
	*/

	getPkgResp, err := ctx.Coordinator().GetPackage(coormq.NewGetPackage(t.userID, t.packageID))
	if err != nil {
		return fmt.Errorf("getting package: %w", err)
	}

	getUserNodesResp, err := ctx.Coordinator().GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return fmt.Errorf("getting user nodes: %w", err)
	}

	// Locate the client so same-location nodes can be identified.
	findCliLocResp, err := ctx.Coordinator().FindClientLocation(coormq.NewFindClientLocation(t.uploadConfig.ExternalIP))
	if err != nil {
		return fmt.Errorf("finding client location: %w", err)
	}

	nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UploadNodeInfo {
		return UploadNodeInfo{
			Node:           node,
			IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
		}
	})

	// Recover the EC parameters stored on the package.
	var ecRed models.ECRedundancyInfo
	if err := serder.AnyToAny(getPkgResp.Package.Redundancy.Info, &ecRed); err != nil {
		return fmt.Errorf("get ec redundancy info: %w", err)
	}

	getECResp, err := ctx.Coordinator().GetECConfig(coormq.NewGetECConfig(ecRed.ECName))
	if err != nil {
		return fmt.Errorf("getting ec: %w", err)
	}

	/*
		TODO2
		// prevent the uploaded replica from being purged
		mutex2, err := reqbuilder.NewBuilder().
			IPFS().CreateAnyRep(uploadNode.Node.NodeID).
			MutexLock(ctx.DistLock())
		if err != nil {
			return fmt.Errorf("acquire locks failed, err: %w", err)
		}
		defer mutex2.Unlock()
	*/

	rets, err := uploadAndUpdateECPackage(ctx, t.packageID, t.objectIter, nodeInfos, getECResp.Config, t.ecPacketSize, t.uploadConfig)
	if err != nil {
		return err
	}
	t.Result.ObjectResults = rets
	return nil
}
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (t *UpdateECPackage) chooseUploadNode(nodes []UpdateNodeInfo) UpdateNodeInfo { | |||
| mysort.Sort(nodes, func(left, right UpdateNodeInfo) int { | |||
| v := -mysort.CmpBool(left.HasOldObject, right.HasOldObject) | |||
| if v != 0 { | |||
| return v | |||
| } | |||
| return -mysort.CmpBool(left.IsSameLocation, right.IsSameLocation) | |||
| }) | |||
| return nodes[0] | |||
| } | |||
| @@ -0,0 +1,140 @@ | |||
| package cmd | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock/reqbuilder" | |||
| mysort "gitlink.org.cn/cloudream/common/utils/sort" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/iterator" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| ) | |||
// UpdateRepPackage is a task that uploads a batch of objects into an
// existing rep-redundancy package.
type UpdateRepPackage struct {
	userID       int64                            // ID of the user issuing the update
	packageID    int64                            // ID of the package being updated
	objectIter   iterator.UploadingObjectIterator // iterator over the objects to upload; closed by Execute
	uploadConfig UploadConfig                     // upload configuration (external IP, local node, ...)
	Result       UpdateRepPackageResult           // filled in by do() when the task succeeds
}
// UpdateNodeInfo describes a candidate upload node, extending
// UploadNodeInfo with whether the node already stores the old object.
type UpdateNodeInfo struct {
	UploadNodeInfo
	HasOldObject bool // true if this node already holds a copy of the old object
}
// UpdateRepPackageResult holds the outcome of an UpdateRepPackage task.
type UpdateRepPackageResult struct {
	ObjectResults []RepObjectUploadResult // per-object upload results
}
| func NewUpdateRepPackage(userID int64, packageID int64, objectIter iterator.UploadingObjectIterator, uploadConfig UploadConfig) *UpdateRepPackage { | |||
| return &UpdateRepPackage{ | |||
| userID: userID, | |||
| packageID: packageID, | |||
| objectIter: objectIter, | |||
| uploadConfig: uploadConfig, | |||
| } | |||
| } | |||
// Execute runs the task, always closing the object iterator afterwards,
// and reports the result through the complete callback. The finished task
// is kept around for one minute before removal.
func (t *UpdateRepPackage) Execute(ctx TaskContext, complete CompleteFn) {
	err := t.do(ctx)
	t.objectIter.Close()
	complete(err, CompleteOption{
		RemovingDelay: time.Minute,
	})
}
// do performs the actual update: it queries the user's available nodes,
// determines the client's location, picks an upload node, locks the
// uploaded replica against cleanup, uploads the objects and stores the
// per-object results in t.Result.
func (t *UpdateRepPackage) do(ctx TaskContext) error {
	/*
		TODO2
		reqBlder := reqbuilder.NewBuilder()
		// If the local IPFS daemon is also a node of the storage system,
		// a lock is needed when uploading from the local machine.
		if t.uploadConfig.LocalNodeID != nil {
			reqBlder.IPFS().CreateAnyRep(*t.uploadConfig.LocalNodeID)
		}
		// TODO2
		mutex, err := reqBlder.
			Metadata().
			// Used to check whether the user has permission on the object
			UserBucket().ReadAny().
			// Used to read and modify object info
			Object().WriteOne(t.objectID).
			// Used to update the rep config
			ObjectRep().WriteOne(t.objectID).
			// Used to query available upload nodes
			Node().ReadAny().
			// Used to create Cache records
			Cache().CreateAny().
			// Used to update the state of records that Move this object
			StorageObject().WriteAny().
			MutexLock(ctx.DistLock())
		if err != nil {
			return fmt.Errorf("acquire locks failed, err: %w", err)
		}
		defer mutex.Unlock()
	*/
	getUserNodesResp, err := ctx.Coordinator().GetUserNodes(coormq.NewGetUserNodes(t.userID))
	if err != nil {
		return fmt.Errorf("getting user nodes: %w", err)
	}
	findCliLocResp, err := ctx.Coordinator().FindClientLocation(coormq.NewFindClientLocation(t.uploadConfig.ExternalIP))
	if err != nil {
		return fmt.Errorf("finding client location: %w", err)
	}
	nodeInfos := lo.Map(getUserNodesResp.Nodes, func(node model.Node, index int) UpdateNodeInfo {
		return UpdateNodeInfo{
			UploadNodeInfo: UploadNodeInfo{
				Node:           node,
				IsSameLocation: node.LocationID == findCliLocResp.Location.LocationID,
			},
		}
	})
	// NOTE(review): HasOldObject is never set here, so chooseUploadNode's
	// old-object preference currently has no effect — confirm intended.
	// Upload target preference:
	// 1. the local IPFS daemon
	// 2. a node holding the old file, in the same location as the client
	// 3. a node holding the old file, in a different location
	// 4. a node in the same location
	// TODO rules for the multi-file case still need to be considered
	uploadNode := t.chooseUploadNode(nodeInfos)
	// Lock so the uploaded replica is not garbage-collected
	mutex2, err := reqbuilder.NewBuilder().
		IPFS().CreateAnyRep(uploadNode.Node.NodeID).
		MutexLock(ctx.DistLock())
	if err != nil {
		return fmt.Errorf("acquire locks failed, err: %w", err)
	}
	defer mutex2.Unlock()
	rets, err := uploadAndUpdateRepPackage(ctx, t.packageID, t.objectIter, uploadNode.UploadNodeInfo, t.uploadConfig)
	if err != nil {
		return err
	}
	t.Result.ObjectResults = rets
	return nil
}
| // chooseUploadNode 选择一个上传文件的节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (t *UpdateRepPackage) chooseUploadNode(nodes []UpdateNodeInfo) UpdateNodeInfo { | |||
| mysort.Sort(nodes, func(left, right UpdateNodeInfo) int { | |||
| v := -mysort.CmpBool(left.HasOldObject, right.HasOldObject) | |||
| if v != 0 { | |||
| return v | |||
| } | |||
| return -mysort.CmpBool(left.IsSameLocation, right.IsSameLocation) | |||
| }) | |||
| return nodes[0] | |||
| } | |||
| @@ -102,18 +102,18 @@ func (db *BucketDB) Delete(ctx SQLContext, bucketID int64) error { | |||
| return fmt.Errorf("delete bucket failed, err: %w", err) | |||
| } | |||
| // 删除Bucket内的Object | |||
| // 删除Bucket内的Package | |||
| var objIDs []int64 | |||
| err = sqlx.Select(ctx, &objIDs, "select ObjectID from Object where BucketID = ?", bucketID) | |||
| err = sqlx.Select(ctx, &objIDs, "select PackageID from Package where BucketID = ?", bucketID) | |||
| if err != nil { | |||
| return fmt.Errorf("query object failed, err: %w", err) | |||
| return fmt.Errorf("query package failed, err: %w", err) | |||
| } | |||
| for _, objID := range objIDs { | |||
| // TODO 不一定所有的错误都要中断后续过程 | |||
| err = db.Object().SoftDelete(ctx, objID) | |||
| err = db.Package().SoftDelete(ctx, objID) | |||
| if err != nil { | |||
| return fmt.Errorf("set object seleted failed, err: %w", err) | |||
| return fmt.Errorf("set package seleted failed, err: %w", err) | |||
| } | |||
| } | |||
| return nil | |||
| @@ -3,15 +3,11 @@ package db | |||
| import ( | |||
| "context" | |||
| "database/sql" | |||
| "errors" | |||
| "fmt" | |||
| "time" | |||
| _ "github.com/go-sql-driver/mysql" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/config" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type DB struct { | |||
| @@ -62,177 +58,3 @@ func (db *DB) DoTx(isolation sql.IsolationLevel, fn func(tx *sqlx.Tx) error) err | |||
// SQLCtx returns the underlying connection as a SQLContext, for callers
// that want to run queries outside of an explicit transaction.
func (db *DB) SQLCtx() SQLContext {
	return db.d
}
// InsertECObject inserts a row into the erasure-coded object table:
// looked up by object name and bucket ID, inserted only when absent.
// Currently unimplemented and panics when called.
// TODO a transaction is needed to make query-then-insert correct
// TODO rewrite using CreateRepObject as a reference
func (db *DB) InsertECObject(objectName string, bucketID int, fileSize int64, ecName string) (int64, error) {
	// Previous implementation kept for reference:
	/*type Object struct {
		ObjectID int64  `db:"ObjectID"`
		Name     string `db:"Name"`
		BucketID int    `db:"BucketID"`
	}
	var x Object
	err := db.d.Get(&x, "select ObjectID, Name, BucketID from Object where Name=? AND BucketID=?", objectName, bucketID)
	// Insert only when no row exists yet
	if errors.Is(err, sql.ErrNoRows) {
		sql := "insert into Object(Name, BucketID, FileSize, Redundancy, NumRep, EcName) values(?,?,?,?,?,?)"
		r, err := db.d.Exec(sql, objectName, bucketID, fileSize, false, "-1", ecName)
		if err != nil {
			return 0, err
		}
		id, err := r.LastInsertId()
		if err != nil {
			return 0, err
		}
		// TODO failure handling still needs consideration
		return id, nil
	} else if err == nil {
		return x.ObjectID, nil
	}
	return 0, err*/
	panic("not implement yet")
}
| // QueryObjectBlock 查询对象编码块表 | |||
| func (db *DB) QueryObjectBlock(objectID int64) ([]model.ObjectBlock, error) { | |||
| var x []model.ObjectBlock | |||
| sql := "select * from ObjectBlock where ObjectID=?" | |||
| err := db.d.Select(&x, sql, objectID) | |||
| return x, err | |||
| } | |||
| // 对象编码块表Echash插入 | |||
| func (db *DB) InsertECHash(objectID int64, hashes []string) { | |||
| for i := 0; i < len(hashes); i++ { | |||
| sql := "update ObjectBlock set BlockHash =? where ObjectID = ? AND InnerID = ?" | |||
| // TODO 需要处理错误 | |||
| db.d.Exec(sql, hashes[i], objectID, i) | |||
| } | |||
| } | |||
| // 对象编码块表插入 | |||
| func (db *DB) InsertEcObjectBlock(objectID int64, innerID int) error { | |||
| // 根据objectID查询,若不存在则插入,若存在则不操作 | |||
| _, err := db.d.Exec( | |||
| "insert into ObjectBlock(ObjectID, InnerID) select ?, ? where not exists (select ObjectID from ObjectBlock where ObjectID=? AND InnerID=?)", | |||
| objectID, | |||
| innerID, | |||
| objectID, | |||
| innerID, | |||
| ) | |||
| return err | |||
| } | |||
| // BatchInsertOrUpdateCache 批量更新缓存表 | |||
| func (db *DB) BatchInsertOrUpdateCache(blockHashes []string, nodeID int64) error { | |||
| //jh:将hashs中的hash,IP插入缓存表中,TempOrPin字段为true,Time为插入时的时间戳 | |||
| //-如果要插入的hash、IP在表中已存在且所对应的TempOrPin字段为false,则不做任何操作 | |||
| //-如果要插入的hash、IP在表中已存在且所对应的TempOrPin字段为true,则更新Time | |||
| tx, err := db.d.BeginTxx(context.Background(), &sql.TxOptions{ | |||
| Isolation: sql.LevelSerializable, | |||
| }) | |||
| if err != nil { | |||
| return fmt.Errorf("start transaction failed, err: %w", err) | |||
| } | |||
| for _, blockHash := range blockHashes { | |||
| //根据hash和nodeip查询缓存表里是否存在此条记录 | |||
| var cache model.Cache | |||
| err := tx.Get( | |||
| &cache, | |||
| "select NodeID, TempOrPin, Cachetime from Cache where FileHash=? AND NodeID=?", | |||
| blockHash, | |||
| nodeID, | |||
| ) | |||
| // 不存在记录则创建新记录 | |||
| if errors.Is(err, sql.ErrNoRows) { | |||
| _, err := tx.Exec("insert into Cache values(?,?,?,?)", blockHash, nodeID, true, time.Now()) | |||
| if err != nil { | |||
| tx.Rollback() | |||
| return fmt.Errorf("insert cache failed, err: %w", err) | |||
| } | |||
| } else if err == nil && cache.State == consts.CacheStateTemp { | |||
| //若在表中已存在且所对应的TempOrPin字段为true,则更新Time | |||
| _, err := tx.Exec( | |||
| "update Cache set Cachetime=? where FileHash=? AND NodeID=?", | |||
| time.Now(), | |||
| blockHash, | |||
| nodeID, | |||
| ) | |||
| if err != nil { | |||
| tx.Rollback() | |||
| return fmt.Errorf("update cache failed, err: %w", err) | |||
| } | |||
| } | |||
| } | |||
| err = tx.Commit() | |||
| if err != nil { | |||
| tx.Rollback() | |||
| return fmt.Errorf("commit transaction failed, err: %w", err) | |||
| } | |||
| return nil | |||
| } | |||
| // 查询节点延迟表 | |||
| func (db *DB) QueryNodeDelay(inNodeIP string, outNodeIP string) (int, error) { | |||
| //节点延迟结构体 | |||
| var x struct { | |||
| DelayInMs int `db:"DelayInMs"` | |||
| } | |||
| sql := "select DelayInMs from NodeDelay where InNodeIP=? AND OutNodeIP=?" | |||
| err := db.d.Get(&x, sql, inNodeIP, outNodeIP) | |||
| return x.DelayInMs, err | |||
| } | |||
| // 节点延迟表插入 | |||
| // TODO 需要使用事务确保插入的记录完整 | |||
| func (db *DB) InsertNodeDelay(srcNodeID int64, dstNodeIDs []int64, delay []int) { | |||
| insSql := "insert into NodeDelay values(?,?,?)" | |||
| updateSql := "UPDATE NodeDelay SET DelayInMs=? WHERE SourceNodeID=? AND DestinationNodeID=?" | |||
| for i := 0; i < len(dstNodeIDs); i++ { | |||
| _, err := db.d.Exec(insSql, srcNodeID, dstNodeIDs[i], delay[i]) | |||
| if err != nil { | |||
| // TODO 处理错误 | |||
| db.d.Exec(updateSql, delay[i], srcNodeID, dstNodeIDs[i]) | |||
| } | |||
| } | |||
| } | |||
// InsertNode upserts a node row keyed by NodeIP: a new row is inserted when
// the IP is absent, otherwise only NodeStatus is updated.
// TODO a transaction is needed to make query-then-insert correct
func (db *DB) InsertNode(nodeip string, nodelocation string, ipfsstatus string, localdirstatus string) error {
	// Look the node up by IP first
	type Node struct {
		NodeIP string `db:"NodeIP"`
	}
	var x Node
	err := db.d.Get(&x, "select NodeIP from Node where NodeIP=?", nodeip)
	// The node is reachable only when both IPFS and the local dir are OK
	// TODO change the status field into a string (enum) value
	NodeStatus := ipfsstatus == consts.IPFSStateOK && localdirstatus == consts.StorageDirectoryStateOK
	// Not found: insert a fresh row
	if errors.Is(err, sql.ErrNoRows) {
		sql := "insert into Node values(?,?,?)"
		_, err := db.d.Exec(sql, nodeip, nodelocation, NodeStatus)
		return err
	}
	// NOTE(review): a Get error other than ErrNoRows falls through to the
	// update below instead of being returned — confirm this is intended.
	// Found: update the status only
	sql := "update Node set NodeStatus=? where NodeIP=?"
	_, err = db.d.Exec(sql, NodeStatus, nodeip)
	return err
}
| @@ -0,0 +1,32 @@ | |||
| package db | |||
| import ( | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// LocationDB groups the Location-table operations on top of DB.
type LocationDB struct {
	*DB
}
// Location returns an accessor for Location-table operations.
func (db *DB) Location() *LocationDB {
	return &LocationDB{DB: db}
}
| func (*LocationDB) GetByID(ctx SQLContext, id int64) (model.Location, error) { | |||
| var ret model.Location | |||
| err := sqlx.Get(ctx, &ret, "select * from Location where LocationID = ?", id) | |||
| return ret, err | |||
| } | |||
| func (db *LocationDB) FindLocationByExternalIP(ctx SQLContext, ip string) (model.Location, error) { | |||
| var locID int64 | |||
| err := sqlx.Get(ctx, &locID, "select LocationID from Node where ExternalIP = ?", ip) | |||
| if err != nil { | |||
| return model.Location{}, fmt.Errorf("find node by external ip: %w", err) | |||
| } | |||
| return db.GetByID(ctx, locID) | |||
| } | |||
| @@ -1,6 +1,10 @@ | |||
| package model | |||
| import "time" | |||
| import ( | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| ) | |||
| type Node struct { | |||
| NodeID int64 `db:"NodeID" json:"nodeID"` | |||
| @@ -52,27 +56,30 @@ type Bucket struct { | |||
| CreatorID int64 `db:"CreatorID" json:"creatorID"` | |||
| } | |||
// Package is one row of the Package table: a named group of objects inside
// a bucket, with a package-wide redundancy setting.
type Package struct {
	PackageID  int64                      `db:"PackageID" json:"packageID"`
	Name       string                     `db:"Name" json:"name"`
	BucketID   int64                      `db:"BucketID" json:"bucketID"`
	State      string                     `db:"State" json:"state"`
	Redundancy models.TypedRedundancyInfo `db:"Redundancy" json:"redundancy"`
}
| type Object struct { | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| Name string `db:"Name" json:"name"` | |||
| BucketID int64 `db:"BucketID" json:"bucketID"` | |||
| State string `db:"State" json:"state"` | |||
| FileSize int64 `db:"FileSize" json:"fileSize,string"` | |||
| Redundancy string `db:"Redundancy" json:"redundancy"` | |||
| DirName string `db:"DirName" json:"dirName"` | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| PackageID int64 `db:"PackageID" json:"packageID"` | |||
| Path string `db:"Path" json:"path"` | |||
| Size int64 `db:"Size" json:"size,string"` | |||
| } | |||
// ObjectRep is one row of the ObjectRep table: the replication info
// (replica count and file hash) of a rep-redundancy object.
type ObjectRep struct {
	ObjectID int64  `db:"ObjectID" json:"objectID"`
	RepCount int    `db:"RepCount" json:"repCount"`
	FileHash string `db:"FileHash" json:"fileHash"`
}
| type ObjectBlock struct { | |||
| BlockID int64 `db:"BlockID" json:"blockID"` | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| InnerID int `db:"InnerID" json:"innerID"` | |||
| BlockHash string `db:"BlockHash" json:"blockHash"` | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| Index int `db:"Index" json:"index"` | |||
| FileHash string `db:"FileHash" json:"fileHash"` | |||
| } | |||
| type Cache struct { | |||
| @@ -83,8 +90,8 @@ type Cache struct { | |||
| Priority int `db:"Priority" json:"priority"` | |||
| } | |||
| type StorageObject struct { | |||
| ObjectID int64 `db:"ObjectID" json:"objectID"` | |||
| type StoragePackage struct { | |||
| PackageID int64 `db:"PackageID" json:"packageID"` | |||
| StorageID int64 `db:"StorageID" json:"storageID"` | |||
| UserID int64 `db:"UserID" json:"userID"` | |||
| State string `db:"State" json:"state"` | |||
| @@ -27,13 +27,6 @@ func (db *NodeDB) GetAllNodes(ctx SQLContext) ([]model.Node, error) { | |||
| return ret, err | |||
| } | |||
// GetByExternalIP finds the node whose ExternalIP column equals exterIP.
func (db *NodeDB) GetByExternalIP(ctx SQLContext, exterIP string) (model.Node, error) {
	var ret model.Node
	err := sqlx.Get(ctx, &ret, "select * from Node where ExternalIP = ?", exterIP)
	return ret, err
}
| // GetUserNodes 根据用户id查询可用node | |||
| func (db *NodeDB) GetUserNodes(ctx SQLContext, userID int64) ([]model.Node, error) { | |||
| var nodes []model.Node | |||
| @@ -1,15 +1,12 @@ | |||
| package db | |||
| import ( | |||
| "database/sql" | |||
| "errors" | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| ) | |||
| type ObjectDB struct { | |||
| @@ -26,158 +23,53 @@ func (db *ObjectDB) GetByID(ctx SQLContext, objectID int64) (model.Object, error | |||
| return ret, err | |||
| } | |||
| func (db *ObjectDB) GetByName(ctx SQLContext, bucketID int64, name string) (model.Object, error) { | |||
| var ret model.Object | |||
| err := sqlx.Get(ctx, &ret, "select * from Object where BucketID = ? and Name = ?", bucketID, name) | |||
| return ret, err | |||
| } | |||
// GetBucketObjects lists the objects of a bucket, joined through UserBucket
// so only buckets the user can access return rows.
func (db *ObjectDB) GetBucketObjects(ctx SQLContext, userID int64, bucketID int64) ([]model.Object, error) {
	var ret []model.Object
	err := sqlx.Select(ctx, &ret, "select Object.* from UserBucket, Object where UserID = ? and UserBucket.BucketID = ? and UserBucket.BucketID = Object.BucketID", userID, bucketID)
	return ret, err
}
| func (db *ObjectDB) GetByDirName(ctx SQLContext, dirName string) ([]model.Object, error) { | |||
| var ret []model.Object | |||
| err := sqlx.Select(ctx, &ret, "select * from Object where DirName = ? ", dirName) | |||
| return ret, err | |||
| } | |||
| // IsAvailable 判断一个用户是否拥有指定对象 | |||
| func (db *ObjectDB) IsAvailable(ctx SQLContext, userID int64, objectID int64) (bool, error) { | |||
| var objID int64 | |||
| // 先根据ObjectID找到Object,然后判断此Object所在的Bucket是不是归此用户所有 | |||
| err := sqlx.Get(ctx, &objID, | |||
| "select Object.ObjectID from Object, UserBucket where "+ | |||
| "Object.ObjectID = ? and "+ | |||
| "Object.BucketID = UserBucket.BucketID and "+ | |||
| "UserBucket.UserID = ?", | |||
| objectID, userID) | |||
| if err == sql.ErrNoRows { | |||
| return false, nil | |||
| } | |||
| if err != nil { | |||
| return false, fmt.Errorf("find object failed, err: %w", err) | |||
| } | |||
| return true, nil | |||
| } | |||
// GetUserObject fetches an object joined through UserBucket, so the query
// matches no row when the user lacks permission on the object's bucket.
func (db *ObjectDB) GetUserObject(ctx SQLContext, userID int64, objectID int64) (model.Object, error) {
	var ret model.Object
	err := sqlx.Get(ctx, &ret,
		"select Object.* from Object, UserBucket where "+
			"Object.ObjectID = ? and "+
			"Object.BucketID = UserBucket.BucketID and "+
			"UserBucket.UserID = ?",
		objectID, userID)
	return ret, err
}
| // CreateRepObject 创建多副本对象相关的记录 | |||
| func (db *ObjectDB) CreateRepObject(ctx SQLContext, bucketID int64, objectName string, fileSize int64, repCount int, nodeIDs []int64, fileHash string, dirName string) (int64, error) { | |||
| // 根据objectname和bucketid查询,若不存在则插入,若存在则返回错误 | |||
| var objectID int64 | |||
| err := sqlx.Get(ctx, &objectID, "select ObjectID from Object where Name = ? AND BucketID = ?", objectName, bucketID) | |||
| // 无错误代表存在记录 | |||
| if err == nil { | |||
| return 0, fmt.Errorf("object with given Name and BucketID already exists") | |||
| } | |||
| // 错误不是记录不存在 | |||
| if err != nil && !errors.Is(err, sql.ErrNoRows) { | |||
| return 0, fmt.Errorf("query Object by ObjectName and BucketID failed, err: %w", err) | |||
| } | |||
| // 创建对象的记录 | |||
| sql := "insert into Object(Name, BucketID, State, FileSize, Redundancy, DirName) values(?,?,?,?,?,?)" | |||
| r, err := ctx.Exec(sql, objectName, bucketID, consts.ObjectStateNormal, fileSize, models.RedundancyRep, dirName) | |||
| func (db *ObjectDB) Create(ctx SQLContext, packageID int64, path string, size int64) (int64, error) { | |||
| sql := "insert into Object(PackageID, Path, Size) values(?,?,?)" | |||
| ret, err := ctx.Exec(sql, packageID, path, size) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert object failed, err: %w", err) | |||
| } | |||
| objectID, err = r.LastInsertId() | |||
| objectID, err := ret.LastInsertId() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get id of inserted object failed, err: %w", err) | |||
| } | |||
| // 创建对象副本的记录 | |||
| _, err = ctx.Exec("insert into ObjectRep(ObjectID, RepCount, FileHash) values(?,?,?)", objectID, repCount, fileHash) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert object rep failed, err: %w", err) | |||
| } | |||
| // 创建缓存记录 | |||
| priority := 0 //优先级暂时设置为0 | |||
| for _, nodeID := range nodeIDs { | |||
| err = db.Cache().CreatePinned(ctx, fileHash, nodeID, priority) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("create cache failed, err: %w", err) | |||
| } | |||
| } | |||
| return objectID, nil | |||
| } | |||
| func (db *ObjectDB) CreateEcObject(ctx SQLContext, bucketID int64, objectName string, fileSize int64, userID int64, nodeIDs []int64, hashs []string, ecName string, dirName string) (int64, error) { | |||
| // 根据objectname和bucketid查询,若不存在则插入,若存在则返回错误 | |||
| var objectID int64 | |||
| err := sqlx.Get(ctx, &objectID, "select ObjectID from Object where Name = ? AND BucketID = ?", objectName, bucketID) | |||
| // 无错误代表存在记录 | |||
| if err == nil { | |||
| return 0, fmt.Errorf("object with given Name and BucketID already exists") | |||
| } | |||
| // 错误不是记录不存在 | |||
| if err != nil && !errors.Is(err, sql.ErrNoRows) { | |||
| return 0, fmt.Errorf("query Object by ObjectName and BucketID failed, err: %w", err) | |||
| } | |||
| // 创建对象的记录 | |||
| sql := "insert into Object(Name, BucketID, State, FileSize, Redundancy, DirName) values(?,?,?,?,?,?)" | |||
| r, err := ctx.Exec(sql, objectName, bucketID, consts.ObjectStateNormal, fileSize, ecName, dirName) | |||
| // 创建或者更新记录,返回值true代表是创建,false代表是更新 | |||
| func (db *ObjectDB) CreateOrUpdate(ctx SQLContext, packageID int64, path string, size int64) (int64, bool, error) { | |||
| sql := "insert into Object(PackageID, Path, Size) values(?,?,?) on duplicate key update Size = ?" | |||
| ret, err := ctx.Exec(sql, packageID, path, size, size) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert Ec object failed, err: %w", err) | |||
| return 0, false, fmt.Errorf("insert object failed, err: %w", err) | |||
| } | |||
| objectID, err = r.LastInsertId() | |||
| affs, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get id of inserted object failed, err: %w", err) | |||
| return 0, false, fmt.Errorf("getting affected rows: %w", err) | |||
| } | |||
| // 创建编码块的记录 | |||
| for i := 0; i < len(hashs); i++ { | |||
| _, err = ctx.Exec("insert into ObjectBlock(ObjectID, InnerID, BlockHash) values(?,?,?)", objectID, i, hashs[i]) | |||
| // 影响行数为1时是插入,为2时是更新 | |||
| if affs == 1 { | |||
| objectID, err := ret.LastInsertId() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert object rep failed, err: %w", err) | |||
| return 0, false, fmt.Errorf("get id of inserted object failed, err: %w", err) | |||
| } | |||
| return objectID, true, nil | |||
| } | |||
| // 创建缓存记录 | |||
| priority := 0 //优先级暂时设置为0 | |||
| i := 0 | |||
| for _, nodeID := range nodeIDs { | |||
| err = db.Cache().CreatePinned(ctx, hashs[i], nodeID, priority) | |||
| i += 1 | |||
| if err != nil { | |||
| return 0, fmt.Errorf("create cache failed, err: %w", err) | |||
| } | |||
| var objID int64 | |||
| if err = sqlx.Get(ctx, &objID, "select ObjectID from Object where PackageID = ? and Path = ?", packageID, path); err != nil { | |||
| return 0, false, fmt.Errorf("getting object id: %w", err) | |||
| } | |||
| return objectID, nil | |||
| return objID, false, nil | |||
| } | |||
| func (db *ObjectDB) UpdateRepObject(ctx SQLContext, objectID int64, fileSize int64, nodeIDs []int64, fileHash string) error { | |||
| obj, err := db.GetByID(ctx, objectID) | |||
| if err != nil { | |||
| return fmt.Errorf("get object failed, err: %w", err) | |||
| } | |||
| if obj.Redundancy != models.RedundancyRep { | |||
| return fmt.Errorf("object is not a rep object") | |||
| } | |||
| _, err = db.UpdateFileInfo(ctx, objectID, fileSize) | |||
| _, err := db.UpdateFileInfo(ctx, objectID, fileSize) | |||
| if err != nil { | |||
| if err != nil { | |||
| return fmt.Errorf("update rep object failed, err: %w", err) | |||
| @@ -189,18 +81,13 @@ func (db *ObjectDB) UpdateRepObject(ctx SQLContext, objectID int64, fileSize int | |||
| return fmt.Errorf("get object rep failed, err: %w", err) | |||
| } | |||
| // 如果新文件与旧文件的Hash不同,则需要更新关联的FileHash,设置Storage中的文件已过期,重新插入Cache记录 | |||
| // 如果新文件与旧文件的Hash不同,则需要更新关联的FileHash,重新插入Cache记录 | |||
| if objRep.FileHash != fileHash { | |||
| _, err := db.ObjectRep().UpdateFileHash(ctx, objectID, fileHash) | |||
| _, err := db.ObjectRep().Update(ctx, objectID, fileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("update rep object file hash failed, err: %w", err) | |||
| } | |||
| _, err = db.StorageObject().SetAllObjectOutdated(ctx, objectID) | |||
| if err != nil { | |||
| return fmt.Errorf("set storage object outdated failed, err: %w", err) | |||
| } | |||
| for _, nodeID := range nodeIDs { | |||
| err := db.Cache().CreatePinned(ctx, fileHash, nodeID, 0) //priority = 0 | |||
| if err != nil { | |||
| @@ -210,7 +97,6 @@ func (db *ObjectDB) UpdateRepObject(ctx SQLContext, objectID int64, fileSize int | |||
| } else { | |||
| // 如果相同,则只增加Cache中不存在的记录 | |||
| cachedNodes, err := db.Cache().GetCachingFileNodes(ctx, fileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("find caching file nodes failed, err: %w", err) | |||
| @@ -233,84 +119,157 @@ func (db *ObjectDB) UpdateRepObject(ctx SQLContext, objectID int64, fileSize int | |||
| return nil | |||
| } | |||
| // SoftDelete 设置一个对象被删除,并将相关数据删除 | |||
| func (db *ObjectDB) SoftDelete(ctx SQLContext, objectID int64) error { | |||
| obj, err := db.GetByID(ctx, objectID) | |||
| func (*ObjectDB) BatchGetAllEcObjectIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| rep := "rep" | |||
| err := sqlx.Select(ctx, &ret, "SELECT ObjectID FROM object where Redundancy != ? limit ?, ?", rep, start, count) | |||
| return ret, err | |||
| } | |||
| func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID int64, fileSize int64) (bool, error) { | |||
| ret, err := ctx.Exec("update Object set FileSize = ? where ObjectID = ?", fileSize, objectID) | |||
| if err != nil { | |||
| return false, err | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return fmt.Errorf("get object failed, err: %w", err) | |||
| return false, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return cnt > 0, nil | |||
| } | |||
| func (*ObjectDB) GetPackageObjects(ctx SQLContext, packageID int64) ([]model.Object, error) { | |||
| var ret []model.Object | |||
| err := sqlx.Select(ctx, &ret, "select * from Object where PackageID = ? order by ObjectID asc", packageID) | |||
| return ret, err | |||
| } | |||
| func (db *ObjectDB) BatchAddRep(ctx SQLContext, packageID int64, objs []coormq.AddRepObjectInfo) ([]int64, error) { | |||
| var objIDs []int64 | |||
| for _, obj := range objs { | |||
| // 创建对象的记录 | |||
| objID, isCreate, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object: %w", err) | |||
| } | |||
| objIDs = append(objIDs, objID) | |||
| if isCreate { | |||
| if err := db.createRep(ctx, objID, obj); err != nil { | |||
| return nil, err | |||
| } | |||
| } else { | |||
| if err := db.updateRep(ctx, objID, obj); err != nil { | |||
| return nil, err | |||
| } | |||
| } | |||
| } | |||
| // 不是正常状态的Object,则不删除 | |||
| // TODO 未来可能有其他状态 | |||
| if obj.State != consts.ObjectStateNormal { | |||
| return nil | |||
| return objIDs, nil | |||
| } | |||
| func (db *ObjectDB) createRep(ctx SQLContext, objID int64, obj coormq.AddRepObjectInfo) error { | |||
| // 创建对象副本的记录 | |||
| if err := db.ObjectRep().Create(ctx, objID, obj.FileHash); err != nil { | |||
| return fmt.Errorf("creating object rep: %w", err) | |||
| } | |||
| err = db.ChangeState(ctx, objectID, consts.ObjectStateDeleted) | |||
| // 创建缓存记录 | |||
| priority := 0 //优先级暂时设置为0 | |||
| for _, nodeID := range obj.NodeIDs { | |||
| if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, priority); err != nil { | |||
| return fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| func (db *ObjectDB) updateRep(ctx SQLContext, objID int64, obj coormq.AddRepObjectInfo) error { | |||
| objRep, err := db.ObjectRep().GetByID(ctx, objID) | |||
| if err != nil { | |||
| return fmt.Errorf("change object state failed, err: %w", err) | |||
| return fmt.Errorf("getting object rep: %w", err) | |||
| } | |||
| if obj.Redundancy == models.RedundancyRep { | |||
| err = db.ObjectRep().Delete(ctx, objectID) | |||
| // 如果新文件与旧文件的Hash不同,则需要更新关联的FileHash,重新插入Cache记录 | |||
| if objRep.FileHash != obj.FileHash { | |||
| _, err := db.ObjectRep().Update(ctx, objID, obj.FileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| return fmt.Errorf("updating rep object file hash: %w", err) | |||
| } | |||
| for _, nodeID := range obj.NodeIDs { | |||
| if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, 0); err != nil { | |||
| return fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| } else { | |||
| err = db.ObjectBlock().Delete(ctx, objectID) | |||
| // 如果相同,则只增加Cache中不存在的记录 | |||
| cachedNodes, err := db.Cache().GetCachingFileNodes(ctx, obj.FileHash) | |||
| if err != nil { | |||
| return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| return fmt.Errorf("finding caching file nodes: %w", err) | |||
| } | |||
| } | |||
| _, err = db.StorageObject().SetAllObjectDeleted(ctx, objectID) | |||
| if err != nil { | |||
| return fmt.Errorf("set storage object deleted failed, err: %w", err) | |||
| // 筛选出不在cachedNodes中的id | |||
| newNodeIDs := lo.Filter(obj.NodeIDs, func(id int64, index int) bool { | |||
| return lo.NoneBy(cachedNodes, func(node model.Node) bool { | |||
| return node.NodeID == id | |||
| }) | |||
| }) | |||
| for _, nodeID := range newNodeIDs { | |||
| if err := db.Cache().CreatePinned(ctx, obj.FileHash, nodeID, 0); err != nil { | |||
| return fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| } | |||
| return nil | |||
| } | |||
| // DeleteUnused 删除一个已经是Deleted状态,且不再被使用的对象。目前可能被使用的地方只有StorageObject | |||
| func (ObjectDB) DeleteUnused(ctx SQLContext, objectID int64) error { | |||
| _, err := ctx.Exec("delete from Object where ObjectID = ? and State = ? and "+ | |||
| "not exists(select StorageID from StorageObject where ObjectID = ?)", | |||
| objectID, | |||
| consts.ObjectStateDeleted, | |||
| objectID, | |||
| ) | |||
| func (db *ObjectDB) BatchAddEC(ctx SQLContext, packageID int64, objs []coormq.AddECObjectInfo) ([]int64, error) { | |||
| objIDs := make([]int64, 0, len(objs)) | |||
| for _, obj := range objs { | |||
| // 创建对象的记录 | |||
| objID, isCreate, err := db.CreateOrUpdate(ctx, packageID, obj.Path, obj.Size) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object: %w", err) | |||
| } | |||
| return err | |||
| } | |||
| objIDs = append(objIDs, objID) | |||
| func (*ObjectDB) BatchGetAllObjectIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| err := sqlx.Select(ctx, &ret, "select ObjectID from Object limit ?, ?", start, count) | |||
| return ret, err | |||
| } | |||
| if !isCreate { | |||
| // 删除原本所有的编码块记录,重新添加 | |||
| if err = db.ObjectBlock().DeleteObjectAll(ctx, objID); err != nil { | |||
| return nil, fmt.Errorf("deleting all object block: %w", err) | |||
| } | |||
| func (*ObjectDB) BatchGetAllEcObjectIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| rep := "rep" | |||
| err := sqlx.Select(ctx, &ret, "SELECT ObjectID FROM object where Redundancy != ? limit ?, ?", rep, start, count) | |||
| return ret, err | |||
| } | |||
| } | |||
| func (*ObjectDB) ChangeState(ctx SQLContext, objectID int64, state string) error { | |||
| _, err := ctx.Exec("update Object set State = ? where ObjectID = ?", state, objectID) | |||
| return err | |||
| } | |||
| // 创建编码块的记录 | |||
| for i := 0; i < len(obj.FileHashes); i++ { | |||
| err := db.ObjectBlock().Create(ctx, objID, i, obj.FileHashes[i]) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating object block: %w", err) | |||
| } | |||
| } | |||
| func (*ObjectDB) UpdateFileInfo(ctx SQLContext, objectID int64, fileSize int64) (bool, error) { | |||
| ret, err := ctx.Exec("update Object set FileSize = ? where ObjectID = ?", fileSize, objectID) | |||
| if err != nil { | |||
| return false, err | |||
| // 创建缓存记录 | |||
| priority := 0 //优先级暂时设置为0 | |||
| for i, nodeID := range obj.NodeIDs { | |||
| err = db.Cache().CreatePinned(ctx, obj.FileHashes[i], nodeID, priority) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("creating cache: %w", err) | |||
| } | |||
| } | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return false, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return objIDs, nil | |||
| } | |||
| return cnt > 0, nil | |||
| func (*ObjectDB) BatchDelete(ctx SQLContext, ids []int64) error { | |||
| _, err := ctx.Exec("delete from Object where ObjectID in (?)", ids) | |||
| return err | |||
| } | |||
| @@ -2,9 +2,11 @@ package db | |||
| import ( | |||
| "database/sql" | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| @@ -16,7 +18,12 @@ func (db *DB) ObjectBlock() *ObjectBlockDB { | |||
| return &ObjectBlockDB{DB: db} | |||
| } | |||
| func (db *ObjectBlockDB) Delete(ctx SQLContext, objectID int64) error { | |||
| func (db *ObjectBlockDB) Create(ctx SQLContext, objectID int64, index int, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectBlock(ObjectID, Index, FileHash) values(?,?,?)", objectID, index, fileHash) | |||
| return err | |||
| } | |||
| func (db *ObjectBlockDB) DeleteObjectAll(ctx SQLContext, objectID int64) error { | |||
| _, err := ctx.Exec("delete from ObjectBlock where ObjectID = ?", objectID) | |||
| return err | |||
| } | |||
| @@ -24,9 +31,10 @@ func (db *ObjectBlockDB) Delete(ctx SQLContext, objectID int64) error { | |||
| func (db *ObjectBlockDB) CountBlockWithHash(ctx SQLContext, fileHash string) (int, error) { | |||
| var cnt int | |||
| err := sqlx.Get(ctx, &cnt, | |||
| "select count(BlockHash) from ObjectBlock, Object where BlockHash = ? and "+ | |||
| "select count(FileHash) from ObjectBlock, Object, Package where FileHash = ? and "+ | |||
| "ObjectBlock.ObjectID = Object.ObjectID and "+ | |||
| "Object.State = ?", fileHash, consts.ObjectStateNormal) | |||
| "Object.PackageID = Package.PackageID and "+ | |||
| "Package.State = ?", fileHash, consts.PackageStateNormal) | |||
| if err == sql.ErrNoRows { | |||
| return 0, nil | |||
| } | |||
| @@ -43,7 +51,7 @@ func (db *ObjectBlockDB) GetBatchObjectBlocks(ctx SQLContext, objectIDs []int64) | |||
| err = db.d.Select(&x, sql, objectID) | |||
| xx := make([]string, len(x)) | |||
| for ii := 0; ii < len(x); ii++ { | |||
| xx[x[ii].InnerID] = x[ii].BlockHash | |||
| xx[x[ii].Index] = x[ii].FileHash | |||
| } | |||
| blocks[i] = xx | |||
| } | |||
| @@ -70,3 +78,49 @@ func (db *ObjectBlockDB) GetBatchBlocksNodes(ctx SQLContext, hashs [][]string) ( | |||
| } | |||
| return nodes, err | |||
| } | |||
| func (db *ObjectBlockDB) GetWithNodeIDInPackage(ctx SQLContext, packageID int64) ([]models.ObjectECData, error) { | |||
| var objectIDs []int64 | |||
| err := sqlx.Select(ctx, &objectIDs, "select ObjectID from Object where PackageID = ? order by ObjectID asc", packageID) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("query objectIDs: %w", err) | |||
| } | |||
| rets := make([]models.ObjectECData, 0, len(objectIDs)) | |||
| for _, objID := range objectIDs { | |||
| var tmpRets []struct { | |||
| Index int `db:"Index"` | |||
| FileHash string `db:"FileHash"` | |||
| NodeIDs *string `db:"NodeIDs"` | |||
| } | |||
| err := sqlx.Select(ctx, | |||
| &tmpRets, | |||
| "select ObjectBlock.Index, ObjectBlock.FileHash, group_concat(NodeID) as NodeIDs from ObjectBlock "+ | |||
| "left join Cache on ObjectBlock.FileHash = Cache.FileHash"+ | |||
| "where ObjectID = ? group by ObjectBlock.Index, ObjectBlock.FileHash", | |||
| objID, | |||
| ) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| blocks := make([]models.ObjectBlockData, 0, len(tmpRets)) | |||
| for _, tmp := range tmpRets { | |||
| var block models.ObjectBlockData | |||
| block.Index = tmp.Index | |||
| block.FileHash = tmp.FileHash | |||
| if tmp.NodeIDs != nil { | |||
| block.NodeIDs = splitIDStringUnsafe(*tmp.NodeIDs) | |||
| } | |||
| blocks = append(blocks, block) | |||
| } | |||
| rets = append(rets, models.NewObjectECData(blocks)) | |||
| } | |||
| return rets, nil | |||
| } | |||
| @@ -3,9 +3,12 @@ package db | |||
| import ( | |||
| "database/sql" | |||
| "fmt" | |||
| "strconv" | |||
| "strings" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| @@ -24,7 +27,12 @@ func (db *ObjectRepDB) GetByID(ctx SQLContext, objectID int64) (model.ObjectRep, | |||
| return ret, err | |||
| } | |||
| func (db *ObjectRepDB) UpdateFileHash(ctx SQLContext, objectID int64, fileHash string) (int64, error) { | |||
| func (db *ObjectRepDB) Create(ctx SQLContext, objectID int64, fileHash string) error { | |||
| _, err := ctx.Exec("insert into ObjectRep(ObjectID, FileHash) values(?,?)", objectID, fileHash) | |||
| return err | |||
| } | |||
| func (db *ObjectRepDB) Update(ctx SQLContext, objectID int64, fileHash string) (int64, error) { | |||
| ret, err := ctx.Exec("update ObjectRep set FileHash = ? where ObjectID = ?", fileHash, objectID) | |||
| if err != nil { | |||
| return 0, err | |||
| @@ -46,9 +54,10 @@ func (db *ObjectRepDB) Delete(ctx SQLContext, objectID int64) error { | |||
| func (db *ObjectRepDB) GetFileMaxRepCount(ctx SQLContext, fileHash string) (int, error) { | |||
| var maxRepCnt *int | |||
| err := sqlx.Get(ctx, &maxRepCnt, | |||
| "select max(RepCount) from ObjectRep, Object where FileHash = ? and "+ | |||
| "select max(RepCount) from ObjectRep, Object, Package where FileHash = ? and "+ | |||
| "ObjectRep.ObjectID = Object.ObjectID and "+ | |||
| "Object.State = ?", fileHash, consts.ObjectStateNormal) | |||
| "Object.PackageID = Package.PackageID and "+ | |||
| "Package.State = ?", fileHash, consts.PackageStateNormal) | |||
| if err == sql.ErrNoRows { | |||
| return 0, nil | |||
| @@ -64,3 +73,56 @@ func (db *ObjectRepDB) GetFileMaxRepCount(ctx SQLContext, fileHash string) (int, | |||
| return *maxRepCnt, err | |||
| } | |||
| func (db *ObjectRepDB) GetWithNodeIDInPackage(ctx SQLContext, packageID int64) ([]models.ObjectRepData, error) { | |||
| var tmpRets []struct { | |||
| ObjectID int64 `db:"ObjectID"` | |||
| FileHash *string `db:"FileHash"` | |||
| NodeIDs *string `db:"NodeIDs"` | |||
| } | |||
| err := sqlx.Select(ctx, | |||
| &tmpRets, | |||
| "select Object.ObjectID, ObjectRep.FileHash, group_concat(NodeID) as NodeIDs from Object "+ | |||
| "left join ObjectRep on Object.ObjectID = ObjectRep.ObjectID "+ | |||
| "left join Cache on ObjectRep.FileHash = Cache.FileHash"+ | |||
| "where PackageID = ? group by Object.ObjectID order by Object.ObjectID asc", | |||
| packageID, | |||
| ) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| rets := make([]models.ObjectRepData, 0, len(tmpRets)) | |||
| for _, tmp := range tmpRets { | |||
| var repData models.ObjectRepData | |||
| repData.ObjectID = tmp.ObjectID | |||
| if tmp.FileHash != nil { | |||
| repData.FileHash = *tmp.FileHash | |||
| } | |||
| if tmp.NodeIDs != nil { | |||
| repData.NodeIDs = splitIDStringUnsafe(*tmp.NodeIDs) | |||
| } | |||
| rets = append(rets, repData) | |||
| } | |||
| return rets, nil | |||
| } | |||
// splitIDStringUnsafe splits a comma-separated string and parses each part as
// a base-10 int64 ID.
// Note: the caller must guarantee every part is a well-formed decimal number;
// parse errors are silently ignored and yield a zero ID.
func splitIDStringUnsafe(idStr string) []int64 {
	parts := strings.Split(idStr, ",")
	ids := make([]int64, len(parts))
	for i, part := range parts {
		// Input is assumed valid, so the parse error is deliberately dropped.
		id, _ := strconv.ParseInt(part, 10, 64)
		ids[i] = id
	}
	return ids
}
| @@ -0,0 +1,167 @@ | |||
| package db | |||
| import ( | |||
| "database/sql" | |||
| "errors" | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/utils/serder" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// PackageDB groups the operations on the Package table. It embeds *DB so
// methods can reach the other table accessors (e.g. StoragePackage()).
type PackageDB struct {
	*DB
}
| func (db *DB) Package() *PackageDB { | |||
| return &PackageDB{DB: db} | |||
| } | |||
| func (db *PackageDB) GetByID(ctx SQLContext, packageID int64) (model.Package, error) { | |||
| var ret model.Package | |||
| err := sqlx.Get(ctx, &ret, "select * from Package where PackageID = ?", packageID) | |||
| return ret, err | |||
| } | |||
| func (db *PackageDB) GetByName(ctx SQLContext, bucketID int64, name string) (model.Package, error) { | |||
| var ret model.Package | |||
| err := sqlx.Get(ctx, &ret, "select * from Package where BucketID = ? and Name = ?", bucketID, name) | |||
| return ret, err | |||
| } | |||
| func (*PackageDB) BatchGetAllPackageIDs(ctx SQLContext, start int, count int) ([]int64, error) { | |||
| var ret []int64 | |||
| err := sqlx.Select(ctx, &ret, "select PackageID from Package limit ?, ?", start, count) | |||
| return ret, err | |||
| } | |||
| func (db *PackageDB) GetBucketPackages(ctx SQLContext, userID int64, bucketID int64) ([]model.Package, error) { | |||
| var ret []model.Package | |||
| err := sqlx.Select(ctx, &ret, "select Package.* from UserBucket, Package where UserID = ? and UserBucket.BucketID = ? and UserBucket.BucketID = Package.BucketID", userID, bucketID) | |||
| return ret, err | |||
| } | |||
| // IsAvailable 判断一个用户是否拥有指定对象 | |||
| func (db *PackageDB) IsAvailable(ctx SQLContext, userID int64, packageID int64) (bool, error) { | |||
| var objID int64 | |||
| // 先根据PackageID找到Package,然后判断此Package所在的Bucket是不是归此用户所有 | |||
| err := sqlx.Get(ctx, &objID, | |||
| "select Package.PackageID from Package, UserBucket where "+ | |||
| "Package.PackageID = ? and "+ | |||
| "Package.BucketID = UserBucket.BucketID and "+ | |||
| "UserBucket.UserID = ?", | |||
| packageID, userID) | |||
| if err == sql.ErrNoRows { | |||
| return false, nil | |||
| } | |||
| if err != nil { | |||
| return false, fmt.Errorf("find package failed, err: %w", err) | |||
| } | |||
| return true, nil | |||
| } | |||
| // GetUserPackage 获得Package,如果用户没有权限访问,则不会获得结果 | |||
| func (db *PackageDB) GetUserPackage(ctx SQLContext, userID int64, packageID int64) (model.Package, error) { | |||
| var ret model.Package | |||
| err := sqlx.Get(ctx, &ret, | |||
| "select Package.* from Package, UserBucket where "+ | |||
| "Package.PackageID = ? and "+ | |||
| "Package.BucketID = UserBucket.BucketID and "+ | |||
| "UserBucket.UserID = ?", | |||
| packageID, userID) | |||
| return ret, err | |||
| } | |||
| func (db *PackageDB) Create(ctx SQLContext, bucketID int64, name string, redundancy models.TypedRedundancyInfo) (int64, error) { | |||
| // 根据packagename和bucketid查询,若不存在则插入,若存在则返回错误 | |||
| var packageID int64 | |||
| err := sqlx.Get(ctx, &packageID, "select PackageID from Package where Name = ? AND BucketID = ?", name, bucketID) | |||
| // 无错误代表存在记录 | |||
| if err == nil { | |||
| return 0, fmt.Errorf("package with given Name and BucketID already exists") | |||
| } | |||
| // 错误不是记录不存在 | |||
| if !errors.Is(err, sql.ErrNoRows) { | |||
| return 0, fmt.Errorf("query Package by PackageName and BucketID failed, err: %w", err) | |||
| } | |||
| redundancyJSON, err := serder.ObjectToJSON(redundancy) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("redundancy to json: %w", err) | |||
| } | |||
| sql := "insert into Package(Name, BucketID, State, Redundancy) values(?,?,?,?)" | |||
| r, err := ctx.Exec(sql, name, bucketID, consts.PackageStateNormal, redundancyJSON) | |||
| if err != nil { | |||
| return 0, fmt.Errorf("insert package failed, err: %w", err) | |||
| } | |||
| packageID, err = r.LastInsertId() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get id of inserted package failed, err: %w", err) | |||
| } | |||
| return packageID, nil | |||
| } | |||
| // SoftDelete 设置一个对象被删除,并将相关数据删除 | |||
| func (db *PackageDB) SoftDelete(ctx SQLContext, packageID int64) error { | |||
| obj, err := db.GetByID(ctx, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("get package failed, err: %w", err) | |||
| } | |||
| // 不是正常状态的Package,则不删除 | |||
| // TODO 未来可能有其他状态 | |||
| if obj.State != consts.PackageStateNormal { | |||
| return nil | |||
| } | |||
| err = db.ChangeState(ctx, packageID, consts.PackageStateDeleted) | |||
| if err != nil { | |||
| return fmt.Errorf("change package state failed, err: %w", err) | |||
| } | |||
| if obj.Redundancy.Type == models.RedundancyRep { | |||
| // TODO2 | |||
| //err = db.ObjectRep().Delete(ctx, objectID) | |||
| //if err != nil { | |||
| // return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| //} | |||
| } else { | |||
| //err = db.ObjectBlock().Delete(ctx, objectID) | |||
| //if err != nil { | |||
| // return fmt.Errorf("delete from object rep failed, err: %w", err) | |||
| //} | |||
| } | |||
| _, err = db.StoragePackage().SetAllPackageDeleted(ctx, packageID) | |||
| if err != nil { | |||
| return fmt.Errorf("set storage package deleted failed, err: %w", err) | |||
| } | |||
| return nil | |||
| } | |||
| // DeleteUnused 删除一个已经是Deleted状态,且不再被使用的对象。目前可能被使用的地方只有StoragePackage | |||
| func (PackageDB) DeleteUnused(ctx SQLContext, packageID int64) error { | |||
| _, err := ctx.Exec("delete from Package where PackageID = ? and State = ? and "+ | |||
| "not exists(select StorageID from StoragePackage where PackageID = ?)", | |||
| packageID, | |||
| consts.PackageStateDeleted, | |||
| packageID, | |||
| ) | |||
| return err | |||
| } | |||
| func (*PackageDB) ChangeState(ctx SQLContext, packageID int64, state string) error { | |||
| _, err := ctx.Exec("update Package set State = ? where PackageID = ?", state, packageID) | |||
| return err | |||
| } | |||
| @@ -1,116 +0,0 @@ | |||
| package db | |||
| import ( | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// StorageObjectDB groups the operations on the StorageObject table, which
// records which objects have been loaded onto which storage, by which user.
type StorageObjectDB struct {
	*DB
}
| func (db *DB) StorageObject() *StorageObjectDB { | |||
| return &StorageObjectDB{DB: db} | |||
| } | |||
| func (*StorageObjectDB) Get(ctx SQLContext, storageID int64, objectID int64, userID int64) (model.StorageObject, error) { | |||
| var ret model.StorageObject | |||
| err := sqlx.Get(ctx, &ret, "select * from StorageObject where StorageID = ? and ObjectID = ? and UserID = ?", storageID, objectID, userID) | |||
| return ret, err | |||
| } | |||
| func (*StorageObjectDB) GetAllByStorageAndObjectID(ctx SQLContext, storageID int64, objectID int64) ([]model.StorageObject, error) { | |||
| var ret []model.StorageObject | |||
| err := sqlx.Select(ctx, &ret, "select * from StorageObject where StorageID = ? and ObjectID = ?", storageID, objectID) | |||
| return ret, err | |||
| } | |||
| func (*StorageObjectDB) GetAllByStorageID(ctx SQLContext, storageID int64) ([]model.StorageObject, error) { | |||
| var ret []model.StorageObject | |||
| err := sqlx.Select(ctx, &ret, "select * from StorageObject where StorageID = ?", storageID) | |||
| return ret, err | |||
| } | |||
| func (*StorageObjectDB) MoveObjectTo(ctx SQLContext, objectID int64, storageID int64, userID int64) error { | |||
| _, err := ctx.Exec("insert into StorageObject values(?,?,?,?)", objectID, storageID, userID, consts.StorageObjectStateNormal) | |||
| return err | |||
| } | |||
| func (*StorageObjectDB) ChangeState(ctx SQLContext, storageID int64, objectID int64, userID int64, state string) error { | |||
| _, err := ctx.Exec("update StorageObject set State = ? where StorageID = ? and ObjectID = ? and UserID = ?", state, storageID, objectID, userID) | |||
| return err | |||
| } | |||
| // SetStateNormal 将状态设置为Normal,如果记录状态是Deleted,则不进行操作 | |||
| func (*StorageObjectDB) SetStateNormal(ctx SQLContext, storageID int64, objectID int64, userID int64) error { | |||
| _, err := ctx.Exec("update StorageObject set State = ? where StorageID = ? and ObjectID = ? and UserID = ? and State <> ?", | |||
| consts.StorageObjectStateNormal, | |||
| storageID, | |||
| objectID, | |||
| userID, | |||
| consts.StorageObjectStateDeleted, | |||
| ) | |||
| return err | |||
| } | |||
| func (*StorageObjectDB) SetAllObjectState(ctx SQLContext, objectID int64, state string) (int64, error) { | |||
| ret, err := ctx.Exec( | |||
| "update StorageObject set State = ? where ObjectID = ?", | |||
| state, | |||
| objectID, | |||
| ) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return cnt, nil | |||
| } | |||
| // SetAllObjectOutdated 将Storage中指定对象设置为已过期。 | |||
| // 注:只会设置Normal状态的对象 | |||
| func (*StorageObjectDB) SetAllObjectOutdated(ctx SQLContext, objectID int64) (int64, error) { | |||
| ret, err := ctx.Exec( | |||
| "update StorageObject set State = ? where State = ? and ObjectID = ?", | |||
| consts.StorageObjectStateOutdated, | |||
| consts.StorageObjectStateNormal, | |||
| objectID, | |||
| ) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return cnt, nil | |||
| } | |||
// SetAllObjectDeleted marks the object as deleted on every storage; it is a
// convenience wrapper over SetAllObjectState and returns the affected count.
func (db *StorageObjectDB) SetAllObjectDeleted(ctx SQLContext, objectID int64) (int64, error) {
	return db.SetAllObjectState(ctx, objectID, consts.StorageObjectStateDeleted)
}
| func (*StorageObjectDB) Delete(ctx SQLContext, storageID int64, objectID int64, userID int64) error { | |||
| _, err := ctx.Exec("delete from StorageObject where StorageID = ? and ObjectID = ? and UserID = ?", storageID, objectID, userID) | |||
| return err | |||
| } | |||
| // FindObjectStorages 查询存储了指定对象的Storage | |||
| func (*StorageObjectDB) FindObjectStorages(ctx SQLContext, objectID int64) ([]model.Storage, error) { | |||
| var ret []model.Storage | |||
| err := sqlx.Select(ctx, &ret, | |||
| "select Storage.* from StorageObject, Storage where ObjectID = ? and "+ | |||
| "StorageObject.StorageID = Storage.StorageID", | |||
| objectID, | |||
| ) | |||
| return ret, err | |||
| } | |||
| @@ -0,0 +1,116 @@ | |||
| package db | |||
| import ( | |||
| "fmt" | |||
| "github.com/jmoiron/sqlx" | |||
| "gitlink.org.cn/cloudream/storage-common/consts" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// StoragePackageDB groups the operations on the StoragePackage table, which
// records which packages have been loaded onto which storage, by which user.
type StoragePackageDB struct {
	*DB
}
| func (db *DB) StoragePackage() *StoragePackageDB { | |||
| return &StoragePackageDB{DB: db} | |||
| } | |||
| func (*StoragePackageDB) Get(ctx SQLContext, storageID int64, packageID int64, userID int64) (model.StoragePackage, error) { | |||
| var ret model.StoragePackage | |||
| err := sqlx.Get(ctx, &ret, "select * from StoragePackage where StorageID = ? and PackageID = ? and UserID = ?", storageID, packageID, userID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageDB) GetAllByStorageAndPackageID(ctx SQLContext, storageID int64, packageID int64) ([]model.StoragePackage, error) { | |||
| var ret []model.StoragePackage | |||
| err := sqlx.Select(ctx, &ret, "select * from StoragePackage where StorageID = ? and PackageID = ?", storageID, packageID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageDB) GetAllByStorageID(ctx SQLContext, storageID int64) ([]model.StoragePackage, error) { | |||
| var ret []model.StoragePackage | |||
| err := sqlx.Select(ctx, &ret, "select * from StoragePackage where StorageID = ?", storageID) | |||
| return ret, err | |||
| } | |||
| func (*StoragePackageDB) MovePackageTo(ctx SQLContext, packageID int64, storageID int64, userID int64) error { | |||
| _, err := ctx.Exec("insert into StoragePackage values(?,?,?,?)", packageID, storageID, userID, consts.StoragePackageStateNormal) | |||
| return err | |||
| } | |||
| func (*StoragePackageDB) ChangeState(ctx SQLContext, storageID int64, packageID int64, userID int64, state string) error { | |||
| _, err := ctx.Exec("update StoragePackage set State = ? where StorageID = ? and PackageID = ? and UserID = ?", state, storageID, packageID, userID) | |||
| return err | |||
| } | |||
| // SetStateNormal 将状态设置为Normal,如果记录状态是Deleted,则不进行操作 | |||
| func (*StoragePackageDB) SetStateNormal(ctx SQLContext, storageID int64, packageID int64, userID int64) error { | |||
| _, err := ctx.Exec("update StoragePackage set State = ? where StorageID = ? and PackageID = ? and UserID = ? and State <> ?", | |||
| consts.StoragePackageStateNormal, | |||
| storageID, | |||
| packageID, | |||
| userID, | |||
| consts.StoragePackageStateDeleted, | |||
| ) | |||
| return err | |||
| } | |||
| func (*StoragePackageDB) SetAllPackageState(ctx SQLContext, packageID int64, state string) (int64, error) { | |||
| ret, err := ctx.Exec( | |||
| "update StoragePackage set State = ? where PackageID = ?", | |||
| state, | |||
| packageID, | |||
| ) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return cnt, nil | |||
| } | |||
| // SetAllPackageOutdated 将Storage中指定对象设置为已过期。 | |||
| // 注:只会设置Normal状态的对象 | |||
| func (*StoragePackageDB) SetAllPackageOutdated(ctx SQLContext, packageID int64) (int64, error) { | |||
| ret, err := ctx.Exec( | |||
| "update StoragePackage set State = ? where State = ? and PackageID = ?", | |||
| consts.StoragePackageStateOutdated, | |||
| consts.StoragePackageStateNormal, | |||
| packageID, | |||
| ) | |||
| if err != nil { | |||
| return 0, err | |||
| } | |||
| cnt, err := ret.RowsAffected() | |||
| if err != nil { | |||
| return 0, fmt.Errorf("get affected rows failed, err: %w", err) | |||
| } | |||
| return cnt, nil | |||
| } | |||
// SetAllPackageDeleted marks the package as deleted on every storage; it is a
// convenience wrapper over SetAllPackageState and returns the affected count.
func (db *StoragePackageDB) SetAllPackageDeleted(ctx SQLContext, packageID int64) (int64, error) {
	return db.SetAllPackageState(ctx, packageID, consts.StoragePackageStateDeleted)
}
| func (*StoragePackageDB) Delete(ctx SQLContext, storageID int64, packageID int64, userID int64) error { | |||
| _, err := ctx.Exec("delete from StoragePackage where StorageID = ? and PackageID = ? and UserID = ?", storageID, packageID, userID) | |||
| return err | |||
| } | |||
| // FindPackageStorages 查询存储了指定对象的Storage | |||
| func (*StoragePackageDB) FindPackageStorages(ctx SQLContext, packageID int64) ([]model.Storage, error) { | |||
| var ret []model.Storage | |||
| err := sqlx.Select(ctx, &ret, | |||
| "select Storage.* from StoragePackage, Storage where PackageID = ? and "+ | |||
| "StoragePackage.StorageID = Storage.StorageID", | |||
| packageID, | |||
| ) | |||
| return ret, err | |||
| } | |||
| @@ -0,0 +1,339 @@ | |||
| package iterator | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "os" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock/reqbuilder" | |||
| distsvc "gitlink.org.cn/cloudream/common/pkgs/distlock/service" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/ec" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/credentials/insecure" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| agentcaller "gitlink.org.cn/cloudream/storage-common/pkgs/proto" | |||
| mygrpc "gitlink.org.cn/cloudream/storage-common/utils/grpc" | |||
| ) | |||
// ECObjectIterator iterates over a list of erasure-coded objects, producing a
// readable stream for each object on MoveNext.
type ECObjectIterator struct {
	objects      []model.Object         // objects to iterate over
	objectECData []models.ObjectECData  // per-object EC block metadata, parallel to objects
	currentIndex int                    // index of the next object to yield
	inited       bool                   // whether cliLocation has been resolved yet
	coorCli      *coormq.Client         // coordinator client used for node/location lookups
	distlock     *distsvc.Service       // distributed lock service (currently unused here)
	ec           model.Ec               // EC parameters (EcK data blocks out of EcN total)
	ecPacketSize int64                  // bytes per packet read from each block stream
	downloadConfig DownloadConfig       // download options (external IP, gRPC port, local IPFS)
	cliLocation    model.Location       // location of this client, resolved lazily on first MoveNext
}
| func NewECObjectIterator(objects []model.Object, objectECData []models.ObjectECData, coorCli *coormq.Client, distlock *distsvc.Service, ec model.Ec, ecPacketSize int64, downloadConfig DownloadConfig) *ECObjectIterator { | |||
| return &ECObjectIterator{ | |||
| objects: objects, | |||
| objectECData: objectECData, | |||
| coorCli: coorCli, | |||
| distlock: distlock, | |||
| ec: ec, | |||
| ecPacketSize: ecPacketSize, | |||
| downloadConfig: downloadConfig, | |||
| } | |||
| } | |||
// MoveNext advances the iterator and returns the next downloadable object,
// or ErrNoMoreItem once every object has been consumed.
func (i *ECObjectIterator) MoveNext() (*IterDownloadingObject, error) {
	// Resolve the client's location lazily on the first call.
	if !i.inited {
		i.inited = true
		findCliLocResp, err := i.coorCli.FindClientLocation(coormq.NewFindClientLocation(i.downloadConfig.ExternalIP))
		if err != nil {
			return nil, fmt.Errorf("finding client location: %w", err)
		}
		i.cliLocation = findCliLocResp.Location
	}
	if i.currentIndex >= len(i.objects) {
		return nil, ErrNoMoreItem
	}
	// The index is advanced even when doMove fails, skipping the bad object.
	item, err := i.doMove()
	i.currentIndex++
	return item, err
}
// doMove prepares the download of the object at currentIndex: it picks one
// cache node per data block, then starts an EC-decoding download and returns
// the resulting stream.
func (iter *ECObjectIterator) doMove() (*IterDownloadingObject, error) {
	obj := iter.objects[iter.currentIndex]
	ecData := iter.objectECData[iter.currentIndex]
	blocks := ecData.Blocks
	ec := iter.ec
	ecK := ec.EcK
	ecN := ec.EcN
	// Read blocks directly; prefer nodes on the client's local network.
	hashs := make([]string, ecK)
	nds := make([]DownloadNodeInfo, ecK)
	for i := 0; i < ecK; i++ {
		hashs[i] = blocks[i].FileHash
		getNodesResp, err := iter.coorCli.GetNodes(coormq.NewGetNodes(blocks[i].NodeIDs))
		if err != nil {
			return nil, fmt.Errorf("getting nodes: %w", err)
		}
		downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo {
			return DownloadNodeInfo{
				Node:           node,
				IsSameLocation: node.LocationID == iter.cliLocation.LocationID,
			}
		})
		nds[i] = iter.chooseDownloadNode(downloadNodes)
	}
	// nodeIDs/nodeIPs are laid out in block order, blocks 1..ecK.
	nodeIDs := make([]int64, ecK)
	nodeIPs := make([]string, ecK)
	for i := 0; i < ecK; i++ {
		nodeIDs[i] = nds[i].Node.NodeID
		nodeIPs[i] = nds[i].Node.ExternalIP
		// Same region as the client: use the node's internal address.
		if nds[i].IsSameLocation {
			nodeIPs[i] = nds[i].Node.LocalIP
			logger.Infof("client and node %d are at the same location, use local ip\n", nds[i].Node.NodeID)
		}
	}
	fileSize := obj.Size
	// The chosen blocks are exactly the first ecK data blocks.
	blockIDs := make([]int, ecK)
	for i := 0; i < ecK; i++ {
		blockIDs[i] = i
	}
	reader, err := iter.downloadEcObject(fileSize, ecK, ecN, blockIDs, nodeIDs, nodeIPs, hashs)
	if err != nil {
		return nil, fmt.Errorf("ec read failed, err: %w", err)
	}
	return &IterDownloadingObject{
		File: reader,
	}, nil
}
// Close releases resources held by the iterator; currently nothing to do.
func (i *ECObjectIterator) Close() {
}
| // chooseDownloadNode 选择一个下载节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (i *ECObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo { | |||
| sameLocationEntries := lo.Filter(entries, func(e DownloadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationEntries) > 0 { | |||
| return sameLocationEntries[rand.Intn(len(sameLocationEntries))] | |||
| } | |||
| return entries[rand.Intn(len(entries))] | |||
| } | |||
| func (i *ECObjectIterator) downloadObject(nodeID int64, nodeIP string, fileHash string) (io.ReadCloser, error) { | |||
| if i.downloadConfig.LocalIPFS != nil { | |||
| logger.Infof("try to use local IPFS to download file") | |||
| reader, err := i.downloadFromLocalIPFS(fileHash) | |||
| if err == nil { | |||
| return reader, nil | |||
| } | |||
| logger.Warnf("download from local IPFS failed, so try to download from node %s, err: %s", nodeIP, err.Error()) | |||
| } | |||
| return i.downloadFromNode(nodeID, nodeIP, fileHash) | |||
| } | |||
| func (iter *ECObjectIterator) downloadEcObject(fileSize int64, ecK int, ecN int, blockIDs []int, nodeIDs []int64, nodeIPs []string, hashs []string) (io.ReadCloser, error) { | |||
| // TODO zkx 先试用同步方式实现逻辑,做好错误处理。同时也方便下面直接使用uploadToNode和uploadToLocalIPFS来优化代码结构 | |||
| //wg := sync.WaitGroup{} | |||
| numPacket := (fileSize + int64(ecK)*iter.ecPacketSize - 1) / (int64(ecK) * iter.ecPacketSize) | |||
| getBufs := make([]chan []byte, ecN) | |||
| decodeBufs := make([]chan []byte, ecK) | |||
| for i := 0; i < ecN; i++ { | |||
| getBufs[i] = make(chan []byte) | |||
| } | |||
| for i := 0; i < ecK; i++ { | |||
| decodeBufs[i] = make(chan []byte) | |||
| } | |||
| for i := 0; i < len(blockIDs); i++ { | |||
| go iter.get(hashs[i], nodeIPs[i], getBufs[blockIDs[i]], numPacket) | |||
| } | |||
| print(numPacket) | |||
| go decode(getBufs[:], decodeBufs[:], blockIDs, ecK, numPacket) | |||
| r, w := io.Pipe() | |||
| //persist函数,将解码得到的文件写入pipe | |||
| go func() { | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| for j := 0; j < len(decodeBufs); j++ { | |||
| tmp := <-decodeBufs[j] | |||
| _, err := w.Write(tmp) | |||
| if err != nil { | |||
| fmt.Errorf("persist file falied, err:%w", err) | |||
| } | |||
| } | |||
| } | |||
| w.Close() | |||
| }() | |||
| return r, nil | |||
| } | |||
| func (iter *ECObjectIterator) get(fileHash string, nodeIP string, getBuf chan []byte, numPacket int64) error { | |||
| downloadFromAgent := false | |||
| //使用本地IPFS获取 | |||
| if iter.downloadConfig.LocalIPFS != nil { | |||
| logger.Infof("try to use local IPFS to download file") | |||
| //获取IPFS的reader | |||
| reader, err := iter.downloadFromLocalIPFS(fileHash) | |||
| if err != nil { | |||
| downloadFromAgent = true | |||
| fmt.Errorf("read ipfs block failed, err: %w", err) | |||
| } | |||
| defer reader.Close() | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| buf := make([]byte, iter.ecPacketSize) | |||
| _, err := io.ReadFull(reader, buf) | |||
| if err != nil { | |||
| downloadFromAgent = true | |||
| fmt.Errorf("read file falied, err:%w", err) | |||
| } | |||
| getBuf <- buf | |||
| } | |||
| if downloadFromAgent == false { | |||
| close(getBuf) | |||
| return nil | |||
| } | |||
| } else { | |||
| downloadFromAgent = true | |||
| } | |||
| //从agent获取 | |||
| if downloadFromAgent == true { | |||
| /*// 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(nodeID, fileHash). | |||
| MutexLock(svc.distlock) | |||
| if err != nil { | |||
| return fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| defer mutex.Unlock() | |||
| */ | |||
| // 连接grpc | |||
| grpcAddr := fmt.Sprintf("%s:%d", nodeIP, iter.downloadConfig.GRPCPort) | |||
| conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) | |||
| if err != nil { | |||
| return fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err) | |||
| } | |||
| // 下载文件 | |||
| client := agentcaller.NewFileTransportClient(conn) | |||
| reader, err := mygrpc.GetFileAsStream(client, fileHash) | |||
| if err != nil { | |||
| conn.Close() | |||
| return fmt.Errorf("request to get file failed, err: %w", err) | |||
| } | |||
| for index := 0; int64(index) < numPacket; index++ { | |||
| buf := make([]byte, iter.ecPacketSize) | |||
| _, _ = reader.Read(buf) | |||
| fmt.Println(buf) | |||
| fmt.Println(numPacket, "\n") | |||
| getBuf <- buf | |||
| } | |||
| close(getBuf) | |||
| reader.Close() | |||
| return nil | |||
| } | |||
| return nil | |||
| } | |||
| func decode(inBufs []chan []byte, outBufs []chan []byte, blockSeq []int, ecK int, numPacket int64) { | |||
| fmt.Println("decode ") | |||
| var tmpIn [][]byte | |||
| var zeroPkt []byte | |||
| tmpIn = make([][]byte, len(inBufs)) | |||
| hasBlock := map[int]bool{} | |||
| for j := 0; j < len(blockSeq); j++ { | |||
| hasBlock[blockSeq[j]] = true | |||
| } | |||
| needRepair := false //检测是否传入了所有数据块 | |||
| for j := 0; j < len(outBufs); j++ { | |||
| if blockSeq[j] != j { | |||
| needRepair = true | |||
| } | |||
| } | |||
| enc := ec.NewRsEnc(ecK, len(inBufs)) | |||
| for i := 0; int64(i) < numPacket; i++ { | |||
| print("!!!!!") | |||
| for j := 0; j < len(inBufs); j++ { | |||
| if hasBlock[j] { | |||
| tmpIn[j] = <-inBufs[j] | |||
| } else { | |||
| tmpIn[j] = zeroPkt | |||
| } | |||
| } | |||
| if needRepair { | |||
| err := enc.Repair(tmpIn) | |||
| if err != nil { | |||
| fmt.Fprintf(os.Stderr, "Decode Repair Error: %s", err.Error()) | |||
| } | |||
| } | |||
| for j := 0; j < len(outBufs); j++ { | |||
| outBufs[j] <- tmpIn[j] | |||
| } | |||
| } | |||
| for i := 0; i < len(outBufs); i++ { | |||
| close(outBufs[i]) | |||
| } | |||
| } | |||
| func (i *ECObjectIterator) downloadFromNode(nodeID int64, nodeIP string, fileHash string) (io.ReadCloser, error) { | |||
| // 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(nodeID, fileHash). | |||
| MutexLock(i.distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| // 连接grpc | |||
| grpcAddr := fmt.Sprintf("%s:%d", nodeIP, i.downloadConfig.GRPCPort) | |||
| conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err) | |||
| } | |||
| // 下载文件 | |||
| client := agentcaller.NewFileTransportClient(conn) | |||
| reader, err := mygrpc.GetFileAsStream(client, fileHash) | |||
| if err != nil { | |||
| conn.Close() | |||
| return nil, fmt.Errorf("request to get file failed, err: %w", err) | |||
| } | |||
| reader = myio.AfterReadClosed(reader, func(io.ReadCloser) { | |||
| conn.Close() | |||
| mutex.Unlock() | |||
| }) | |||
| return reader, nil | |||
| } | |||
| func (i *ECObjectIterator) downloadFromLocalIPFS(fileHash string) (io.ReadCloser, error) { | |||
| reader, err := i.downloadConfig.LocalIPFS.OpenRead(fileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("read ipfs file failed, err: %w", err) | |||
| } | |||
| return reader, nil | |||
| } | |||
| @@ -0,0 +1,45 @@ | |||
| package iterator | |||
| import ( | |||
| "mime/multipart" | |||
| ) | |||
// HTTPUploadingIterator yields one IterUploadingObject per file of a
// multipart HTTP upload.
type HTTPUploadingIterator struct {
	files        []*multipart.FileHeader // file parts of the multipart request
	currentIndex int                     // index of the next file to open
}
| func NewHTTPObjectIterator(files []*multipart.FileHeader) *HTTPUploadingIterator { | |||
| return &HTTPUploadingIterator{ | |||
| files: files, | |||
| } | |||
| } | |||
| func (i *HTTPUploadingIterator) MoveNext() (*IterUploadingObject, error) { | |||
| if i.currentIndex >= len(i.files) { | |||
| return nil, ErrNoMoreItem | |||
| } | |||
| item, err := i.doMove() | |||
| i.currentIndex++ | |||
| return item, err | |||
| } | |||
| func (i *HTTPUploadingIterator) doMove() (*IterUploadingObject, error) { | |||
| fileInfo := i.files[i.currentIndex] | |||
| file, err := fileInfo.Open() | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &IterUploadingObject{ | |||
| Path: fileInfo.Filename, | |||
| Size: fileInfo.Size, | |||
| File: file, | |||
| }, nil | |||
| } | |||
// Close releases resources held by the iterator. Files opened by MoveNext are
// owned by their IterUploadingObject, so there is nothing to do here.
func (i *HTTPUploadingIterator) Close() {
}
| @@ -0,0 +1,12 @@ | |||
| package iterator | |||
| import ( | |||
| "errors" | |||
| ) | |||
// ErrNoMoreItem is returned by MoveNext when an iterator is exhausted.
var ErrNoMoreItem = errors.New("no more item")

// Iterator is a generic pull-style iterator: MoveNext returns the next item,
// or ErrNoMoreItem once exhausted; Close releases any resources it holds.
type Iterator[T any] interface {
	MoveNext() (T, error)
	Close()
}
| @@ -0,0 +1,63 @@ | |||
| package iterator | |||
| import ( | |||
| "io" | |||
| "os" | |||
| "path/filepath" | |||
| "strings" | |||
| ) | |||
// UploadingObjectIterator yields the objects of an upload one by one.
type UploadingObjectIterator = Iterator[*IterUploadingObject]

// LocalUploadingIterator yields one IterUploadingObject per local file path.
type LocalUploadingIterator struct {
	pathRoot     string   // slash-normalized root, trimmed from yielded paths
	filePathes   []string // paths of the files to upload
	currentIndex int      // index of the next path to open
}

// IterUploadingObject is a single object to upload: its path (relative to the
// iteration root), its size in bytes, and an open reader over its content.
// The iterator's Close does not close File; the consumer owns it.
type IterUploadingObject struct {
	Path string
	Size int64
	File io.ReadCloser
}
| func NewUploadingObjectIterator(pathRoot string, filePathes []string) *LocalUploadingIterator { | |||
| return &LocalUploadingIterator{ | |||
| pathRoot: filepath.ToSlash(pathRoot), | |||
| filePathes: filePathes, | |||
| } | |||
| } | |||
| func (i *LocalUploadingIterator) MoveNext() (*IterUploadingObject, error) { | |||
| if i.currentIndex >= len(i.filePathes) { | |||
| return nil, ErrNoMoreItem | |||
| } | |||
| item, err := i.doMove() | |||
| i.currentIndex++ | |||
| return item, err | |||
| } | |||
| func (i *LocalUploadingIterator) doMove() (*IterUploadingObject, error) { | |||
| path := i.filePathes[i.currentIndex] | |||
| info, err := os.Stat(path) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| file, err := os.Open(path) | |||
| if err != nil { | |||
| return nil, err | |||
| } | |||
| return &IterUploadingObject{ | |||
| Path: strings.TrimPrefix(filepath.ToSlash(path), i.pathRoot), | |||
| Size: info.Size(), | |||
| File: file, | |||
| }, nil | |||
| } | |||
// Close releases resources held by the iterator. Files opened by MoveNext are
// owned by their IterUploadingObject, so there is nothing to do here.
func (i *LocalUploadingIterator) Close() {
}
| @@ -0,0 +1,196 @@ | |||
| package iterator | |||
| import ( | |||
| "fmt" | |||
| "io" | |||
| "math/rand" | |||
| "github.com/samber/lo" | |||
| "gitlink.org.cn/cloudream/common/pkgs/distlock/reqbuilder" | |||
| distsvc "gitlink.org.cn/cloudream/common/pkgs/distlock/service" | |||
| "gitlink.org.cn/cloudream/common/pkgs/logger" | |||
| "gitlink.org.cn/cloudream/common/utils/ipfs" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| coormq "gitlink.org.cn/cloudream/storage-common/pkgs/mq/coordinator" | |||
| "google.golang.org/grpc" | |||
| "google.golang.org/grpc/credentials/insecure" | |||
| myio "gitlink.org.cn/cloudream/common/utils/io" | |||
| agentcaller "gitlink.org.cn/cloudream/storage-common/pkgs/proto" | |||
| mygrpc "gitlink.org.cn/cloudream/storage-common/utils/grpc" | |||
| ) | |||
// DownloadingObjectIterator yields the objects of a download one by one.
type DownloadingObjectIterator = Iterator[*IterDownloadingObject]

// RepObjectIterator downloads objects stored with replica redundancy,
// yielding one IterDownloadingObject per object.
type RepObjectIterator struct {
	objects       []model.Object         // objects to download
	objectRepData []models.ObjectRepData // replica info, parallel to objects
	currentIndex  int                    // index of the next object
	inited        bool                   // whether cliLocation has been resolved

	coorCli        *coormq.Client   // coordinator MQ client
	distlock       *distsvc.Service // distributed lock service
	downloadConfig DownloadConfig
	cliLocation    model.Location // this client's location, resolved on first MoveNext
}

// IterDownloadingObject pairs an object's metadata with a reader over its content.
type IterDownloadingObject struct {
	Object model.Object
	File   io.ReadCloser
}

// DownloadNodeInfo is a candidate node for downloading, with a flag telling
// whether it is in the same location as the client.
type DownloadNodeInfo struct {
	Node           model.Node
	IsSameLocation bool
}

// DownloadConfig carries everything needed to fetch object content.
type DownloadConfig struct {
	LocalIPFS   *ipfs.IPFS // non-nil when a local IPFS daemon is available
	LocalNodeID *int64     // ID of the local node, if any — presumably nil when remote; confirm with callers
	ExternalIP  string     // this client's external IP, used to resolve its location
	GRPCPort    int        // gRPC port of the agents' file service
	MQ          *mymq.Config
}
| func NewRepObjectIterator(objects []model.Object, objectRepData []models.ObjectRepData, coorCli *coormq.Client, distlock *distsvc.Service, downloadConfig DownloadConfig) *RepObjectIterator { | |||
| return &RepObjectIterator{ | |||
| objects: objects, | |||
| objectRepData: objectRepData, | |||
| coorCli: coorCli, | |||
| distlock: distlock, | |||
| downloadConfig: downloadConfig, | |||
| } | |||
| } | |||
| func (i *RepObjectIterator) MoveNext() (*IterDownloadingObject, error) { | |||
| if !i.inited { | |||
| i.inited = true | |||
| findCliLocResp, err := i.coorCli.FindClientLocation(coormq.NewFindClientLocation(i.downloadConfig.ExternalIP)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("finding client location: %w", err) | |||
| } | |||
| i.cliLocation = findCliLocResp.Location | |||
| } | |||
| if i.currentIndex >= len(i.objects) { | |||
| return nil, ErrNoMoreItem | |||
| } | |||
| item, err := i.doMove() | |||
| i.currentIndex++ | |||
| return item, err | |||
| } | |||
| func (i *RepObjectIterator) doMove() (*IterDownloadingObject, error) { | |||
| repData := i.objectRepData[i.currentIndex] | |||
| if len(repData.NodeIDs) == 0 { | |||
| return nil, fmt.Errorf("no node has this file %s", repData.FileHash) | |||
| } | |||
| getNodesResp, err := i.coorCli.GetNodes(coormq.NewGetNodes(repData.NodeIDs)) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("getting nodes: %w", err) | |||
| } | |||
| downloadNodes := lo.Map(getNodesResp.Nodes, func(node model.Node, index int) DownloadNodeInfo { | |||
| return DownloadNodeInfo{ | |||
| Node: node, | |||
| IsSameLocation: node.LocationID == i.cliLocation.LocationID, | |||
| } | |||
| }) | |||
| // 选择下载节点 | |||
| downloadNode := i.chooseDownloadNode(downloadNodes) | |||
| // 如果客户端与节点在同一个地域,则使用内网地址连接节点 | |||
| nodeIP := downloadNode.Node.ExternalIP | |||
| if downloadNode.IsSameLocation { | |||
| nodeIP = downloadNode.Node.LocalIP | |||
| logger.Infof("client and node %d are at the same location, use local ip\n", downloadNode.Node.NodeID) | |||
| } | |||
| reader, err := i.downloadObject(downloadNode.Node.NodeID, nodeIP, repData.FileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("rep read failed, err: %w", err) | |||
| } | |||
| return &IterDownloadingObject{ | |||
| Object: i.objects[i.currentIndex], | |||
| File: reader, | |||
| }, nil | |||
| } | |||
// Close releases resources held by the iterator. Readers returned by MoveNext
// are owned by the caller, so there is nothing to do here.
func (i *RepObjectIterator) Close() {
}
| // chooseDownloadNode 选择一个下载节点 | |||
| // 1. 从与当前客户端相同地域的节点中随机选一个 | |||
| // 2. 没有用的话从所有节点中随机选一个 | |||
| func (i *RepObjectIterator) chooseDownloadNode(entries []DownloadNodeInfo) DownloadNodeInfo { | |||
| sameLocationEntries := lo.Filter(entries, func(e DownloadNodeInfo, i int) bool { return e.IsSameLocation }) | |||
| if len(sameLocationEntries) > 0 { | |||
| return sameLocationEntries[rand.Intn(len(sameLocationEntries))] | |||
| } | |||
| return entries[rand.Intn(len(entries))] | |||
| } | |||
| func (i *RepObjectIterator) downloadObject(nodeID int64, nodeIP string, fileHash string) (io.ReadCloser, error) { | |||
| if i.downloadConfig.LocalIPFS != nil { | |||
| logger.Infof("try to use local IPFS to download file") | |||
| reader, err := i.downloadFromLocalIPFS(fileHash) | |||
| if err == nil { | |||
| return reader, nil | |||
| } | |||
| logger.Warnf("download from local IPFS failed, so try to download from node %s, err: %s", nodeIP, err.Error()) | |||
| } | |||
| return i.downloadFromNode(nodeID, nodeIP, fileHash) | |||
| } | |||
| func (i *RepObjectIterator) downloadFromNode(nodeID int64, nodeIP string, fileHash string) (io.ReadCloser, error) { | |||
| // 二次获取锁 | |||
| mutex, err := reqbuilder.NewBuilder(). | |||
| // 用于从IPFS下载文件 | |||
| IPFS().ReadOneRep(nodeID, fileHash). | |||
| MutexLock(i.distlock) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("acquire locks failed, err: %w", err) | |||
| } | |||
| // 连接grpc | |||
| grpcAddr := fmt.Sprintf("%s:%d", nodeIP, i.downloadConfig.GRPCPort) | |||
| conn, err := grpc.Dial(grpcAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("connect to grpc server at %s failed, err: %w", grpcAddr, err) | |||
| } | |||
| // 下载文件 | |||
| client := agentcaller.NewFileTransportClient(conn) | |||
| reader, err := mygrpc.GetFileAsStream(client, fileHash) | |||
| if err != nil { | |||
| conn.Close() | |||
| return nil, fmt.Errorf("request to get file failed, err: %w", err) | |||
| } | |||
| reader = myio.AfterReadClosed(reader, func(io.ReadCloser) { | |||
| conn.Close() | |||
| mutex.Unlock() | |||
| }) | |||
| return reader, nil | |||
| } | |||
| func (i *RepObjectIterator) downloadFromLocalIPFS(fileHash string) (io.ReadCloser, error) { | |||
| reader, err := i.downloadConfig.LocalIPFS.OpenRead(fileHash) | |||
| if err != nil { | |||
| return nil, fmt.Errorf("read ipfs file failed, err: %w", err) | |||
| } | |||
| return reader, nil | |||
| } | |||
| @@ -0,0 +1,30 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| ) | |||
// AgentService is the interface of the agent's MQ service.
type AgentService interface {
	GetState(msg *GetState) (*GetStateResp, *mq.CodeMessage)
}

// Get the agent's state.
var _ = Register(AgentService.GetState)
// GetState asks an agent for its current state.
type GetState struct {
}

// GetStateResp reports the state of the agent's IPFS daemon.
type GetStateResp struct {
	IPFSState string `json:"ipfsState"`
}
| func NewGetState() GetState { | |||
| return GetState{} | |||
| } | |||
| func NewGetStateResp(ipfsState string) GetStateResp { | |||
| return GetStateResp{ | |||
| IPFSState: ipfsState, | |||
| } | |||
| } | |||
// GetState sends a GetState request to the agent over MQ and waits for the response.
func (client *Client) GetState(msg GetState, opts ...mq.RequestOption) (*GetStateResp, error) {
	return mq.Request[GetStateResp](client.rabbitCli, msg, opts...)
}
| @@ -3,7 +3,6 @@ package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/mq/config" | |||
| ) | |||
| type Client struct { | |||
| @@ -11,7 +10,7 @@ type Client struct { | |||
| id int64 | |||
| } | |||
| func NewClient(id int64, cfg *config.Config) (*Client, error) { | |||
| func NewClient(id int64, cfg *mymq.Config) (*Client, error) { | |||
| rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), mymq.MakeAgentQueueName(id), "") | |||
| if err != nil { | |||
| return nil, err | |||
| @@ -5,46 +5,47 @@ import ( | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type CheckIPFS struct { | |||
| IsComplete bool `json:"isComplete"` | |||
| Caches []model.Cache `json:"caches"` | |||
| } | |||
| func NewCheckIPFS(isComplete bool, caches []model.Cache) CheckIPFS { | |||
| return CheckIPFS{ | |||
| IsComplete: isComplete, | |||
| Caches: caches, | |||
| } | |||
| type IPFSService interface { | |||
| CheckIPFS(msg *CheckIPFS) (*CheckIPFSResp, *mq.CodeMessage) | |||
| } | |||
| type CheckIPFSResp struct { | |||
| Entries []CheckIPFSRespEntry `json:"entries"` | |||
| } | |||
| // 检查节点上的IPFS | |||
| var _ = Register(IPFSService.CheckIPFS) | |||
| const ( | |||
| CHECK_IPFS_RESP_OP_DELETE_TEMP = "DeleteTemp" | |||
| CHECK_IPFS_RESP_OP_CREATE_TEMP = "CreateTemp" | |||
| ) | |||
| type CheckIPFS struct { | |||
| IsComplete bool `json:"isComplete"` | |||
| Caches []model.Cache `json:"caches"` | |||
| } | |||
| type CheckIPFSResp struct { | |||
| Entries []CheckIPFSRespEntry `json:"entries"` | |||
| } | |||
| type CheckIPFSRespEntry struct { | |||
| FileHash string `json:"fileHash"` | |||
| Operation string `json:"operation"` | |||
| } | |||
| func NewCheckIPFSRespEntry(fileHash string, op string) CheckIPFSRespEntry { | |||
| return CheckIPFSRespEntry{ | |||
| FileHash: fileHash, | |||
| Operation: op, | |||
| func NewCheckIPFS(isComplete bool, caches []model.Cache) CheckIPFS { | |||
| return CheckIPFS{ | |||
| IsComplete: isComplete, | |||
| Caches: caches, | |||
| } | |||
| } | |||
| func NewCheckIPFSResp(entries []CheckIPFSRespEntry) CheckIPFSResp { | |||
| return CheckIPFSResp{ | |||
| Entries: entries, | |||
| } | |||
| } | |||
| func init() { | |||
| mq.RegisterMessage[CheckIPFS]() | |||
| mq.RegisterMessage[CheckIPFSResp]() | |||
| func NewCheckIPFSRespEntry(fileHash string, op string) CheckIPFSRespEntry { | |||
| return CheckIPFSRespEntry{ | |||
| FileHash: fileHash, | |||
| Operation: op, | |||
| } | |||
| } | |||
// CheckIPFS sends an IPFS cache check request to the agent over MQ and waits
// for the response.
func (client *Client) CheckIPFS(msg CheckIPFS, opts ...mq.RequestOption) (*CheckIPFSResp, error) {
	return mq.Request[CheckIPFSResp](client.rabbitCli, msg, opts...)
}
| @@ -2,30 +2,46 @@ package agent | |||
| import "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| type ObjectService interface { | |||
| StartPinningObject(msg *StartPinningObject) (*StartPinningObjectResp, *mq.CodeMessage) | |||
| WaitPinningObject(msg *WaitPinningObject) (*WaitPinningObjectResp, *mq.CodeMessage) | |||
| } | |||
| // 启动Pin对象的任务 | |||
| var _ = Register(ObjectService.StartPinningObject) | |||
| type StartPinningObject struct { | |||
| FileHash string `json:"fileHash"` | |||
| } | |||
| type StartPinningObjectResp struct { | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartPinningObject(fileHash string) StartPinningObject { | |||
| return StartPinningObject{ | |||
| FileHash: fileHash, | |||
| } | |||
| } | |||
| type StartPinningObjectResp struct { | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartPinningObjectResp(taskID string) StartPinningObjectResp { | |||
| return StartPinningObjectResp{ | |||
| TaskID: taskID, | |||
| } | |||
| } | |||
| func (client *Client) StartPinningObject(msg StartPinningObject, opts ...mq.RequestOption) (*StartPinningObjectResp, error) { | |||
| return mq.Request[StartPinningObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| // 等待Pin对象的任务 | |||
| var _ = Register(ObjectService.WaitPinningObject) | |||
| type WaitPinningObject struct { | |||
| TaskID string `json:"taskID"` | |||
| WaitTimeoutMs int64 `json:"waitTimeout"` | |||
| } | |||
| type WaitPinningObjectResp struct { | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| } | |||
| func NewWaitPinningObject(taskID string, waitTimeoutMs int64) WaitPinningObject { | |||
| return WaitPinningObject{ | |||
| @@ -33,23 +49,12 @@ func NewWaitPinningObject(taskID string, waitTimeoutMs int64) WaitPinningObject | |||
| WaitTimeoutMs: waitTimeoutMs, | |||
| } | |||
| } | |||
| type WaitPinningObjectResp struct { | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| } | |||
| func NewWaitPinningObjectResp(isComplete bool, err string) WaitPinningObjectResp { | |||
| return WaitPinningObjectResp{ | |||
| IsComplete: isComplete, | |||
| Error: err, | |||
| } | |||
| } | |||
| func init() { | |||
| mq.RegisterMessage[StartPinningObject]() | |||
| mq.RegisterMessage[StartPinningObjectResp]() | |||
| mq.RegisterMessage[WaitPinningObject]() | |||
| mq.RegisterMessage[WaitPinningObjectResp]() | |||
| func (client *Client) WaitPinningObject(msg WaitPinningObject, opts ...mq.RequestOption) (*WaitPinningObjectResp, error) { | |||
| return mq.Request[WaitPinningObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| @@ -3,7 +3,6 @@ package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/mq/config" | |||
| ) | |||
| type Service interface { | |||
| @@ -23,7 +22,7 @@ type Server struct { | |||
| OnError func(err error) | |||
| } | |||
| func NewServer(svc Service, id int64, cfg *config.Config) (*Server, error) { | |||
| func NewServer(svc Service, id int64, cfg *mymq.Config) (*Server, error) { | |||
| srv := &Server{ | |||
| service: svc, | |||
| } | |||
| @@ -54,14 +53,21 @@ func (s *Server) Serve() error { | |||
| var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher() | |||
| // Register 将Service中的一个接口函数作为指定类型消息的处理函数 | |||
| // Register 将Service中的一个接口函数作为指定类型消息的处理函数,同时会注册请求和响应的消息类型 | |||
| // TODO 需要约束:Service实现了TSvc接口 | |||
| func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) { | |||
| func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) any { | |||
| mq.AddServiceFn(&msgDispatcher, svcFn) | |||
| mq.RegisterMessage[TReq]() | |||
| mq.RegisterMessage[TResp]() | |||
| return nil | |||
| } | |||
| // RegisterNoReply 将Service中的一个*没有返回值的*接口函数作为指定类型消息的处理函数 | |||
| // RegisterNoReply 将Service中的一个*没有返回值的*接口函数作为指定类型消息的处理函数,同时会注册请求和响应的消息类型 | |||
| // TODO 需要约束:Service实现了TSvc接口 | |||
| func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) { | |||
| func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) any { | |||
| mq.AddNoRespServiceFn(&msgDispatcher, svcFn) | |||
| mq.RegisterMessage[TReq]() | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,188 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// StorageService is the storage-related part of the agent's MQ service.
type StorageService interface {
	StartStorageMovePackage(msg *StartStorageMovePackage) (*StartStorageMovePackageResp, *mq.CodeMessage)
	WaitStorageMovePackage(msg *WaitStorageMovePackage) (*WaitStorageMovePackageResp, *mq.CodeMessage)
	StorageCheck(msg *StorageCheck) (*StorageCheckResp, *mq.CodeMessage)
	StartStorageCreatePackage(msg *StartStorageCreatePackage) (*StartStorageCreatePackageResp, *mq.CodeMessage)
	WaitStorageCreatePackage(msg *WaitStorageCreatePackage) (*WaitStorageCreatePackageResp, *mq.CodeMessage)
}
// Start a task that moves a package onto a storage.
var _ = Register(StorageService.StartStorageMovePackage)

// StartStorageMovePackage asks the agent to start moving a package to one of
// its storages.
type StartStorageMovePackage struct {
	UserID    int64 `json:"userID"`
	PackageID int64 `json:"packageID"`
	StorageID int64 `json:"storageID"`
}

// StartStorageMovePackageResp carries the ID of the started task, to be
// polled with WaitStorageMovePackage.
type StartStorageMovePackageResp struct {
	TaskID string `json:"taskID"`
}

// NewStartStorageMovePackage creates a StartStorageMovePackage request.
func NewStartStorageMovePackage(userID int64, packageID int64, storageID int64) StartStorageMovePackage {
	return StartStorageMovePackage{
		UserID:    userID,
		PackageID: packageID,
		StorageID: storageID,
	}
}

// NewStartStorageMovePackageResp creates a response carrying taskID.
func NewStartStorageMovePackageResp(taskID string) StartStorageMovePackageResp {
	return StartStorageMovePackageResp{
		TaskID: taskID,
	}
}

// StartStorageMovePackage sends the request to the agent over MQ.
func (client *Client) StartStorageMovePackage(msg StartStorageMovePackage, opts ...mq.RequestOption) (*StartStorageMovePackageResp, error) {
	return mq.Request[StartStorageMovePackageResp](client.rabbitCli, msg, opts...)
}
// Wait for a move-package task to finish.
var _ = Register(StorageService.WaitStorageMovePackage)

// WaitStorageMovePackage polls the task started by StartStorageMovePackage.
type WaitStorageMovePackage struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"` // maximum time to wait, in milliseconds
}

// WaitStorageMovePackageResp reports whether the task has completed; Error is
// the failure message when it failed.
type WaitStorageMovePackageResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
}

// NewWaitStorageMovePackage creates a WaitStorageMovePackage request.
func NewWaitStorageMovePackage(taskID string, waitTimeoutMs int64) WaitStorageMovePackage {
	return WaitStorageMovePackage{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}

// NewWaitStorageMovePackageResp creates the response.
func NewWaitStorageMovePackageResp(isComplete bool, err string) WaitStorageMovePackageResp {
	return WaitStorageMovePackageResp{
		IsComplete: isComplete,
		Error:      err,
	}
}

// WaitStorageMovePackage sends the wait request to the agent over MQ.
func (client *Client) WaitStorageMovePackage(msg WaitStorageMovePackage, opts ...mq.RequestOption) (*WaitStorageMovePackageResp, error) {
	return mq.Request[WaitStorageMovePackageResp](client.rabbitCli, msg, opts...)
}
// Check a Storage's contents.
var _ = Register(StorageService.StorageCheck)

// Operations the check response may request for a StoragePackage record.
const (
	CHECK_STORAGE_RESP_OP_DELETE     = "Delete"
	CHECK_STORAGE_RESP_OP_SET_NORMAL = "SetNormal"
)

// StorageCheck asks an agent to compare the given package records against the
// actual contents of the storage directory.
type StorageCheck struct {
	StorageID  int64                  `json:"storageID"`
	Directory  string                 `json:"directory"`  // storage directory on the agent
	IsComplete bool                   `json:"isComplete"` // presumably: whether Packages is the full record set — confirm with caller
	Packages   []model.StoragePackage `json:"packages"`
}

// StorageCheckResp reports the state of the directory plus one entry per
// package record that should be fixed up.
type StorageCheckResp struct {
	DirectoryState string                  `json:"directoryState"`
	Entries        []StorageCheckRespEntry `json:"entries"`
}

// StorageCheckRespEntry describes one fix-up operation for a package record.
type StorageCheckRespEntry struct {
	PackageID int64  `json:"packageID"`
	UserID    int64  `json:"userID"`
	Operation string `json:"operation"` // one of the CHECK_STORAGE_RESP_OP_* constants
}

// NewStorageCheck creates a StorageCheck request.
func NewStorageCheck(storageID int64, directory string, isComplete bool, packages []model.StoragePackage) StorageCheck {
	return StorageCheck{
		StorageID:  storageID,
		Directory:  directory,
		IsComplete: isComplete,
		Packages:   packages,
	}
}

// NewStorageCheckResp creates a StorageCheckResp.
func NewStorageCheckResp(dirState string, entries []StorageCheckRespEntry) StorageCheckResp {
	return StorageCheckResp{
		DirectoryState: dirState,
		Entries:        entries,
	}
}

// NewStorageCheckRespEntry creates one fix-up entry.
func NewStorageCheckRespEntry(packageID int64, userID int64, op string) StorageCheckRespEntry {
	return StorageCheckRespEntry{
		PackageID: packageID,
		UserID:    userID,
		Operation: op,
	}
}

// StorageCheck sends the check request to the agent over MQ.
func (client *Client) StorageCheck(msg StorageCheck, opts ...mq.RequestOption) (*StorageCheckResp, error) {
	return mq.Request[StorageCheckResp](client.rabbitCli, msg, opts...)
}
// Start a task that creates (uploads) a package from files in a storage.
var _ = Register(StorageService.StartStorageCreatePackage)

// StartStorageCreatePackage asks the agent to start uploading the files under
// Path in the given storage as a new package in the bucket.
type StartStorageCreatePackage struct {
	UserID     int64                      `json:"userID"`
	BucketID   int64                      `json:"bucketID"`
	Name       string                     `json:"name"`
	StorageID  int64                      `json:"storageID"`
	Path       string                     `json:"path"`
	Redundancy models.TypedRedundancyInfo `json:"redundancy"`
}

// StartStorageCreatePackageResp carries the ID of the started task, to be
// polled with WaitStorageCreatePackage.
type StartStorageCreatePackageResp struct {
	TaskID string `json:"taskID"`
}

// NewStartStorageCreatePackage creates a StartStorageCreatePackage request.
func NewStartStorageCreatePackage(userID int64, bucketID int64, name string, storageID int64, path string, redundancy models.TypedRedundancyInfo) StartStorageCreatePackage {
	return StartStorageCreatePackage{
		UserID:     userID,
		BucketID:   bucketID,
		Name:       name,
		StorageID:  storageID,
		Path:       path,
		Redundancy: redundancy,
	}
}

// NewStartStorageCreatePackageResp creates a response carrying taskID.
func NewStartStorageCreatePackageResp(taskID string) StartStorageCreatePackageResp {
	return StartStorageCreatePackageResp{
		TaskID: taskID,
	}
}

// StartStorageCreatePackage sends the request to the agent over MQ.
func (client *Client) StartStorageCreatePackage(msg StartStorageCreatePackage, opts ...mq.RequestOption) (*StartStorageCreatePackageResp, error) {
	return mq.Request[StartStorageCreatePackageResp](client.rabbitCli, msg, opts...)
}
// Wait for a create-package task to finish.
var _ = Register(StorageService.WaitStorageCreatePackage)

// WaitStorageCreatePackage polls the task started by StartStorageCreatePackage.
type WaitStorageCreatePackage struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"` // maximum time to wait, in milliseconds
}

// WaitStorageCreatePackageResp reports completion: on success PackageID is
// the ID of the newly created package; on failure Error holds the message.
type WaitStorageCreatePackageResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
	PackageID  int64  `json:"packageID"`
}

// NewWaitStorageCreatePackage creates a WaitStorageCreatePackage request.
func NewWaitStorageCreatePackage(taskID string, waitTimeoutMs int64) WaitStorageCreatePackage {
	return WaitStorageCreatePackage{
		TaskID:        taskID,
		WaitTimeoutMs: waitTimeoutMs,
	}
}

// NewWaitStorageCreatePackageResp creates the response.
func NewWaitStorageCreatePackageResp(isComplete bool, err string, packageID int64) WaitStorageCreatePackageResp {
	return WaitStorageCreatePackageResp{
		IsComplete: isComplete,
		Error:      err,
		PackageID:  packageID,
	}
}

// WaitStorageCreatePackage sends the wait request to the agent over MQ.
func (client *Client) WaitStorageCreatePackage(msg WaitStorageCreatePackage, opts ...mq.RequestOption) (*WaitStorageCreatePackageResp, error) {
	return mq.Request[WaitStorageCreatePackageResp](client.rabbitCli, msg, opts...)
}
| @@ -1,10 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
| func (client *Client) GetState(msg agtmsg.GetState, opts ...mq.RequestOption) (*agtmsg.GetStateResp, error) { | |||
| return mq.Request[agtmsg.GetStateResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| @@ -1,10 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
| func (client *Client) CheckIPFS(msg agtmsg.CheckIPFS, opts ...mq.RequestOption) (*agtmsg.CheckIPFSResp, error) { | |||
| return mq.Request[agtmsg.CheckIPFSResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| @@ -1,14 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
| func (client *Client) StartPinningObject(msg agtmsg.StartPinningObject, opts ...mq.RequestOption) (*agtmsg.StartPinningObjectResp, error) { | |||
| return mq.Request[agtmsg.StartPinningObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| func (client *Client) WaitPinningObject(msg agtmsg.WaitPinningObject, opts ...mq.RequestOption) (*agtmsg.WaitPinningObjectResp, error) { | |||
| return mq.Request[agtmsg.WaitPinningObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| @@ -1,26 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
| func (client *Client) StartStorageMoveObject(msg agtmsg.StartStorageMoveObject, opts ...mq.RequestOption) (*agtmsg.StartStorageMoveObjectResp, error) { | |||
| return mq.Request[agtmsg.StartStorageMoveObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| func (client *Client) WaitStorageMoveObject(msg agtmsg.WaitStorageMoveObject, opts ...mq.RequestOption) (*agtmsg.WaitStorageMoveObjectResp, error) { | |||
| return mq.Request[agtmsg.WaitStorageMoveObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| func (client *Client) StorageCheck(msg agtmsg.StorageCheck, opts ...mq.RequestOption) (*agtmsg.StorageCheckResp, error) { | |||
| return mq.Request[agtmsg.StorageCheckResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| func (client *Client) StartStorageUploadRepObject(msg agtmsg.StartStorageUploadRepObject, opts ...mq.RequestOption) (*agtmsg.StartStorageUploadRepObjectResp, error) { | |||
| return mq.Request[agtmsg.StartStorageUploadRepObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| func (client *Client) WaitStorageUploadRepObject(msg agtmsg.WaitStorageUploadRepObject, opts ...mq.RequestOption) (*agtmsg.WaitStorageUploadRepObjectResp, error) { | |||
| return mq.Request[agtmsg.WaitStorageUploadRepObjectResp](client.rabbitCli, msg, opts...) | |||
| } | |||
| @@ -1,14 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
| func (client *Client) TempCacheReport(msg coormsg.TempCacheReport) error { | |||
| return mq.Send(client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) AgentStatusReport(msg coormsg.AgentStatusReport) error { | |||
| return mq.Send(client.rabbitCli, msg) | |||
| } | |||
| @@ -1,22 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
| func (client *Client) GetUserBuckets(msg coormsg.GetUserBuckets) (*coormsg.GetUserBucketsResp, error) { | |||
| return mq.Request[coormsg.GetUserBucketsResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) GetBucketObjects(msg coormsg.GetBucketObjects) (*coormsg.GetBucketObjectsResp, error) { | |||
| return mq.Request[coormsg.GetBucketObjectsResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) CreateBucket(msg coormsg.CreateBucket) (*coormsg.CreateBucketResp, error) { | |||
| return mq.Request[coormsg.CreateBucketResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) DeleteBucket(msg coormsg.DeleteBucket) (*coormsg.DeleteBucketResp, error) { | |||
| return mq.Request[coormsg.DeleteBucketResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -1,42 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
| func (client *Client) GetObjectsByDirName(msg coormsg.GetObjectsByDirName) (*coormsg.GetObjectsResp, error) { | |||
| return mq.Request[coormsg.GetObjectsResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) PreDownloadObject(msg coormsg.PreDownloadObject) (*coormsg.PreDownloadObjectResp, error) { | |||
| return mq.Request[coormsg.PreDownloadObjectResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) PreUploadRepObject(msg coormsg.PreUploadRepObject) (*coormsg.PreUploadResp, error) { | |||
| return mq.Request[coormsg.PreUploadResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) CreateRepObject(msg coormsg.CreateRepObject) (*coormsg.CreateObjectResp, error) { | |||
| return mq.Request[coormsg.CreateObjectResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) PreUploadEcObject(msg coormsg.PreUploadEcObject) (*coormsg.PreUploadEcResp, error) { | |||
| return mq.Request[coormsg.PreUploadEcResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) CreateEcObject(msg coormsg.CreateEcObject) (*coormsg.CreateObjectResp, error) { | |||
| return mq.Request[coormsg.CreateObjectResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) PreUpdateRepObject(msg coormsg.PreUpdateRepObject) (*coormsg.PreUpdateRepObjectResp, error) { | |||
| return mq.Request[coormsg.PreUpdateRepObjectResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) UpdateRepObject(msg coormsg.UpdateRepObject) (*coormsg.UpdateRepObjectResp, error) { | |||
| return mq.Request[coormsg.UpdateRepObjectResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) DeleteObject(msg coormsg.DeleteObject) (*coormsg.DeleteObjectResp, error) { | |||
| return mq.Request[coormsg.DeleteObjectResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -1,18 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
| func (client *Client) GetStorageInfo(msg coormsg.GetStorageInfo) (*coormsg.GetStorageInfoResp, error) { | |||
| return mq.Request[coormsg.GetStorageInfoResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) PreMoveObjectToStorage(msg coormsg.PreMoveObjectToStorage) (*coormsg.PreMoveObjectToStorageResp, error) { | |||
| return mq.Request[coormsg.PreMoveObjectToStorageResp](client.rabbitCli, msg) | |||
| } | |||
| func (client *Client) MoveObjectToStorage(msg coormsg.MoveObjectToStorage) (*coormsg.MoveObjectToStorageResp, error) { | |||
| return mq.Request[coormsg.MoveObjectToStorageResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -1,25 +0,0 @@ | |||
| package scanner | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| scmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/scanner" | |||
| ) | |||
| func (cli *Client) PostEvent(event any, isEmergency bool, dontMerge bool, opts ...mq.SendOption) error { | |||
| opt := mq.SendOption{ | |||
| Timeout: time.Second * 30, | |||
| } | |||
| if len(opts) > 0 { | |||
| opt = opts[0] | |||
| } | |||
| body, err := scmsg.NewPostEvent(event, isEmergency, dontMerge) | |||
| if err != nil { | |||
| return fmt.Errorf("new post event body failed, err: %w", err) | |||
| } | |||
| return mq.Send(cli.rabbitCli, body, opt) | |||
| } | |||
| @@ -1,4 +1,4 @@ | |||
| package config | |||
| package mq | |||
| import "fmt" | |||
| @@ -2,7 +2,15 @@ package coordinator | |||
| import "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| type AgentService interface { | |||
| TempCacheReport(msg *TempCacheReport) | |||
| AgentStatusReport(msg *AgentStatusReport) | |||
| } | |||
| // 代理端发给协调端,告知临时缓存的数据 | |||
| var _ = RegisterNoReply(AgentService.TempCacheReport) | |||
| type TempCacheReport struct { | |||
| NodeID int64 `json:"nodeID"` | |||
| Hashes []string `json:"hashes"` | |||
| @@ -14,8 +22,13 @@ func NewTempCacheReportBody(nodeID int64, hashes []string) TempCacheReport { | |||
| Hashes: hashes, | |||
| } | |||
| } | |||
| func (client *Client) TempCacheReport(msg TempCacheReport) error { | |||
| return mq.Send(client.rabbitCli, msg) | |||
| } | |||
| // 代理端发给协调端,告知延迟、ipfs和资源目录的可达性 | |||
| var _ = RegisterNoReply(AgentService.AgentStatusReport) | |||
| type AgentStatusReport struct { | |||
| NodeID int64 `json:"nodeID"` | |||
| NodeDelayIDs []int64 `json:"nodeDelayIDs"` | |||
| @@ -33,9 +46,6 @@ func NewAgentStatusReportBody(nodeID int64, nodeDelayIDs []int64, nodeDelays []i | |||
| LocalDirStatus: localDirStatus, | |||
| } | |||
| } | |||
| func init() { | |||
| mq.RegisterMessage[TempCacheReport]() | |||
| mq.RegisterMessage[AgentStatusReport]() | |||
| func (client *Client) AgentStatusReport(msg AgentStatusReport) error { | |||
| return mq.Send(client.rabbitCli, msg) | |||
| } | |||
| @@ -0,0 +1,114 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type BucketService interface { | |||
| GetUserBuckets(msg *GetUserBuckets) (*GetUserBucketsResp, *mq.CodeMessage) | |||
| GetBucketPackages(msg *GetBucketPackages) (*GetBucketPackagesResp, *mq.CodeMessage) | |||
| CreateBucket(msg *CreateBucket) (*CreateBucketResp, *mq.CodeMessage) | |||
| DeleteBucket(msg *DeleteBucket) (*DeleteBucketResp, *mq.CodeMessage) | |||
| } | |||
| // 获取用户所有的桶 | |||
| var _ = Register(BucketService.GetUserBuckets) | |||
| type GetUserBuckets struct { | |||
| UserID int64 `json:"userID"` | |||
| } | |||
| type GetUserBucketsResp struct { | |||
| Buckets []model.Bucket `json:"buckets"` | |||
| } | |||
| func NewGetUserBuckets(userID int64) GetUserBuckets { | |||
| return GetUserBuckets{ | |||
| UserID: userID, | |||
| } | |||
| } | |||
| func NewGetUserBucketsResp(buckets []model.Bucket) GetUserBucketsResp { | |||
| return GetUserBucketsResp{ | |||
| Buckets: buckets, | |||
| } | |||
| } | |||
| func (client *Client) GetUserBuckets(msg GetUserBuckets) (*GetUserBucketsResp, error) { | |||
| return mq.Request[GetUserBucketsResp](client.rabbitCli, msg) | |||
| } | |||
| // 获取桶中的所有Package | |||
| var _ = Register(BucketService.GetBucketPackages) | |||
| type GetBucketPackages struct { | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| } | |||
| type GetBucketPackagesResp struct { | |||
| Packages []model.Package `json:"packages"` | |||
| } | |||
| func NewGetBucketPackages(userID int64, bucketID int64) GetBucketPackages { | |||
| return GetBucketPackages{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| } | |||
| } | |||
| func NewGetBucketPackagesResp(packages []model.Package) GetBucketPackagesResp { | |||
| return GetBucketPackagesResp{ | |||
| Packages: packages, | |||
| } | |||
| } | |||
| func (client *Client) GetBucketPackages(msg GetBucketPackages) (*GetBucketPackagesResp, error) { | |||
| return mq.Request[GetBucketPackagesResp](client.rabbitCli, msg) | |||
| } | |||
| // 创建桶 | |||
| var _ = Register(BucketService.CreateBucket) | |||
| type CreateBucket struct { | |||
| UserID int64 `json:"userID"` | |||
| BucketName string `json:"bucketName"` | |||
| } | |||
| type CreateBucketResp struct { | |||
| BucketID int64 `json:"bucketID"` | |||
| } | |||
| func NewCreateBucket(userID int64, bucketName string) CreateBucket { | |||
| return CreateBucket{ | |||
| UserID: userID, | |||
| BucketName: bucketName, | |||
| } | |||
| } | |||
| func NewCreateBucketResp(bucketID int64) CreateBucketResp { | |||
| return CreateBucketResp{ | |||
| BucketID: bucketID, | |||
| } | |||
| } | |||
| func (client *Client) CreateBucket(msg CreateBucket) (*CreateBucketResp, error) { | |||
| return mq.Request[CreateBucketResp](client.rabbitCli, msg) | |||
| } | |||
| // 删除桶 | |||
| var _ = Register(BucketService.DeleteBucket) | |||
| type DeleteBucket struct { | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| } | |||
| type DeleteBucketResp struct{} | |||
| func NewDeleteBucket(userID int64, bucketID int64) DeleteBucket { | |||
| return DeleteBucket{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| } | |||
| } | |||
| func NewDeleteBucketResp() DeleteBucketResp { | |||
| return DeleteBucketResp{} | |||
| } | |||
| func (client *Client) DeleteBucket(msg DeleteBucket) (*DeleteBucketResp, error) { | |||
| return mq.Request[DeleteBucketResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -3,14 +3,13 @@ package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/mq/config" | |||
| ) | |||
| type Client struct { | |||
| rabbitCli *mq.RabbitMQClient | |||
| } | |||
| func NewClient(cfg *config.Config) (*Client, error) { | |||
| func NewClient(cfg *mymq.Config) (*Client, error) { | |||
| rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), mymq.COORDINATOR_QUEUE_NAME, "") | |||
| if err != nil { | |||
| return nil, err | |||
| @@ -0,0 +1,60 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type CommonService interface { | |||
| FindClientLocation(msg *FindClientLocation) (*FindClientLocationResp, *mq.CodeMessage) | |||
| GetECConfig(msg *GetECConfig) (*GetECConfigResp, *mq.CodeMessage) | |||
| } | |||
| // 查询指定IP所属的地域 | |||
| var _ = Register(CommonService.FindClientLocation) | |||
| type FindClientLocation struct { | |||
| IP string `json:"ip"` | |||
| } | |||
| type FindClientLocationResp struct { | |||
| Location model.Location `json:"location"` | |||
| } | |||
| func NewFindClientLocation(ip string) FindClientLocation { | |||
| return FindClientLocation{ | |||
| IP: ip, | |||
| } | |||
| } | |||
| func NewFindClientLocationResp(location model.Location) FindClientLocationResp { | |||
| return FindClientLocationResp{ | |||
| Location: location, | |||
| } | |||
| } | |||
| func (client *Client) FindClientLocation(msg FindClientLocation) (*FindClientLocationResp, error) { | |||
| return mq.Request[FindClientLocationResp](client.rabbitCli, msg) | |||
| } | |||
| // 获取EC具体配置 | |||
| var _ = Register(CommonService.GetECConfig) | |||
| type GetECConfig struct { | |||
| ECName string `json:"ecName"` | |||
| } | |||
| type GetECConfigResp struct { | |||
| Config model.Ec `json:"config"` | |||
| } | |||
| func NewGetECConfig(ecName string) GetECConfig { | |||
| return GetECConfig{ | |||
| ECName: ecName, | |||
| } | |||
| } | |||
| func NewGetECConfigResp(config model.Ec) GetECConfigResp { | |||
| return GetECConfigResp{ | |||
| Config: config, | |||
| } | |||
| } | |||
| func (client *Client) GetECConfig(msg GetECConfig) (*GetECConfigResp, error) { | |||
| return mq.Request[GetECConfigResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -0,0 +1,60 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type NodeService interface { | |||
| GetUserNodes(msg *GetUserNodes) (*GetUserNodesResp, *mq.CodeMessage) | |||
| GetNodes(msg *GetNodes) (*GetNodesResp, *mq.CodeMessage) | |||
| } | |||
| // 查询用户可用的节点 | |||
| var _ = Register(NodeService.GetUserNodes) | |||
| type GetUserNodes struct { | |||
| UserID int64 `json:"userID"` | |||
| } | |||
| type GetUserNodesResp struct { | |||
| Nodes []model.Node `json:"nodes"` | |||
| } | |||
| func NewGetUserNodes(userID int64) GetUserNodes { | |||
| return GetUserNodes{ | |||
| UserID: userID, | |||
| } | |||
| } | |||
| func NewGetUserNodesResp(nodes []model.Node) GetUserNodesResp { | |||
| return GetUserNodesResp{ | |||
| Nodes: nodes, | |||
| } | |||
| } | |||
| func (client *Client) GetUserNodes(msg GetUserNodes) (*GetUserNodesResp, error) { | |||
| return mq.Request[GetUserNodesResp](client.rabbitCli, msg) | |||
| } | |||
| // 获取指定节点的信息 | |||
| var _ = Register(NodeService.GetNodes) | |||
| type GetNodes struct { | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| type GetNodesResp struct { | |||
| Nodes []model.Node `json:"nodes"` | |||
| } | |||
| func NewGetNodes(nodeIDs []int64) GetNodes { | |||
| return GetNodes{ | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| func NewGetNodesResp(nodes []model.Node) GetNodesResp { | |||
| return GetNodesResp{ | |||
| Nodes: nodes, | |||
| } | |||
| } | |||
| func (client *Client) GetNodes(msg GetNodes) (*GetNodesResp, error) { | |||
| return mq.Request[GetNodesResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -0,0 +1,60 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| ) | |||
| type ObjectService interface { | |||
| GetPackageObjectRepData(msg *GetPackageObjectRepData) (*GetPackageObjectRepDataResp, *mq.CodeMessage) | |||
| GetPackageObjectECData(msg *GetPackageObjectECData) (*GetPackageObjectECDataResp, *mq.CodeMessage) | |||
| } | |||
| // 获取指定Object的Rep数据,返回的Objects会按照ObjectID升序 | |||
| var _ = Register(ObjectService.GetPackageObjectRepData) | |||
| type GetPackageObjectRepData struct { | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| type GetPackageObjectRepDataResp struct { | |||
| Data []models.ObjectRepData `json:"data"` | |||
| } | |||
| func NewGetPackageObjectRepData(packageID int64) GetPackageObjectRepData { | |||
| return GetPackageObjectRepData{ | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageObjectRepDataResp(data []models.ObjectRepData) GetPackageObjectRepDataResp { | |||
| return GetPackageObjectRepDataResp{ | |||
| Data: data, | |||
| } | |||
| } | |||
| func (client *Client) GetPackageObjectRepData(msg GetPackageObjectRepData) (*GetPackageObjectRepDataResp, error) { | |||
| return mq.Request[GetPackageObjectRepDataResp](client.rabbitCli, msg) | |||
| } | |||
| // 获取指定Object的EC数据,返回的Objects会按照ObjectID升序 | |||
| var _ = Register(ObjectService.GetPackageObjectECData) | |||
| type GetPackageObjectECData struct { | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| type GetPackageObjectECDataResp struct { | |||
| Data []models.ObjectECData `json:"data"` | |||
| } | |||
| func NewGetPackageObjectECData(packageID int64) GetPackageObjectECData { | |||
| return GetPackageObjectECData{ | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageObjectECDataResp(data []models.ObjectECData) GetPackageObjectECDataResp { | |||
| return GetPackageObjectECDataResp{ | |||
| Data: data, | |||
| } | |||
| } | |||
| func (client *Client) GetPackageObjectECData(msg GetPackageObjectECData) (*GetPackageObjectECDataResp, error) { | |||
| return mq.Request[GetPackageObjectECDataResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -0,0 +1,201 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type PackageService interface { | |||
| GetPackage(msg *GetPackage) (*GetPackageResp, *mq.CodeMessage) | |||
| GetPackageObjects(msg *GetPackageObjects) (*GetPackageObjectsResp, *mq.CodeMessage) | |||
| CreatePackage(msg *CreatePackage) (*CreatePackageResp, *mq.CodeMessage) | |||
| UpdateRepPackage(msg *UpdateRepPackage) (*UpdateRepPackageResp, *mq.CodeMessage) | |||
| UpdateECPackage(msg *UpdateECPackage) (*UpdateECPackageResp, *mq.CodeMessage) | |||
| DeletePackage(msg *DeletePackage) (*DeletePackageResp, *mq.CodeMessage) | |||
| } | |||
| // 获取Package基本信息 | |||
| var _ = Register(PackageService.GetPackage) | |||
| type GetPackage struct { | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| type GetPackageResp struct { | |||
| model.Package | |||
| } | |||
| func NewGetPackage(userID int64, packageID int64) GetPackage { | |||
| return GetPackage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageResp(pkg model.Package) GetPackageResp { | |||
| return GetPackageResp{ | |||
| Package: pkg, | |||
| } | |||
| } | |||
| func (client *Client) GetPackage(msg GetPackage) (*GetPackageResp, error) { | |||
| return mq.Request[GetPackageResp](client.rabbitCli, msg) | |||
| } | |||
| // 查询Package中的所有Object,返回的Objects会按照ObjectID升序 | |||
| var _ = Register(PackageService.GetPackageObjects) | |||
| type GetPackageObjects struct { | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| type GetPackageObjectsResp struct { | |||
| Objects []model.Object `json:"objects"` | |||
| } | |||
| func NewGetPackageObjects(userID int64, packageID int64) GetPackageObjects { | |||
| return GetPackageObjects{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewGetPackageObjectsResp(objects []model.Object) GetPackageObjectsResp { | |||
| return GetPackageObjectsResp{ | |||
| Objects: objects, | |||
| } | |||
| } | |||
| func (client *Client) GetPackageObjects(msg GetPackageObjects) (*GetPackageObjectsResp, error) { | |||
| return mq.Request[GetPackageObjectsResp](client.rabbitCli, msg) | |||
| } | |||
| // 创建一个Package | |||
| var _ = Register(PackageService.CreatePackage) | |||
| type CreatePackage struct { | |||
| UserID int64 `json:"userID"` | |||
| BucketID int64 `json:"bucketID"` | |||
| Name string `json:"name"` | |||
| Redundancy models.TypedRedundancyInfo `json:"redundancy"` | |||
| } | |||
| type CreatePackageResp struct { | |||
| PackageID int64 `json:"packageID"` | |||
| } | |||
| func NewCreatePackage(userID int64, bucketID int64, name string, redundancy models.TypedRedundancyInfo) CreatePackage { | |||
| return CreatePackage{ | |||
| UserID: userID, | |||
| BucketID: bucketID, | |||
| Name: name, | |||
| Redundancy: redundancy, | |||
| } | |||
| } | |||
| func NewCreatePackageResp(packageID int64) CreatePackageResp { | |||
| return CreatePackageResp{ | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func (client *Client) CreatePackage(msg CreatePackage) (*CreatePackageResp, error) { | |||
| return mq.Request[CreatePackageResp](client.rabbitCli, msg) | |||
| } | |||
| // 更新Rep备份模式的Package | |||
| var _ = Register(PackageService.UpdateRepPackage) | |||
| type UpdateRepPackage struct { | |||
| PackageID int64 `json:"packageID"` | |||
| Adds []AddRepObjectInfo `json:"objects"` | |||
| Deletes []int64 `json:"deletes"` | |||
| } | |||
| type UpdateRepPackageResp struct{} | |||
| type AddRepObjectInfo struct { | |||
| Path string `json:"path"` | |||
| Size int64 `json:"size,string"` | |||
| FileHash string `json:"fileHash"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewUpdateRepPackage(packageID int64, adds []AddRepObjectInfo, deletes []int64) UpdateRepPackage { | |||
| return UpdateRepPackage{ | |||
| PackageID: packageID, | |||
| Adds: adds, | |||
| Deletes: deletes, | |||
| } | |||
| } | |||
| func NewUpdateRepPackageResp() UpdateRepPackageResp { | |||
| return UpdateRepPackageResp{} | |||
| } | |||
| func NewAddRepObjectInfo(path string, size int64, fileHash string, nodeIDs []int64) AddRepObjectInfo { | |||
| return AddRepObjectInfo{ | |||
| Path: path, | |||
| Size: size, | |||
| FileHash: fileHash, | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| func (client *Client) UpdateRepPackage(msg UpdateRepPackage) (*UpdateRepPackageResp, error) { | |||
| return mq.Request[UpdateRepPackageResp](client.rabbitCli, msg) | |||
| } | |||
| // 更新EC备份模式的Package | |||
| var _ = Register(PackageService.UpdateECPackage) | |||
| type UpdateECPackage struct { | |||
| PackageID int64 `json:"packageID"` | |||
| Adds []AddECObjectInfo `json:"objects"` | |||
| Deletes []int64 `json:"deletes"` | |||
| } | |||
| type UpdateECPackageResp struct{} | |||
| type AddECObjectInfo struct { | |||
| Path string `json:"path"` | |||
| Size int64 `json:"size,string"` | |||
| FileHashes []string `json:"fileHashes"` | |||
| NodeIDs []int64 `json:"nodeIDs"` | |||
| } | |||
| func NewUpdateECPackage(packageID int64, adds []AddECObjectInfo, deletes []int64) UpdateECPackage { | |||
| return UpdateECPackage{ | |||
| PackageID: packageID, | |||
| Adds: adds, | |||
| Deletes: deletes, | |||
| } | |||
| } | |||
| func NewUpdateECPackageResp() UpdateECPackageResp { | |||
| return UpdateECPackageResp{} | |||
| } | |||
| func NewAddECObjectInfo(path string, size int64, fileHashes []string, nodeIDs []int64) AddECObjectInfo { | |||
| return AddECObjectInfo{ | |||
| Path: path, | |||
| Size: size, | |||
| FileHashes: fileHashes, | |||
| NodeIDs: nodeIDs, | |||
| } | |||
| } | |||
| func (client *Client) UpdateECPackage(msg UpdateECPackage) (*UpdateECPackageResp, error) { | |||
| return mq.Request[UpdateECPackageResp](client.rabbitCli, msg) | |||
| } | |||
| // 删除对象 | |||
| var _ = Register(PackageService.DeletePackage) | |||
| type DeletePackage struct { | |||
| UserID int64 `db:"userID"` | |||
| PackageID int64 `db:"packageID"` | |||
| } | |||
| type DeletePackageResp struct{} | |||
| func NewDeletePackage(userID int64, packageID int64) DeletePackage { | |||
| return DeletePackage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| } | |||
| } | |||
| func NewDeletePackageResp() DeletePackageResp { | |||
| return DeletePackageResp{} | |||
| } | |||
| func (client *Client) DeletePackage(msg DeletePackage) (*DeletePackageResp, error) { | |||
| return mq.Request[DeletePackageResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -3,18 +3,23 @@ package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/mq/config" | |||
| ) | |||
| // Service 协调端接口 | |||
| type Service interface { | |||
| ObjectService | |||
| AgentService | |||
| BucketService | |||
| StorageService | |||
| CommonService | |||
| AgentService | |||
| NodeService | |||
| ObjectService | |||
| PackageService | |||
| StorageService | |||
| } | |||
| type Server struct { | |||
| @@ -24,7 +29,7 @@ type Server struct { | |||
| OnError func(err error) | |||
| } | |||
| func NewServer(svc Service, cfg *config.Config) (*Server, error) { | |||
| func NewServer(svc Service, cfg *mymq.Config) (*Server, error) { | |||
| srv := &Server{ | |||
| service: svc, | |||
| } | |||
| @@ -54,14 +59,21 @@ func (s *Server) Serve() error { | |||
| var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher() | |||
| // Register 将Service中的一个接口函数作为指定类型消息的处理函数 | |||
| // Register 将Service中的一个接口函数作为指定类型消息的处理函数,同时会注册请求和响应的消息类型 | |||
| // TODO 需要约束:Service实现了TSvc接口 | |||
| func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) { | |||
| func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) any { | |||
| mq.AddServiceFn(&msgDispatcher, svcFn) | |||
| mq.RegisterMessage[TReq]() | |||
| mq.RegisterMessage[TResp]() | |||
| return nil | |||
| } | |||
| // RegisterNoReply 将Service中的一个*没有返回值的*接口函数作为指定类型消息的处理函数 | |||
| // RegisterNoReply 将Service中的一个*没有返回值的*接口函数作为指定类型消息的处理函数,同时会注册请求和响应的消息类型 | |||
| // TODO 需要约束:Service实现了TSvc接口 | |||
| func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) { | |||
| func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) any { | |||
| mq.AddNoRespServiceFn(&msgDispatcher, svcFn) | |||
| mq.RegisterMessage[TReq]() | |||
| return nil | |||
| } | |||
| @@ -0,0 +1,68 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| type StorageService interface { | |||
| GetStorageInfo(msg *GetStorageInfo) (*GetStorageInfoResp, *mq.CodeMessage) | |||
| PackageMovedToStorage(msg *PackageMovedToStorage) (*PackageMovedToStorageResp, *mq.CodeMessage) | |||
| } | |||
| // 获取Storage信息 | |||
| var _ = Register(StorageService.GetStorageInfo) | |||
| type GetStorageInfo struct { | |||
| UserID int64 `json:"userID"` | |||
| StorageID int64 `json:"storageID"` | |||
| } | |||
| type GetStorageInfoResp struct { | |||
| model.Storage | |||
| } | |||
| func NewGetStorageInfo(userID int64, storageID int64) GetStorageInfo { | |||
| return GetStorageInfo{ | |||
| UserID: userID, | |||
| StorageID: storageID, | |||
| } | |||
| } | |||
| func NewGetStorageInfoResp(storageID int64, name string, nodeID int64, dir string, state string) GetStorageInfoResp { | |||
| return GetStorageInfoResp{ | |||
| model.Storage{ | |||
| StorageID: storageID, | |||
| Name: name, | |||
| NodeID: nodeID, | |||
| Directory: dir, | |||
| State: state, | |||
| }, | |||
| } | |||
| } | |||
| func (client *Client) GetStorageInfo(msg GetStorageInfo) (*GetStorageInfoResp, error) { | |||
| return mq.Request[GetStorageInfoResp](client.rabbitCli, msg) | |||
| } | |||
| // 提交调度记录 | |||
| var _ = Register(StorageService.PackageMovedToStorage) | |||
| type PackageMovedToStorage struct { | |||
| UserID int64 `json:"userID"` | |||
| PackageID int64 `json:"packageID"` | |||
| StorageID int64 `json:"storageID"` | |||
| } | |||
| type PackageMovedToStorageResp struct{} | |||
| func NewPackageMovedToStorage(userID int64, packageID int64, stgID int64) PackageMovedToStorage { | |||
| return PackageMovedToStorage{ | |||
| UserID: userID, | |||
| PackageID: packageID, | |||
| StorageID: stgID, | |||
| } | |||
| } | |||
| func NewPackageMovedToStorageResp() PackageMovedToStorageResp { | |||
| return PackageMovedToStorageResp{} | |||
| } | |||
| func (client *Client) PackageMovedToStorage(msg PackageMovedToStorage) (*PackageMovedToStorageResp, error) { | |||
| return mq.Request[PackageMovedToStorageResp](client.rabbitCli, msg) | |||
| } | |||
| @@ -1,27 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| ) | |||
| type GetState struct { | |||
| } | |||
| func NewGetState() GetState { | |||
| return GetState{} | |||
| } | |||
| type GetStateResp struct { | |||
| IPFSState string `json:"ipfsState"` | |||
| } | |||
| func NewGetStateRespBody(ipfsState string) GetStateResp { | |||
| return GetStateResp{ | |||
| IPFSState: ipfsState, | |||
| } | |||
| } | |||
| func init() { | |||
| mq.RegisterMessage[GetState]() | |||
| mq.RegisterMessage[GetStateResp]() | |||
| } | |||
| @@ -1,184 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
| // 客户端发给代理端,告知要调度多副本冗余的数据,以及要调度数据的详情 | |||
| type StartStorageMoveObject struct { | |||
| UserID int64 `json:"userID"` | |||
| ObjectID int64 `json:"objectID"` | |||
| ObjectName string `json:"objectName"` | |||
| Directory string `json:"directory"` | |||
| FileSize int64 `json:"fileSize,string"` | |||
| Redundancy models.RedundancyData `json:"redundancy"` | |||
| } | |||
| func NewStartStorageMoveObject[T models.RedundancyDataConst](dir string, objectID int64, objectName string, userID int64, fileSize int64, redundancy T) StartStorageMoveObject { | |||
| return StartStorageMoveObject{ | |||
| Directory: dir, | |||
| ObjectID: objectID, | |||
| ObjectName: objectName, | |||
| UserID: userID, | |||
| FileSize: fileSize, | |||
| Redundancy: redundancy, | |||
| } | |||
| } | |||
| // 代理端发给客户端,告知调度的结果 | |||
| type StartStorageMoveObjectResp struct { | |||
| TaskID string `json:"taskID"` | |||
| } | |||
| func NewStartStorageMoveObjectResp(taskID string) StartStorageMoveObjectResp { | |||
| return StartStorageMoveObjectResp{ | |||
| TaskID: taskID, | |||
| } | |||
| } | |||
| type WaitStorageMoveObject struct { | |||
| TaskID string `json:"taskID"` | |||
| WaitTimeoutMs int64 `json:"waitTimeout"` | |||
| } | |||
| func NewWaitStorageMoveObject(taskID string, waitTimeoutMs int64) WaitStorageMoveObject { | |||
| return WaitStorageMoveObject{ | |||
| TaskID: taskID, | |||
| WaitTimeoutMs: waitTimeoutMs, | |||
| } | |||
| } | |||
| type WaitStorageMoveObjectResp struct { | |||
| IsComplete bool `json:"isComplete"` | |||
| Error string `json:"error"` | |||
| } | |||
| func NewWaitStorageMoveObjectResp(isComplete bool, err string) WaitStorageMoveObjectResp { | |||
| return WaitStorageMoveObjectResp{ | |||
| IsComplete: isComplete, | |||
| Error: err, | |||
| } | |||
| } | |||
| type StorageCheck struct { | |||
| StorageID int64 `json:"storageID"` | |||
| Directory string `json:"directory"` | |||
| IsComplete bool `json:"isComplete"` | |||
| Objects []model.StorageObject `json:"objects"` | |||
| } | |||
| func NewStorageCheck(storageID int64, directory string, isComplete bool, objects []model.StorageObject) StorageCheck { | |||
| return StorageCheck{ | |||
| StorageID: storageID, | |||
| Directory: directory, | |||
| IsComplete: isComplete, | |||
| Objects: objects, | |||
| } | |||
| } | |||
| type StorageCheckResp struct { | |||
| DirectoryState string `json:"directoryState"` | |||
| Entries []StorageCheckRespEntry `json:"entries"` | |||
| } | |||
| const ( | |||
| CHECK_STORAGE_RESP_OP_DELETE = "Delete" | |||
| CHECK_STORAGE_RESP_OP_SET_NORMAL = "SetNormal" | |||
| ) | |||
| type StorageCheckRespEntry struct { | |||
| ObjectID int64 `json:"objectID"` | |||
| UserID int64 `json:"userID"` | |||
| Operation string `json:"operation"` | |||
| } | |||
| func NewStorageCheckRespEntry(objectID int64, userID int64, op string) StorageCheckRespEntry { | |||
| return StorageCheckRespEntry{ | |||
| ObjectID: objectID, | |||
| UserID: userID, | |||
| Operation: op, | |||
| } | |||
| } | |||
| func NewStorageCheckResp(dirState string, entries []StorageCheckRespEntry) StorageCheckResp { | |||
| return StorageCheckResp{ | |||
| DirectoryState: dirState, | |||
| Entries: entries, | |||
| } | |||
| } | |||
| type StartStorageUploadRepObject struct { | |||
| UserID int64 `json:"userID"` | |||
| FilePath string `json:"filePath"` | |||
| BucketID int64 `json:"bucketID"` | |||
| ObjectName string `json:"objectName"` | |||
| RepCount int `json:"repCount"` | |||
| StorageDirectory string `json:"storageDirectory"` | |||
| } | |||
| func NewStartStorageUploadRepObject(userID int64, filePath string, bucketID int64, objectName string, repCount int, storageDirectory string) StartStorageUploadRepObject { | |||
| return StartStorageUploadRepObject{ | |||
| UserID: userID, | |||
| FilePath: filePath, | |||
| BucketID: bucketID, | |||
| ObjectName: objectName, | |||
| RepCount: repCount, | |||
| StorageDirectory: storageDirectory, | |||
| } | |||
| } | |||
// StartStorageUploadRepObjectResp returns the ID of the async upload task.
type StartStorageUploadRepObjectResp struct {
	TaskID string `json:"taskID"`
}

// NewStartStorageUploadRepObjectResp wraps a task ID in a response message.
func NewStartStorageUploadRepObjectResp(taskID string) StartStorageUploadRepObjectResp {
	return StartStorageUploadRepObjectResp{TaskID: taskID}
}

// WaitStorageUploadRepObject asks the agent to wait (up to WaitTimeoutMs
// milliseconds) for the given upload task to complete.
type WaitStorageUploadRepObject struct {
	TaskID        string `json:"taskID"`
	WaitTimeoutMs int64  `json:"waitTimeout"`
}

// NewWaitStorageUploadRepObject builds a wait request for a task.
func NewWaitStorageUploadRepObject(taskID string, waitTimeoutMs int64) WaitStorageUploadRepObject {
	var msg WaitStorageUploadRepObject
	msg.TaskID = taskID
	msg.WaitTimeoutMs = waitTimeoutMs
	return msg
}
// WaitStorageUploadRepObjectResp reports whether the upload task finished,
// and on completion carries the resulting object ID and file hash (or an
// error description).
type WaitStorageUploadRepObjectResp struct {
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error"`
	ObjectID   int64  `json:"objectID"`
	FileHash   string `json:"fileHash"`
}

// NewWaitStorageUploadRepObjectResp builds the wait response from its parts.
func NewWaitStorageUploadRepObjectResp(isComplete bool, err string, objectID int64, fileHash string) WaitStorageUploadRepObjectResp {
	var resp WaitStorageUploadRepObjectResp
	resp.IsComplete = isComplete
	resp.Error = err
	resp.ObjectID = objectID
	resp.FileHash = fileHash
	return resp
}
// init registers every agent storage message type with the mq package so the
// serializer can resolve them by type name at (de)serialization time.
func init() {
	mq.RegisterMessage[StartStorageMoveObject]()
	mq.RegisterMessage[StartStorageMoveObjectResp]()
	mq.RegisterMessage[WaitStorageMoveObject]()
	mq.RegisterMessage[WaitStorageMoveObjectResp]()
	mq.RegisterMessage[StorageCheck]()
	mq.RegisterMessage[StorageCheckResp]()
	mq.RegisterMessage[StartStorageUploadRepObject]()
	mq.RegisterMessage[StartStorageUploadRepObjectResp]()
	mq.RegisterMessage[WaitStorageUploadRepObject]()
	mq.RegisterMessage[WaitStorageUploadRepObjectResp]()
}
| @@ -1,102 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// GetUserBuckets asks the coordinator for all buckets visible to a user.
type GetUserBuckets struct {
	UserID int64 `json:"userID"`
}

// NewGetUserBuckets builds a GetUserBuckets request for the given user.
func NewGetUserBuckets(userID int64) GetUserBuckets {
	var msg GetUserBuckets
	msg.UserID = userID
	return msg
}
| type GetUserBucketsResp struct { | |||
| Buckets []model.Bucket `json:"buckets"` | |||
| } | |||
| func NewGetUserBucketsResp(buckets []model.Bucket) GetUserBucketsResp { | |||
| return GetUserBucketsResp{ | |||
| Buckets: buckets, | |||
| } | |||
| } | |||
// GetBucketObjects asks the coordinator for the objects inside one bucket.
type GetBucketObjects struct {
	UserID   int64 `json:"userID"`
	BucketID int64 `json:"bucketID"`
}

// NewGetBucketObjects builds the request for the given user and bucket.
func NewGetBucketObjects(userID int64, bucketID int64) GetBucketObjects {
	var msg GetBucketObjects
	msg.UserID = userID
	msg.BucketID = bucketID
	return msg
}
| type GetBucketObjectsResp struct { | |||
| Objects []model.Object `json:"objects"` | |||
| } | |||
| func NewGetBucketObjectsResp(objects []model.Object) GetBucketObjectsResp { | |||
| return GetBucketObjectsResp{ | |||
| Objects: objects, | |||
| } | |||
| } | |||
// CreateBucket asks the coordinator to create a bucket owned by the user.
type CreateBucket struct {
	UserID     int64  `json:"userID"`
	BucketName string `json:"bucketName"`
}

// NewCreateBucket builds a bucket-creation request.
func NewCreateBucket(userID int64, bucketName string) CreateBucket {
	var msg CreateBucket
	msg.UserID = userID
	msg.BucketName = bucketName
	return msg
}

// CreateBucketResp returns the ID assigned to the newly created bucket.
type CreateBucketResp struct {
	BucketID int64 `json:"bucketID"`
}

// NewCreateBucketResp wraps the new bucket's ID in a response message.
func NewCreateBucketResp(bucketID int64) CreateBucketResp {
	return CreateBucketResp{BucketID: bucketID}
}
// DeleteBucket asks the coordinator to delete one of the user's buckets.
type DeleteBucket struct {
	UserID   int64 `json:"userID"`
	BucketID int64 `json:"bucketID"`
}

// NewDeleteBucket builds a bucket-deletion request.
func NewDeleteBucket(userID int64, bucketID int64) DeleteBucket {
	var msg DeleteBucket
	msg.UserID = userID
	msg.BucketID = bucketID
	return msg
}

// DeleteBucketResp acknowledges a deletion; it carries no payload.
type DeleteBucketResp struct{}

// NewDeleteBucketResp returns an empty acknowledgement.
func NewDeleteBucketResp() DeleteBucketResp {
	return DeleteBucketResp{}
}
// init registers the bucket request/response message types with the mq
// package so they can be resolved by type name during (de)serialization.
func init() {
	mq.RegisterMessage[GetUserBuckets]()
	mq.RegisterMessage[GetUserBucketsResp]()
	mq.RegisterMessage[GetBucketObjects]()
	mq.RegisterMessage[GetBucketObjectsResp]()
	mq.RegisterMessage[CreateBucket]()
	mq.RegisterMessage[CreateBucketResp]()
	mq.RegisterMessage[DeleteBucket]()
	mq.RegisterMessage[DeleteBucketResp]()
}
| @@ -1,24 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "testing" | |||
| . "github.com/smartystreets/goconvey/convey" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| ) | |||
// TestSerder round-trips a PreDownloadObject message through the mq
// serializer and checks that the deserialized message equals the original.
func TestSerder(t *testing.T) {
	Convey("序列化ReadCmd", t, func() {
		msg := mq.MakeMessage(NewPreDownloadObject(1, 123, ""))
		// Serialize then deserialize; both steps must succeed.
		data, err := mq.Serialize(msg)
		So(err, ShouldBeNil)
		deMsg, err := mq.Deserialize(data)
		So(err, ShouldBeNil)
		// The round-tripped message must be structurally identical.
		So(*deMsg, ShouldResemble, msg)
	})
}
| @@ -1,295 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ramsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message" | |||
| ) | |||
// GetObjectsByDirName is sent by the client to the coordinator to look up all
// objects under a given directory name.
type GetObjectsByDirName struct {
	UserID  int64  `json:"userID"`
	DirName string `json:"dirName"`
}

// NewGetObjectsByDirName builds the lookup request.
func NewGetObjectsByDirName(userID int64, dirName string) GetObjectsByDirName {
	var msg GetObjectsByDirName
	msg.UserID = userID
	msg.DirName = dirName
	return msg
}
| // 协调端告知客户端,查询到的object信息 | |||
| type GetObjectsResp struct { | |||
| Objects []model.Object `json:"objects"` | |||
| } | |||
| func NewGetObjectsResp(objects []model.Object) GetObjectsResp { | |||
| return GetObjectsResp{ | |||
| Objects: objects, | |||
| } | |||
| } | |||
// PreDownloadObject is sent by the client to the coordinator to announce an
// upcoming read of an object.
type PreDownloadObject struct {
	ObjectID         int64  `json:"objectID"`
	UserID           int64  `json:"userID"`
	ClientExternalIP string `json:"clientExternalIP"` // the client's external IP
}

// NewPreDownloadObject builds the pre-download announcement.
func NewPreDownloadObject(objectID int64, userID int64, clientExternalIP string) PreDownloadObject {
	var msg PreDownloadObject
	msg.ObjectID = objectID
	msg.UserID = userID
	msg.ClientExternalIP = clientExternalIP
	return msg
}
// PreDownloadObjectResp is the coordinator's reply with the metadata of the
// object about to be read: its size and its redundancy layout.
type PreDownloadObjectResp struct {
	// FileSize is JSON-encoded as a string ("fileSize,string") so large
	// values survive JSON number handling.
	FileSize int64 `json:"fileSize,string"`
	Redundancy ramsg.RespRedundancyData `json:"redundancy"`
}

// NewPreDownloadObjectResp builds the reply. It is generic over the concrete
// redundancy representations allowed by ramsg.RespRedundancyDataConst so the
// caller cannot pass an arbitrary value for the Redundancy field.
func NewPreDownloadObjectResp[T ramsg.RespRedundancyDataConst](fileSize int64, redundancy T) PreDownloadObjectResp {
	return PreDownloadObjectResp{
		Redundancy: redundancy,
		FileSize: fileSize,
	}
}
// PreUploadRepObject is sent by the client to the coordinator to announce an
// upcoming replicated (multi-copy) write.
type PreUploadRepObject struct {
	BucketID         int64  `json:"bucketID"`
	ObjectName       string `json:"objectName"`
	FileSize         int64  `json:"fileSize,string"`
	UserID           int64  `json:"userID"`
	ClientExternalIP string `json:"clientExternalIP"` // the client's external IP
}

// NewPreUploadRepObjectBody builds the pre-upload announcement.
func NewPreUploadRepObjectBody(bucketID int64, objectName string, fileSize int64, userID int64, clientExterIP string) PreUploadRepObject {
	var msg PreUploadRepObject
	msg.BucketID = bucketID
	msg.ObjectName = objectName
	msg.FileSize = fileSize
	msg.UserID = userID
	msg.ClientExternalIP = clientExterIP
	return msg
}
| // 协调端发给客户端,返回副本的写入目的地节点IP | |||
| type PreUploadResp struct { | |||
| Nodes []ramsg.RespNode `json:"nodes"` | |||
| } | |||
| func NewPreUploadResp(nodes []ramsg.RespNode) PreUploadResp { | |||
| return PreUploadResp{ | |||
| Nodes: nodes, | |||
| } | |||
| } | |||
// PreUploadEcObject is sent by the client to the coordinator to announce an
// upcoming erasure-coded write.
type PreUploadEcObject struct {
	BucketID         int64  `json:"bucketID"`
	ObjectName       string `json:"objectName"`
	FileSize         int64  `json:"fileSize,string"`
	EcName           string `json:"ecName"`
	UserID           int64  `json:"userID"`
	ClientExternalIP string `json:"clientExternalIP"` // external IP of the writer
}

// NewPreUploadEcObject builds the erasure-coded pre-upload announcement.
func NewPreUploadEcObject(bucketID int64, objectName string, fileSize int64, ecName string, userID int64, writerExterIP string) PreUploadEcObject {
	var msg PreUploadEcObject
	msg.BucketID = bucketID
	msg.ObjectName = objectName
	msg.FileSize = fileSize
	msg.EcName = ecName
	msg.UserID = userID
	msg.ClientExternalIP = writerExterIP
	return msg
}
| // 协调端发给客户端,返回编码块的写入目的地节点IP | |||
| type PreUploadEcResp struct { | |||
| Nodes []ramsg.RespNode `json:"nodes"` | |||
| Ec ramsg.Ec `json:"ec"` | |||
| } | |||
| func NewPreUploadEcResp(nodes []ramsg.RespNode, ec ramsg.Ec) PreUploadEcResp { | |||
| return PreUploadEcResp{ | |||
| Nodes: nodes, | |||
| Ec: ec, | |||
| } | |||
| } | |||
// CreateRepObject asks the coordinator to record a newly uploaded replicated
// object: which nodes hold it, its hash, size, replica count and directory.
type CreateRepObject struct {
	BucketID   int64   `json:"bucketID"`
	ObjectName string  `json:"objectName"`
	NodeIDs    []int64 `json:"nodeIDs"`
	FileHash   string  `json:"fileHash"`
	FileSize   int64   `json:"fileSize,string"`
	RepCount   int     `json:"repCount"`
	UserID     int64   `json:"userID"`
	DirName    string  `json:"dirName"`
}

// NewCreateRepObject builds the creation request.
//
// Bug fix: the original accepted dirName but never assigned it, so DirName
// was always empty on the wire (NewCreateEcObject, its sibling, does assign
// it). The parameter is now stored in the DirName field.
func NewCreateRepObject(bucketID int64, objectName string, fileSize int64, repCount int, userID int64, nodeIDs []int64, fileHash string, dirName string) CreateRepObject {
	return CreateRepObject{
		BucketID:   bucketID,
		ObjectName: objectName,
		FileSize:   fileSize,
		RepCount:   repCount,
		UserID:     userID,
		NodeIDs:    nodeIDs,
		FileHash:   fileHash,
		DirName:    dirName,
	}
}
// CreateEcObject asks the coordinator to record a newly uploaded
// erasure-coded object: the nodes and hashes of its blocks, plus metadata.
type CreateEcObject struct {
	BucketID   int64    `json:"bucketID"`
	ObjectName string   `json:"objectName"`
	NodeIDs    []int64  `json:"nodeIDs"`
	Hashes     []string `json:"hashes"`
	FileSize   int64    `json:"fileSize,string"`
	UserID     int64    `json:"userID"`
	EcName     string   `json:"ecName"`
	DirName    string   `json:"dirName"`
}

// NewCreateEcObject builds the creation request from its parts.
func NewCreateEcObject(bucketID int64, objectName string, fileSize int64, userID int64, nodeIDs []int64, hashes []string, ecName string, dirName string) CreateEcObject {
	var msg CreateEcObject
	msg.BucketID = bucketID
	msg.ObjectName = objectName
	msg.FileSize = fileSize
	msg.UserID = userID
	msg.NodeIDs = nodeIDs
	msg.Hashes = hashes
	msg.EcName = ecName
	msg.DirName = dirName
	return msg
}
// CreateObjectResp is the coordinator's reply to an object-creation request,
// carrying the ID assigned to the new object.
type CreateObjectResp struct {
	ObjectID int64 `json:"objectID"`
}

// NewCreateObjectResp wraps the new object's ID in a response message.
func NewCreateObjectResp(objectID int64) CreateObjectResp {
	return CreateObjectResp{ObjectID: objectID}
}
// PreUpdateRepObject announces an upcoming update of a replicated object.
type PreUpdateRepObject struct {
	ObjectID         int64  `json:"objectID"`
	FileSize         int64  `json:"fileSize,string"`
	UserID           int64  `json:"userID"`
	ClientExternalIP string `json:"clientExternalIP"`
}

// NewPreUpdateRepObject builds the pre-update announcement.
func NewPreUpdateRepObject(objectID int64, fileSize int64, userID int64, clientExternalIP string) PreUpdateRepObject {
	var msg PreUpdateRepObject
	msg.ObjectID = objectID
	msg.FileSize = fileSize
	msg.UserID = userID
	msg.ClientExternalIP = clientExternalIP
	return msg
}
// PreUpdateRepObjectResp lists the candidate nodes for the updated replicas.
type PreUpdateRepObjectResp struct {
	Nodes []PreUpdateRepObjectRespNode `json:"nodes"`
}

// PreUpdateRepObjectRespNode describes one candidate node for the update.
type PreUpdateRepObjectRespNode struct {
	ID             int64  `json:"id"`
	ExternalIP     string `json:"externalIP"`
	LocalIP        string `json:"localIP"`
	IsSameLocation bool   `json:"isSameLocation"` // whether the client is in the same region as this node
	HasOldObject   bool   `json:"hasOldObject"`   // whether this node already holds the old object file
}

// NewPreUpdateRepObjectRespNode builds one candidate-node entry.
func NewPreUpdateRepObjectRespNode(id int64, exterIP string, localIP string, isSameLocation bool, hasOldObject bool) PreUpdateRepObjectRespNode {
	var n PreUpdateRepObjectRespNode
	n.ID = id
	n.ExternalIP = exterIP
	n.LocalIP = localIP
	n.IsSameLocation = isSameLocation
	n.HasOldObject = hasOldObject
	return n
}

// NewPreUpdateRepObjectResp wraps the candidate-node list in a response.
func NewPreUpdateRepObjectResp(nodes []PreUpdateRepObjectRespNode) PreUpdateRepObjectResp {
	resp := PreUpdateRepObjectResp{Nodes: nodes}
	return resp
}
// UpdateRepObject commits an update of a replicated object: the new hash,
// size, and the nodes that now hold the replicas.
type UpdateRepObject struct {
	ObjectID int64   `json:"objectID"`
	FileHash string  `json:"fileHash"`
	FileSize int64   `json:"fileSize,string"`
	NodeIDs  []int64 `json:"nodeIDs"`
	UserID   int64   `json:"userID"`
}

// NewUpdateRepObject builds the update-commit request.
func NewUpdateRepObject(objectID int64, fileHash string, fileSize int64, nodeIDs []int64, userID int64) UpdateRepObject {
	var msg UpdateRepObject
	msg.ObjectID = objectID
	msg.FileHash = fileHash
	msg.FileSize = fileSize
	msg.NodeIDs = nodeIDs
	msg.UserID = userID
	return msg
}
// UpdateRepObjectResp acknowledges an update; it carries no payload.
type UpdateRepObjectResp struct{}

// NewUpdateRepObjectResp returns an empty acknowledgement.
func NewUpdateRepObjectResp() UpdateRepObjectResp {
	var resp UpdateRepObjectResp
	return resp
}
| // DeleteObjectBody 删除对象 | |||
| type DeleteObject struct { | |||
| UserID int64 `db:"userID"` | |||
| ObjectID int64 `db:"objectID"` | |||
| } | |||
| func NewDeleteObject(userID int64, objectID int64) DeleteObject { | |||
| return DeleteObject{ | |||
| UserID: userID, | |||
| ObjectID: objectID, | |||
| } | |||
| } | |||
| type DeleteObjectResp struct{} | |||
| func NewDeleteObjectResp() DeleteObjectResp { | |||
| return DeleteObjectResp{} | |||
| } | |||
// init registers all coordinator object message types with the mq package so
// they can be resolved by type name during (de)serialization.
func init() {
	mq.RegisterMessage[GetObjectsByDirName]()
	mq.RegisterMessage[GetObjectsResp]()
	mq.RegisterMessage[PreDownloadObject]()
	mq.RegisterMessage[PreDownloadObjectResp]()
	mq.RegisterMessage[PreUploadRepObject]()
	mq.RegisterMessage[PreUploadResp]()
	mq.RegisterMessage[PreUploadEcObject]()
	mq.RegisterMessage[PreUploadEcResp]()
	mq.RegisterMessage[CreateRepObject]()
	mq.RegisterMessage[CreateEcObject]()
	mq.RegisterMessage[CreateObjectResp]()
	mq.RegisterMessage[PreUpdateRepObject]()
	mq.RegisterMessage[PreUpdateRepObjectResp]()
	mq.RegisterMessage[UpdateRepObject]()
	mq.RegisterMessage[UpdateRepObjectResp]()
	mq.RegisterMessage[DeleteObject]()
	mq.RegisterMessage[DeleteObjectResp]()
}
| @@ -1,100 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/models" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/db/model" | |||
| ) | |||
// GetStorageInfo asks the coordinator for the details of one storage service.
type GetStorageInfo struct {
	UserID    int64 `json:"userID"`
	StorageID int64 `json:"storageID"`
}

// NewGetStorageInfo builds the request for the given user and storage.
func NewGetStorageInfo(userID int64, storageID int64) GetStorageInfo {
	var msg GetStorageInfo
	msg.UserID = userID
	msg.StorageID = storageID
	return msg
}
// GetStorageInfoResp is the coordinator's reply; it embeds the full Storage
// record so callers can access its fields directly.
type GetStorageInfoResp struct {
	model.Storage
}

// NewGetStorageInfoResp assembles the response from the individual fields of
// the storage record.
func NewGetStorageInfoResp(storageID int64, name string, nodeID int64, dir string, state string) GetStorageInfoResp {
	return GetStorageInfoResp{
		model.Storage{
			StorageID: storageID,
			Name: name,
			NodeID: nodeID,
			Directory: dir,
			State: state,
		},
	}
}
// PreMoveObjectToStorage is sent by the client to the coordinator to announce
// an upcoming move of an object into a storage service.
type PreMoveObjectToStorage struct {
	ObjectID  int64 `json:"objectID"`
	StorageID int64 `json:"storageID"`
	UserID    int64 `json:"userID"`
}

// NewPreMoveObjectToStorage builds the pre-move announcement.
func NewPreMoveObjectToStorage(objectID int64, stgID int64, userID int64) PreMoveObjectToStorage {
	var msg PreMoveObjectToStorage
	msg.ObjectID = objectID
	msg.StorageID = stgID
	msg.UserID = userID
	return msg
}
// PreMoveObjectToStorageResp is the coordinator's reply describing the move:
// the node hosting the storage, its directory, the object metadata and the
// object's redundancy layout.
type PreMoveObjectToStorageResp struct {
	NodeID int64 `json:"nodeID"`
	Directory string `json:"directory"`
	Object model.Object `json:"object"`
	Redundancy models.RedundancyData `json:"redundancy"`
}

// NewPreMoveObjectToStorageRespBody builds the reply. It is generic over the
// concrete redundancy types permitted by models.RedundancyData so the
// Redundancy field is always one of the known representations.
func NewPreMoveObjectToStorageRespBody[T models.RedundancyData](nodeID int64, dir string, object model.Object, redundancy T) PreMoveObjectToStorageResp {
	return PreMoveObjectToStorageResp{
		NodeID: nodeID,
		Directory: dir,
		Object: object,
		Redundancy: redundancy,
	}
}
// MoveObjectToStorage notifies the coordinator that the move of an object
// into a storage service has finished.
type MoveObjectToStorage struct {
	ObjectID  int64 `json:"objectID"`
	StorageID int64 `json:"storageID"`
	UserID    int64 `json:"userID"`
}

// NewMoveObjectToStorage builds the move-completed notification.
func NewMoveObjectToStorage(objectID int64, stgID int64, userID int64) MoveObjectToStorage {
	var msg MoveObjectToStorage
	msg.ObjectID = objectID
	msg.StorageID = stgID
	msg.UserID = userID
	return msg
}

// MoveObjectToStorageResp acknowledges the notification; it has no payload.
type MoveObjectToStorageResp struct{}

// NewMoveObjectToStorageResp returns an empty acknowledgement.
func NewMoveObjectToStorageResp() MoveObjectToStorageResp {
	return MoveObjectToStorageResp{}
}
// init registers the coordinator storage message types with the mq package
// so they can be resolved by type name during (de)serialization.
func init() {
	mq.RegisterMessage[GetStorageInfo]()
	mq.RegisterMessage[GetStorageInfoResp]()
	mq.RegisterMessage[PreMoveObjectToStorage]()
	mq.RegisterMessage[PreMoveObjectToStorageResp]()
	mq.RegisterMessage[MoveObjectToStorage]()
	mq.RegisterMessage[MoveObjectToStorageResp]()
}
| @@ -1,113 +0,0 @@ | |||
| package message | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/models" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| myreflect "gitlink.org.cn/cloudream/common/utils/reflect" | |||
| mymodels "gitlink.org.cn/cloudream/storage-common/models" | |||
| ) | |||
// Node identifies a node and its two addresses (external and LAN).
type Node struct {
	ID         int64  `json:"id"`
	ExternalIP string `json:"externalIP"`
	LocalIP    string `json:"localIP"`
}

// NewNode builds a Node value from its parts.
func NewNode(id int64, externalIP string, localIP string) Node {
	var n Node
	n.ID = id
	n.ExternalIP = externalIP
	n.LocalIP = localIP
	return n
}

// RespNode is a Node enriched with response-only information.
type RespNode struct {
	Node
	IsSameLocation bool `json:"isSameLocation"` // whether the client is in the same region as this node
}

// NewRespNode builds a RespNode from the node fields plus the location flag.
func NewRespNode(id int64, externalIP string, localIP string, isSameLocation bool) RespNode {
	var rn RespNode
	rn.Node = NewNode(id, externalIP, localIP)
	rn.IsSameLocation = isSameLocation
	return rn
}
// The Resp-prefixed RedundancyData types differ from plain RedundancyData in
// that they carry extra response-only fields such as Nodes. (Original note:
// a better name is still wanted.)
//
// RespRedundancyDataConst is the generic type-set constraint enumerating the
// concrete response redundancy representations.
type RespRedundancyDataConst interface {
	RespRepRedundancyData | RespEcRedundancyData
}

// RespRedundancyData is the erased form used in message fields; at runtime
// it holds one of the types in RespRedundancyDataConst.
type RespRedundancyData interface{}

// RespRepRedundancyData is the replication layout plus the nodes holding the
// replicas.
type RespRepRedundancyData struct {
	mymodels.RepRedundancyData
	Nodes []RespNode `json:"nodes"`
}

// NewRespRepRedundancyData builds the replication redundancy response data.
func NewRespRepRedundancyData(fileHash string, nodes []RespNode) RespRepRedundancyData {
	return RespRepRedundancyData{
		RepRedundancyData: mymodels.RepRedundancyData{
			FileHash: fileHash,
		},
		Nodes: nodes,
	}
}
| type RespEcRedundancyData struct { | |||
| Ec Ec `json:"ec"` | |||
| Nodes [][]RespNode `json:"nodes"` | |||
| Blocks []RespObjectBlock `json:"blocks"` | |||
| } | |||
| func NewRespEcRedundancyData(ec Ec, blocks []RespObjectBlock, nodes [][]RespNode) RespEcRedundancyData { | |||
| return RespEcRedundancyData{ | |||
| Ec: ec, | |||
| Nodes: nodes, | |||
| Blocks: blocks, | |||
| } | |||
| } | |||
// RespObjectBlock is one erasure-coded block in a response. The per-block
// node used to live here; it is now carried separately (see the Nodes field
// of RespEcRedundancyData), hence the commented-out remnants below.
type RespObjectBlock struct {
	mymodels.ObjectBlock
	//Node RespNode `json:"node"`
}

// NewRespObjectBlock builds a block entry from its index and file hash.
// func NewRespObjectBlock(index int, fileHash string, node RespNode) RespObjectBlock {
func NewRespObjectBlock(index int, fileHash string) RespObjectBlock {
	return RespObjectBlock{
		ObjectBlock: mymodels.ObjectBlock{
			Index: index,
			FileHash: fileHash,
		},
		//Node: node,
	}
}
// Ec describes an erasure-coding scheme: its identity and its K (data) /
// N (total) block counts.
type Ec struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
	EcK  int    `json:"ecK"`
	EcN  int    `json:"ecN"`
}

// NewEc builds an Ec value from its parts.
func NewEc(id int, name string, k int, n int) Ec {
	var ec Ec
	ec.ID = id
	ec.Name = name
	ec.EcK = k
	ec.EcN = n
	return ec
}
// init registers the polymorphic type sets with the mq package so that
// interface-typed fields (redundancy info/data) can be deserialized into the
// correct concrete type.
func init() {
	mq.RegisterTypeSet[models.RedundancyInfo](myreflect.TypeOf[models.RepRedundancyInfo](), myreflect.TypeOf[models.ECRedundancyInfo]())
	mq.RegisterTypeSet[mymodels.RedundancyData](myreflect.TypeOf[mymodels.RepRedundancyData](), myreflect.TypeOf[mymodels.ECRedundancyData]())
	mq.RegisterTypeSet[RespRedundancyData](myreflect.TypeOf[RespRepRedundancyData](), myreflect.TypeOf[RespEcRedundancyData]())
}
| @@ -1,17 +0,0 @@ | |||
| package event | |||
// AgentCheckStorage asks a scanner to check the contents of one storage.
type AgentCheckStorage struct {
	StorageID int64 `json:"storageID"`
	// ObjectIDs lists the objects to check; a nil slice (as opposed to an
	// empty one) requests a full check of the storage.
	ObjectIDs []int64 `json:"objectIDs"`
}

// NewAgentCheckStorage builds the check event.
func NewAgentCheckStorage(storageID int64, objectIDs []int64) AgentCheckStorage {
	var ev AgentCheckStorage
	ev.StorageID = storageID
	ev.ObjectIDs = objectIDs
	return ev
}
// init registers the AgentCheckStorage event type with the event registry.
func init() {
	Register[AgentCheckStorage]()
}
| @@ -1,15 +0,0 @@ | |||
| package event | |||
// CheckObject asks the scanner to verify a specific set of objects.
type CheckObject struct {
	ObjectIDs []int64 `json:"objectIDs"`
}

// NewCheckObject builds the check event for the given object IDs.
func NewCheckObject(objectIDs []int64) CheckObject {
	var ev CheckObject
	ev.ObjectIDs = objectIDs
	return ev
}
// init registers the CheckObject event type with the event registry.
func init() {
	Register[CheckObject]()
}
| @@ -3,14 +3,13 @@ package scanner | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/mq/config" | |||
| ) | |||
| type Client struct { | |||
| rabbitCli *mq.RabbitMQClient | |||
| } | |||
| func NewClient(cfg *config.Config) (*Client, error) { | |||
| func NewClient(cfg *mymq.Config) (*Client, error) { | |||
| rabbitCli, err := mq.NewRabbitMQClient(cfg.MakeConnectingURL(), mymq.SCANNER_QUEUE_NAME, "") | |||
| if err != nil { | |||
| return nil, err | |||
| @@ -2,11 +2,19 @@ package scanner | |||
| import ( | |||
| "fmt" | |||
| "time" | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/scanner/event" | |||
| scevt "gitlink.org.cn/cloudream/storage-common/pkgs/mq/scanner/event" | |||
| ) | |||
// EventService is the scanner-side handler interface for posted events.
type EventService interface {
	PostEvent(event *PostEvent)
}

// Post an event (fire-and-forget, no reply). Assigning to the blank
// identifier runs the registration at package load time, wiring PostEvent up
// as the handler for its message type.
var _ = RegisterNoReply(EventService.PostEvent)
| type PostEvent struct { | |||
| Event map[string]any `json:"event"` | |||
| IsEmergency bool `json:"isEmergency"` // 重要消息,优先处理 | |||
| @@ -25,7 +33,18 @@ func NewPostEvent(event any, isEmergency bool, dontMerge bool) (PostEvent, error | |||
| DontMerge: dontMerge, | |||
| }, nil | |||
| } | |||
| func (cli *Client) PostEvent(event any, isEmergency bool, dontMerge bool, opts ...mq.SendOption) error { | |||
| opt := mq.SendOption{ | |||
| Timeout: time.Second * 30, | |||
| } | |||
| if len(opts) > 0 { | |||
| opt = opts[0] | |||
| } | |||
| body, err := NewPostEvent(event, isEmergency, dontMerge) | |||
| if err != nil { | |||
| return fmt.Errorf("new post event body failed, err: %w", err) | |||
| } | |||
| func init() { | |||
| mq.RegisterMessage[PostEvent]() | |||
| return mq.Send(cli.rabbitCli, body, opt) | |||
| } | |||
| @@ -0,0 +1,17 @@ | |||
| package event | |||
// AgentCheckStorage asks a scanner to check the contents of one storage.
type AgentCheckStorage struct {
	StorageID int64 `json:"storageID"`
	// PackageIDs lists the packages to check; a nil slice (as opposed to an
	// empty one) requests a full check of the storage.
	PackageIDs []int64 `json:"packageIDs"`
}

// NewAgentCheckStorage builds the check event.
func NewAgentCheckStorage(storageID int64, packageIDs []int64) AgentCheckStorage {
	var ev AgentCheckStorage
	ev.StorageID = storageID
	ev.PackageIDs = packageIDs
	return ev
}
// init registers the AgentCheckStorage event type with the event registry.
func init() {
	Register[AgentCheckStorage]()
}
| @@ -0,0 +1,15 @@ | |||
| package event | |||
| type CheckPackage struct { | |||
| PackageIDs []int64 `json:"packageIDs"` | |||
| } | |||
| func NewCheckPackage(packageIDs []int64) CheckPackage { | |||
| return CheckPackage{ | |||
| PackageIDs: packageIDs, | |||
| } | |||
| } | |||
// init registers the CheckPackage event type with the event registry.
func init() {
	Register[CheckPackage]()
}
| @@ -3,7 +3,6 @@ package scanner | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| mymq "gitlink.org.cn/cloudream/storage-common/pkgs/mq" | |||
| "gitlink.org.cn/cloudream/storage-common/pkgs/mq/config" | |||
| ) | |||
| // Service 协调端接口 | |||
| @@ -17,7 +16,7 @@ type Server struct { | |||
| OnError func(err error) | |||
| } | |||
| func NewServer(svc Service, cfg *config.Config) (*Server, error) { | |||
| func NewServer(svc Service, cfg *mymq.Config) (*Server, error) { | |||
| srv := &Server{ | |||
| service: svc, | |||
| } | |||
| @@ -48,14 +47,21 @@ func (s *Server) Serve() error { | |||
| var msgDispatcher mq.MessageDispatcher = mq.NewMessageDispatcher() | |||
| // Register 将Service中的一个接口函数作为指定类型消息的处理函数 | |||
| // Register 将Service中的一个接口函数作为指定类型消息的处理函数,同时会注册请求和响应的消息类型 | |||
| // TODO 需要约束:Service实现了TSvc接口 | |||
| func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) { | |||
| func Register[TSvc any, TReq any, TResp any](svcFn func(svc TSvc, msg *TReq) (*TResp, *mq.CodeMessage)) any { | |||
| mq.AddServiceFn(&msgDispatcher, svcFn) | |||
| mq.RegisterMessage[TReq]() | |||
| mq.RegisterMessage[TResp]() | |||
| return nil | |||
| } | |||
| // RegisterNoReply 将Service中的一个*没有返回值的*接口函数作为指定类型消息的处理函数 | |||
| // RegisterNoReply 将Service中的一个*没有返回值的*接口函数作为指定类型消息的处理函数,同时会注册请求和响应的消息类型 | |||
| // TODO 需要约束:Service实现了TSvc接口 | |||
| func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) { | |||
| func RegisterNoReply[TSvc any, TReq any](svcFn func(svc TSvc, msg *TReq)) any { | |||
| mq.AddNoRespServiceFn(&msgDispatcher, svcFn) | |||
| mq.RegisterMessage[TReq]() | |||
| return nil | |||
| } | |||
| @@ -1,14 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
// AgentService is the agent-side handler interface for state queries.
type AgentService interface {
	GetState(msg *agtmsg.GetState) (*agtmsg.GetStateResp, *mq.CodeMessage)
}

// init wires GetState up as the handler for its request message type.
func init() {
	Register(AgentService.GetState)
}
| @@ -1,14 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
// IPFSService is the agent-side handler interface for IPFS health checks.
type IPFSService interface {
	CheckIPFS(msg *agtmsg.CheckIPFS) (*agtmsg.CheckIPFSResp, *mq.CodeMessage)
}

// init wires CheckIPFS up as the handler for its request message type.
func init() {
	Register(IPFSService.CheckIPFS)
}
| @@ -1,16 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
// ObjectService is the agent-side handler interface for object pinning.
type ObjectService interface {
	StartPinningObject(msg *agtmsg.StartPinningObject) (*agtmsg.StartPinningObjectResp, *mq.CodeMessage)
	WaitPinningObject(msg *agtmsg.WaitPinningObject) (*agtmsg.WaitPinningObjectResp, *mq.CodeMessage)
}

// init wires each method up as the handler for its request message type.
func init() {
	Register(ObjectService.StartPinningObject)
	Register(ObjectService.WaitPinningObject)
}
| @@ -1,30 +0,0 @@ | |||
| package agent | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| agtmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/agent" | |||
| ) | |||
// StorageService is the agent-side handler interface for storage operations:
// moving objects into a storage, checking a storage's contents, and
// uploading a storage-resident file as a replicated object.
type StorageService interface {
	StartStorageMoveObject(msg *agtmsg.StartStorageMoveObject) (*agtmsg.StartStorageMoveObjectResp, *mq.CodeMessage)
	WaitStorageMoveObject(msg *agtmsg.WaitStorageMoveObject) (*agtmsg.WaitStorageMoveObjectResp, *mq.CodeMessage)
	StorageCheck(msg *agtmsg.StorageCheck) (*agtmsg.StorageCheckResp, *mq.CodeMessage)
	StartStorageUploadRepObject(msg *agtmsg.StartStorageUploadRepObject) (*agtmsg.StartStorageUploadRepObjectResp, *mq.CodeMessage)
	WaitStorageUploadRepObject(msg *agtmsg.WaitStorageUploadRepObject) (*agtmsg.WaitStorageUploadRepObjectResp, *mq.CodeMessage)
}

// init wires each method up as the handler for its request message type.
func init() {
	Register(StorageService.StartStorageMoveObject)
	Register(StorageService.WaitStorageMoveObject)
	Register(StorageService.StorageCheck)
	Register(StorageService.StartStorageUploadRepObject)
	Register(StorageService.WaitStorageUploadRepObject)
}
| @@ -1,15 +0,0 @@ | |||
| package coordinator | |||
| import coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
// AgentService is the coordinator-side handler interface for fire-and-forget
// reports sent by agents (no replies are produced).
type AgentService interface {
	TempCacheReport(msg *coormsg.TempCacheReport)
	AgentStatusReport(msg *coormsg.AgentStatusReport)
}

// init wires each report handler up for its message type (no-reply variant).
func init() {
	RegisterNoReply(AgentService.TempCacheReport)
	RegisterNoReply(AgentService.AgentStatusReport)
}
| @@ -1,26 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
// BucketService is the coordinator-side handler interface for bucket
// management requests.
type BucketService interface {
	GetUserBuckets(msg *coormsg.GetUserBuckets) (*coormsg.GetUserBucketsResp, *mq.CodeMessage)
	GetBucketObjects(msg *coormsg.GetBucketObjects) (*coormsg.GetBucketObjectsResp, *mq.CodeMessage)
	CreateBucket(msg *coormsg.CreateBucket) (*coormsg.CreateBucketResp, *mq.CodeMessage)
	DeleteBucket(msg *coormsg.DeleteBucket) (*coormsg.DeleteBucketResp, *mq.CodeMessage)
}

// init wires each method up as the handler for its request message type.
func init() {
	Register(BucketService.GetUserBuckets)
	Register(BucketService.GetBucketObjects)
	Register(BucketService.CreateBucket)
	Register(BucketService.DeleteBucket)
}
| @@ -1,38 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
// ObjectService is the coordinator-side handler interface for the object
// lifecycle: lookup, the pre-announce/commit pairs for replicated and
// erasure-coded uploads and updates, and deletion.
type ObjectService interface {
	GetObjectsByDirName(msg *coormsg.GetObjectsByDirName) (*coormsg.GetObjectsResp, *mq.CodeMessage)
	PreDownloadObject(msg *coormsg.PreDownloadObject) (*coormsg.PreDownloadObjectResp, *mq.CodeMessage)
	PreUploadRepObject(msg *coormsg.PreUploadRepObject) (*coormsg.PreUploadResp, *mq.CodeMessage)
	CreateRepObject(msg *coormsg.CreateRepObject) (*coormsg.CreateObjectResp, *mq.CodeMessage)
	PreUpdateRepObject(msg *coormsg.PreUpdateRepObject) (*coormsg.PreUpdateRepObjectResp, *mq.CodeMessage)
	UpdateRepObject(msg *coormsg.UpdateRepObject) (*coormsg.UpdateRepObjectResp, *mq.CodeMessage)
	PreUploadEcObject(msg *coormsg.PreUploadEcObject) (*coormsg.PreUploadEcResp, *mq.CodeMessage)
	CreateEcObject(msg *coormsg.CreateEcObject) (*coormsg.CreateObjectResp, *mq.CodeMessage)
	DeleteObject(msg *coormsg.DeleteObject) (*coormsg.DeleteObjectResp, *mq.CodeMessage)
}

// init wires each method up as the handler for its request message type.
func init() {
	Register(ObjectService.GetObjectsByDirName)
	Register(ObjectService.PreDownloadObject)
	Register(ObjectService.PreUploadRepObject)
	Register(ObjectService.CreateRepObject)
	Register(ObjectService.PreUpdateRepObject)
	Register(ObjectService.UpdateRepObject)
	Register(ObjectService.PreUploadEcObject)
	Register(ObjectService.CreateEcObject)
	Register(ObjectService.DeleteObject)
}
| @@ -1,18 +0,0 @@ | |||
| package coordinator | |||
| import ( | |||
| "gitlink.org.cn/cloudream/common/pkgs/mq" | |||
| coormsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/coordinator" | |||
| ) | |||
// StorageService is the coordinator-side handler interface for storage
// queries and object-to-storage scheduling.
type StorageService interface {
	GetStorageInfo(msg *coormsg.GetStorageInfo) (*coormsg.GetStorageInfoResp, *mq.CodeMessage)
	PreMoveObjectToStorage(msg *coormsg.PreMoveObjectToStorage) (*coormsg.PreMoveObjectToStorageResp, *mq.CodeMessage)
	MoveObjectToStorage(msg *coormsg.MoveObjectToStorage) (*coormsg.MoveObjectToStorageResp, *mq.CodeMessage)
}

// init wires each method up as the handler for its request message type.
func init() {
	Register(StorageService.GetStorageInfo)
	Register(StorageService.PreMoveObjectToStorage)
	Register(StorageService.MoveObjectToStorage)
}
| @@ -1,11 +0,0 @@ | |||
| package scanner | |||
| import scmsg "gitlink.org.cn/cloudream/storage-common/pkgs/mq/message/scanner" | |||
// EventService is the scanner-side handler interface for posted events
// (fire-and-forget; no reply is produced).
type EventService interface {
	PostEvent(event *scmsg.PostEvent)
}

// init wires PostEvent up as the handler for its message type (no-reply).
func init() {
	RegisterNoReply(EventService.PostEvent)
}
| @@ -5,8 +5,8 @@ import ( | |||
| "strings" | |||
| ) | |||
| // MakeMoveOperationFileName Move操作时,写入的文件的名称 | |||
| func MakeMoveOperationFileName(objectID int64, userID int64) string { | |||
// MakeStorageMovePackageDirName returns the directory name written when a
// package is moved into a storage, formatted as "<packageID>-<userID>".
//
// Fix: the function was renamed from its object-based predecessor to
// package-based naming, but the parameter kept the stale name `objectID` and
// the comment still described the old Move file name; both now match the
// package-oriented API (the call interface is unchanged — Go parameters are
// positional).
func MakeStorageMovePackageDirName(packageID int64, userID int64) string {
	return fmt.Sprintf("%d-%d", packageID, userID)
}