Compare commits

662 commits: nr-s3-fix-… ... florent-fi…
.eslintrc.js (18 changed lines)

@@ -1,13 +1,5 @@
module.exports = {
  extends: [
    'plugin:eslint-comments/recommended',
    'standard',
    'standard-jsx',
    'prettier',
    'prettier/standard',
    'prettier/react',
  ],
  extends: ['plugin:eslint-comments/recommended', 'standard', 'standard-jsx', 'prettier'],
  globals: {
    __DEV__: true,
    $Dict: true,
@@ -21,19 +13,13 @@ module.exports = {
  overrides: [
    {
      files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
      files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
      rules: {
        'no-console': 'off',
      },
    },
  ],

  parser: 'babel-eslint',
  parserOptions: {
    ecmaFeatures: {
      legacyDecorators: true,
    },
  },
  rules: {
    // disabled because XAPI objects are using camel case
    camelcase: ['off'],
.github/ISSUE_TEMPLATE/bug_report.md (new file, 33 lines)

@@ -0,0 +1,33 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (new file, 20 lines)

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.gitignore (10 changed lines)

@@ -4,12 +4,14 @@
/lerna-debug.log
/lerna-debug.log.*

/@vates/*/dist/
/@vates/*/node_modules/
/@xen-orchestra/*/dist/
/@xen-orchestra/*/node_modules/
/packages/*/dist/
/packages/*/node_modules/

/@xen-orchestra/proxy/src/app/mixins/index.js
/@xen-orchestra/proxy/src/app/mixins/index.mjs

/packages/vhd-cli/src/commands/index.js

@@ -17,9 +19,9 @@
/packages/xen-api/plot.dat

/packages/xo-server/.xo-server.*
/packages/xo-server/src/api/index.js
/packages/xo-server/src/xapi/mixins/index.js
/packages/xo-server/src/xo-mixins/index.js
/packages/xo-server/src/api/index.mjs
/packages/xo-server/src/xapi/mixins/index.mjs
/packages/xo-server/src/xo-mixins/index.mjs

/packages/xo-server-auth-ldap/ldap.cache.conf
.travis.yml

@@ -1,6 +1,6 @@
language: node_js
node_js:
  - 12
  - 14

# Use containers.
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
@vates/async-each/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore
@vates/async-each/README.md (new file, 68 lines)

@@ -0,0 +1,68 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/async-each

[](https://npmjs.org/package/@vates/async-each) [](https://bundlephobia.com/result?p=@vates/async-each) [](https://npmjs.org/package/@vates/async-each)

> Run async fn for each item in (async) iterable

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/async-each):

```
> npm install --save @vates/async-each
```

## Usage

### `asyncEach(iterable, iteratee, [opts])`

Executes `iteratee` in order for each value yielded by `iterable`.

Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.

`iterable` must be an iterable or async iterable.

`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:

- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself

`opts` is an object that can contain the following options:

- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

```js
import { asyncEach } from '@vates/async-each'

const contents = []
await asyncEach(
  ['foo.txt', 'bar.txt', 'baz.txt'],
  async function (filename, i) {
    contents[i] = await readFile(filename)
  },
  {
    // reads two files at a time
    concurrency: 2,
  }
)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
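Aside: the README above documents `signal` and `stopOnError`, but its example only uses `concurrency`. A minimal sketch of aborting an iteration with an `AbortController` (assumed available as a global, as in recent Node.js versions), consistent with the implementation and tests later in this diff:

```js
import { asyncEach } from '@vates/async-each'

const ac = new AbortController()

try {
  await asyncEach(
    ['foo.txt', 'bar.txt', 'baz.txt'],
    async (filename, i) => {
      // abort from inside an iteratee: no further calls are started
      if (i === 1) {
        ac.abort()
      }
    },
    { signal: ac.signal }
  )
} catch (error) {
  console.error(error.message) // 'asyncEach aborted'
}
```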
@vates/async-each/USAGE.md (new file, 35 lines)

@@ -0,0 +1,35 @@
### `asyncEach(iterable, iteratee, [opts])`

Executes `iteratee` in order for each value yielded by `iterable`.

Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.

`iterable` must be an iterable or async iterable.

`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:

- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself

`opts` is an object that can contain the following options:

- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

```js
import { asyncEach } from '@vates/async-each'

const contents = []
await asyncEach(
  ['foo.txt', 'bar.txt', 'baz.txt'],
  async function (filename, i) {
    contents[i] = await readFile(filename)
  },
  {
    // reads two files at a time
    concurrency: 2,
  }
)
```
@vates/async-each/index.js (new file, 99 lines)

@@ -0,0 +1,99 @@
'use strict'

const noop = Function.prototype

class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
  return new Promise((resolve, reject) => {
    const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
    const errors = []
    let running = 0
    let index = 0

    let onAbort
    if (signal !== undefined) {
      onAbort = () => {
        onRejectedWrapper(new Error('asyncEach aborted'))
      }
      signal.addEventListener('abort', onAbort)
    }

    const clean = () => {
      onFulfilled = onRejected = noop
      if (onAbort !== undefined) {
        signal.removeEventListener('abort', onAbort)
      }
    }

    resolve = (resolve =>
      function resolveAndClean(value) {
        resolve(value)
        clean()
      })(resolve)
    reject = (reject =>
      function rejectAndClean(reason) {
        reject(reason)
        clean()
      })(reject)

    let onFulfilled = value => {
      --running
      next()
    }
    const onFulfilledWrapper = value => onFulfilled(value)

    let onRejected = stopOnError
      ? reject
      : error => {
          --running
          errors.push(error)
          next()
        }
    const onRejectedWrapper = reason => onRejected(reason)

    let nextIsRunning = false
    let next = async () => {
      if (nextIsRunning) {
        return
      }
      nextIsRunning = true
      if (running < concurrency) {
        const cursor = await it.next()
        if (cursor.done) {
          next = () => {
            if (running === 0) {
              if (errors.length === 0) {
                resolve()
              } else {
                reject(new AggregateError(errors))
              }
            }
          }
        } else {
          ++running
          try {
            const result = iteratee.call(this, cursor.value, index++, iterable)
            let then
            if (result != null && typeof result === 'object' && typeof (then = result.then) === 'function') {
              then.call(result, onFulfilledWrapper, onRejectedWrapper)
            } else {
              onFulfilled(result)
            }
          } catch (error) {
            onRejected(error)
          }
        }
        nextIsRunning = false
        return next()
      }
      nextIsRunning = false
    }
    next()
  })
}
@vates/async-each/index.spec.js (new file, 99 lines)

@@ -0,0 +1,99 @@
'use strict'

/* eslint-env jest */

const { asyncEach } = require('./')

const randomDelay = (max = 10) =>
  new Promise(resolve => {
    setTimeout(resolve, Math.floor(Math.random() * max + 1))
  })

const rejectionOf = p =>
  new Promise((resolve, reject) => {
    p.then(reject, resolve)
  })

describe('asyncEach', () => {
  const thisArg = 'qux'
  const values = ['foo', 'bar', 'baz']

  Object.entries({
    'sync iterable': () => values,
    'async iterable': async function* () {
      for (const value of values) {
        await randomDelay()
        yield value
      }
    },
  }).forEach(([what, getIterable]) =>
    describe('with ' + what, () => {
      let iterable
      beforeEach(() => {
        iterable = getIterable()
      })

      it('works', async () => {
        const iteratee = jest.fn(async () => {})

        await asyncEach.call(thisArg, iterable, iteratee)

        expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
        expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
      })
      ;[1, 2, 4].forEach(concurrency => {
        it('respects a concurrency of ' + concurrency, async () => {
          let running = 0

          await asyncEach(
            values,
            async () => {
              ++running
              expect(running).toBeLessThanOrEqual(concurrency)
              await randomDelay()
              --running
            },
            { concurrency }
          )
        })
      })

      it('stops on first error when stopOnError is true', async () => {
        const error = new Error()
        const iteratee = jest.fn((_, i) => {
          if (i === 1) {
            throw error
          }
        })

        expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
        expect(iteratee).toHaveBeenCalledTimes(2)
      })

      it('rejects AggregateError when stopOnError is false', async () => {
        const errors = []
        const iteratee = jest.fn(() => {
          const error = new Error()
          errors.push(error)
          throw error
        })

        const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
        expect(error.errors).toEqual(errors)
        expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
      })

      it('can be interrupted with an AbortSignal', async () => {
        const ac = new AbortController()
        const iteratee = jest.fn((_, i) => {
          if (i === 1) {
            ac.abort()
          }
        })

        await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
        expect(iteratee).toHaveBeenCalledTimes(2)
      })
    })
  )
})
@vates/async-each/package.json (new file, 34 lines)

@@ -0,0 +1,34 @@
{
  "private": false,
  "name": "@vates/async-each",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/async-each",
  "description": "Run async fn for each item in (async) iterable",
  "keywords": [
    "array",
    "async",
    "collection",
    "each",
    "for",
    "foreach",
    "iterable",
    "iterator"
  ],
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/async-each",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
@vates/coalesce-calls/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/coalesce-calls/package.json

@@ -20,9 +20,6 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "files": [
    "index.js"
  ],
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
@vates/compose/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/compose/README.md

@@ -50,7 +50,7 @@ Functions may also be passed in an array:
const f = compose([add2, mul3])
```

Options can be passed as first parameters:
Options can be passed as first parameter:

```js
const f = compose(

@vates/compose/USAGE.md

@@ -32,7 +32,7 @@ Functions may also be passed in an array:
const f = compose([add2, mul3])
```

Options can be passed as first parameters:
Options can be passed as first parameter:

```js
const f = compose(
@vates/decorate-with/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/decorate-with/README.md

@@ -16,7 +16,9 @@ Installation of the [npm package](https://npmjs.org/package/@vates/decorate-with

## Usage

For instance, allows using Lodash's functions as decorators:
### `decorateWith(fn, ...args)`

Creates a new ([legacy](https://babeljs.io/docs/en/babel-plugin-syntax-decorators#legacy)) method decorator from a function decorator; for instance, it allows using Lodash's functions as decorators:

```js
import { decorateWith } from '@vates/decorate-with'
@@ -29,6 +31,34 @@ class Foo {
}
```

### `decorateMethodsWith(class, map)`

Decorates a number of methods directly, without using the decorator syntax:

```js
import { decorateMethodsWith } from '@vates/decorate-with'

class Foo {
  bar() {
    // body
  }

  baz() {
    // body
  }
}

decorateMethodsWith(Foo, {
  // without arguments
  bar: lodash.curry,

  // with arguments
  baz: [lodash.debounce, 150],
})
```

The decorated class is returned, so you can export it directly.

## Contributions

Contributions are _very_ welcome, either on the documentation or on
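Aside: the body of the `decorateWith` example falls outside the hunk above. A minimal sketch matching the documented `decorateWith(fn, ...args)` signature, using Lodash's `debounce` purely as an illustrative function decorator:

```js
import { decorateWith } from '@vates/decorate-with'
import { debounce } from 'lodash'

class Foo {
  // equivalent to manually wrapping the method: bar = debounce(bar, 150)
  @decorateWith(debounce, 150)
  bar() {
    // body
  }
}
```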
@vates/decorate-with/USAGE.md

@@ -1,4 +1,6 @@
For instance, allows using Lodash's functions as decorators:
### `decorateWith(fn, ...args)`

Creates a new ([legacy](https://babeljs.io/docs/en/babel-plugin-syntax-decorators#legacy)) method decorator from a function decorator; for instance, it allows using Lodash's functions as decorators:

```js
import { decorateWith } from '@vates/decorate-with'
@@ -10,3 +12,31 @@ class Foo {
}
}
```

### `decorateMethodsWith(class, map)`

Decorates a number of methods directly, without using the decorator syntax:

```js
import { decorateMethodsWith } from '@vates/decorate-with'

class Foo {
  bar() {
    // body
  }

  baz() {
    // body
  }
}

decorateMethodsWith(Foo, {
  // without arguments
  bar: lodash.curry,

  // with arguments
  baz: [lodash.debounce, 150],
})
```

The decorated class is returned, so you can export it directly.
@vates/decorate-with/index.js

@@ -1,4 +1,21 @@
exports.decorateWith = (fn, ...args) => (target, name, descriptor) => ({
  ...descriptor,
  value: fn(descriptor.value, ...args),
})
exports.decorateWith = function decorateWith(fn, ...args) {
  return (target, name, descriptor) => ({
    ...descriptor,
    value: fn(descriptor.value, ...args),
  })
}

const { getOwnPropertyDescriptor, defineProperty } = Object

exports.decorateMethodsWith = function decorateMethodsWith(klass, map) {
  const { prototype } = klass
  for (const name of Object.keys(map)) {
    const descriptor = getOwnPropertyDescriptor(prototype, name)
    const { value } = descriptor

    const decorator = map[name]
    descriptor.value = typeof decorator === 'function' ? decorator(value) : decorator[0](value, ...decorator.slice(1))
    defineProperty(prototype, name, descriptor)
  }
  return klass
}

@vates/decorate-with/package.json

@@ -20,7 +20,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.0.1",
  "version": "0.1.0",
  "engines": {
    "node": ">=8.10"
  },
@vates/disposable/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/disposable/README.md

@@ -48,7 +48,7 @@ import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()

// it will wait for 10 seconds before calling the disposer
using(debounceResource(getConnection(host), 10e3), connection => {})
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
```

### `debounceResource.flushAll()`

@vates/disposable/USAGE.md

@@ -30,7 +30,7 @@ import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()

// it will wait for 10 seconds before calling the disposer
using(debounceResource(getConnection(host), 10e3), connection => {})
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
```

### `debounceResource.flushAll()`
@vates/disposable/deduped.js

@@ -17,15 +17,15 @@ exports.deduped = (factory, keyFn = (...args) => args) =>
    if (state === undefined) {
      const result = factory.apply(this, arguments)

      const createFactory = ({ value, dispose }) => {
      const createFactory = disposable => {
        const wrapper = {
          dispose() {
            if (--state.users === 0) {
              states.delete(keys)
              return dispose()
              return disposable.dispose()
            }
          },
          value,
          value: disposable.value,
        }

        return () => {

@vates/disposable/package.json

@@ -14,7 +14,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
@@ -23,7 +23,8 @@
  },
  "dependencies": {
    "@vates/multi-key-map": "^0.1.0",
    "@xen-orchestra/log": "^0.2.0",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.3.0",
    "ensure-array": "^1.0.0"
  }
}
@vates/multi-key-map/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/parse-duration/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore
@vates/parse-duration/README.md

@@ -44,4 +44,4 @@ You may:

## License

[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@vates/parse-duration/index.js

@@ -6,7 +6,7 @@ exports.parseDuration = value => {
  }
  const duration = ms(value)
  if (duration === undefined) {
    throw new TypeError(`not a valid duration: ${duration}`)
    throw new TypeError(`not a valid duration: ${value}`)
  }
  return duration
}
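Aside: a minimal usage sketch of `parseDuration` as implied by the code above; string values are parsed with the `ms` package, and invalid ones now report the offending value in the error message:

```js
const { parseDuration } = require('@vates/parse-duration')

parseDuration('2 days') // 172800000
parseDuration('10 seconds') // 10000
parseDuration('not a duration') // throws TypeError: not a valid duration: not a duration
```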
@vates/parse-duration/package.json

@@ -18,8 +18,8 @@
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "AGPL-3.0-or-later",
  "version": "0.1.0",
  "license": "ISC",
  "version": "0.1.1",
  "engines": {
    "node": ">=8.10"
  },
@vates/read-chunk/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/read-chunk/index.js

@@ -1,27 +1,30 @@
exports.readChunk = (stream, size) =>
  new Promise((resolve, reject) => {
    function onEnd() {
      resolve(null)
      removeListeners()
    }
    function onError(error) {
      reject(error)
      removeListeners()
    }
    function onReadable() {
      const data = stream.read(size)
      if (data !== null) {
        resolve(data)
        removeListeners()
      }
    }
    function removeListeners() {
      stream.removeListener('end', onEnd)
      stream.removeListener('error', onError)
      stream.removeListener('readable', onReadable)
    }
    stream.on('end', onEnd)
    stream.on('error', onError)
    stream.on('readable', onReadable)
    onReadable()
  })
const readChunk = (stream, size) =>
  size === 0
    ? Promise.resolve(Buffer.alloc(0))
    : new Promise((resolve, reject) => {
        function onEnd() {
          resolve(null)
          removeListeners()
        }
        function onError(error) {
          reject(error)
          removeListeners()
        }
        function onReadable() {
          const data = stream.read(size)
          if (data !== null) {
            resolve(data)
            removeListeners()
          }
        }
        function removeListeners() {
          stream.removeListener('end', onEnd)
          stream.removeListener('error', onError)
          stream.removeListener('readable', onReadable)
        }
        stream.on('end', onEnd)
        stream.on('error', onError)
        stream.on('readable', onReadable)
        onReadable()
      })
exports.readChunk = readChunk
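Aside: a minimal usage sketch of the `readChunk` helper shown above (the file path is hypothetical); it resolves with a `Buffer` of at most `size` bytes, `null` if the stream is already ended or empty, and an empty buffer when `size` is 0:

```js
const { createReadStream } = require('fs')
const { readChunk } = require('@vates/read-chunk')

async function readHeader(path) {
  const stream = createReadStream(path)
  // Buffer of up to 1024 bytes, or null for an empty stream
  return readChunk(stream, 1024)
}
```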
@vates/read-chunk/index.spec.js (new file, 43 lines)

@@ -0,0 +1,43 @@
/* eslint-env jest */

const { Readable } = require('stream')

const { readChunk } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

describe('readChunk', () => {
  it('returns null if stream is empty', async () => {
    expect(await readChunk(makeStream([]))).toBe(null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
    })

    it('returns an empty buffer if the specified size is 0', async () => {
      expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
    })
  })
})

@vates/read-chunk/package.json

@@ -19,7 +19,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.1",
  "version": "0.1.2",
  "engines": {
    "node": ">=8.10"
  },
@vates/toggle-scripts/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/toggle-scripts/package.json

@@ -31,9 +31,6 @@
  "engines": {
    "node": ">=6"
  },
  "files": [
    "index.js"
  ],
  "bin": "./index.js",
  "scripts": {
    "postversion": "npm publish --access public"

(deleted file)

@@ -1,24 +0,0 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map

/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

__snapshots__/
@xen-orchestra/async-map/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@xen-orchestra/async-map/index.js

@@ -12,7 +12,7 @@ const wrapCall = (fn, arg, thisArg) => {
 * WARNING: Does not handle plain objects
 *
 * @template Item,This
 * @param {Iterable<Item>} arrayLike
 * @param {Iterable<Item>} iterable
 * @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
 * @param {This} [thisArg]
 * @returns {Promise<Item[]>}

@xen-orchestra/async-map/package.json

@@ -24,10 +24,6 @@
    "url": "https://vates.fr"
  },
  "preferGlobal": false,
  "files": [
    "index.js",
    "legacy.js"
  ],
  "engines": {
    "node": ">=6"
  },
@xen-orchestra/audit-core/.eslintrc.js (new symbolic link)

@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js

(deleted file)

@@ -1,24 +0,0 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map

/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

__snapshots__/

@xen-orchestra/audit-core/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore
@xen-orchestra/audit-core/package.json

@@ -9,7 +9,7 @@
  },
  "version": "0.2.0",
  "engines": {
    "node": ">=8.10"
    "node": ">=10"
  },
  "main": "dist/",
  "scripts": {
@@ -26,14 +26,13 @@
    "@babel/plugin-proposal-decorators": "^7.8.0",
    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.0",
    "@babel/preset-env": "^7.7.4",
    "cross": "^1.0.0",
    "cross-env": "^7.0.2",
    "rimraf": "^3.0.0"
  },
  "dependencies": {
    "@xen-orchestra/log": "^0.2.0",
    "core-js": "^3.6.4",
    "@vates/decorate-with": "^0.1.0",
    "@xen-orchestra/log": "^0.3.0",
    "golike-defer": "^0.5.1",
    "lodash": "^4.17.15",
    "object-hash": "^2.0.1"
  },
  "private": false,
@xen-orchestra/audit-core/src/index.js

@@ -1,10 +1,8 @@
// see https://github.com/babel/babel/issues/8450
import 'core-js/features/symbol/async-iterator'

import assert from 'assert'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import hash from 'object-hash'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'

const log = createLogger('xo:audit-core')

@@ -65,7 +63,7 @@ export class AuditCore {
    this._storage = storage
  }

  @defer
  @decorateWith(defer)
  async add($defer, subject, event, data) {
    const time = Date.now()
    $defer(await this._storage.acquireLock())
@@ -150,7 +148,7 @@ export class AuditCore {
    }
  }

  @defer
  @decorateWith(defer)
  async deleteRangeAndRewrite($defer, newest, oldest) {
    assert.notStrictEqual(newest, undefined)
    assert.notStrictEqual(oldest, undefined)
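Aside: the change above swaps the bare `@defer` decorator for `@decorateWith(defer)`. Per the `@vates/decorate-with` documentation earlier in this diff, the same wrapping could be written without decorator syntax; a sketch, assuming the `AuditCore` class from this file:

```js
import { decorateMethodsWith } from '@vates/decorate-with'
import { defer } from 'golike-defer'

// equivalent to applying @decorateWith(defer) to each method
decorateMethodsWith(AuditCore, {
  add: defer,
  deleteRangeAndRewrite: defer,
})
```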
@@ -17,10 +17,10 @@ interface Record {
}

export class AuditCore {
  constructor(storage: Storage) { }
  public add(subject: any, event: string, data: any): Promise<Record> { }
  public checkIntegrity(oldest: string, newest: string): Promise<number> { }
  public getFrom(newest?: string): AsyncIterator { }
  public deleteFrom(newest: string): Promise<void> { }
  public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> { }
  constructor(storage: Storage) {}
  public add(subject: any, event: string, data: any): Promise<Record> {}
  public checkIntegrity(oldest: string, newest: string): Promise<number> {}
  public getFrom(newest?: string): AsyncIterator {}
  public deleteFrom(newest: string): Promise<void> {}
  public deleteRangeAndRewrite(newest: string, oldest: string): Promise<void> {}
}
@xen-orchestra/babel-config/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore
@xen-orchestra/babel-config/index.js

@@ -14,25 +14,13 @@ const configs = {
  '@babel/plugin-proposal-pipeline-operator': {
    proposal: 'minimal',
  },
  '@babel/preset-env'(pkg) {
    return {
      debug: !__TEST__,
  '@babel/preset-env': {
    debug: !__TEST__,

      // disabled until https://github.com/babel/babel/issues/8323 is resolved
      // loose: true,
    // disabled until https://github.com/babel/babel/issues/8323 is resolved
    // loose: true,

      shippedProposals: true,
      targets: (() => {
        let node = (pkg.engines || {}).node
        if (node !== undefined) {
          const trimChars = '^=>~'
          while (trimChars.includes(node[0])) {
            node = node.slice(1)
          }
        }
        return { browsers: pkg.browserslist, node }
      })(),
    }
    shippedProposals: true,
  },
}

@@ -44,21 +32,21 @@ const getConfig = (key, ...args) => {
// some plugins must be used in a specific order
const pluginsOrder = ['@babel/plugin-proposal-decorators', '@babel/plugin-proposal-class-properties']

module.exports = function (pkg, plugins, presets) {
  plugins === undefined && (plugins = {})
  presets === undefined && (presets = {})
module.exports = function (pkg, configs = {}) {
  const plugins = {}
  const presets = {}

  Object.keys(pkg.devDependencies || {}).forEach(name => {
    if (!(name in presets) && PLUGINS_RE.test(name)) {
      plugins[name] = getConfig(name, pkg)
      plugins[name] = { ...getConfig(name, pkg), ...configs[name] }
    } else if (!(name in presets) && PRESETS_RE.test(name)) {
      presets[name] = getConfig(name, pkg)
      presets[name] = { ...getConfig(name, pkg), ...configs[name] }
    }
  })

  return {
    comments: !__PROD__,
    ignore: __TEST__ ? undefined : [/\.spec\.js$/],
    ignore: __PROD__ ? [/\btests?\//, /\.spec\.js$/] : undefined,
    plugins: Object.keys(plugins)
      .map(plugin => [plugin, plugins[plugin]])
      .sort(([a], [b]) => {
@@ -67,5 +55,15 @@ module.exports = function (pkg, plugins, presets) {
        return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
      }),
    presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
    targets: (() => {
      let node = (pkg.engines || {}).node
      if (node !== undefined) {
        const trimChars = '^=>~'
        while (trimChars.includes(node[0])) {
          node = node.slice(1)
        }
      }
      return { browsers: pkg.browserslist, node }
    })(),
  }
}
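Aside: a sketch of how a package might consume this helper after the signature change from `(pkg, plugins, presets)` to `(pkg, configs = {})`; the `.babelrc.js` location and the specific override are illustrative assumptions:

```js
// .babelrc.js of a package in the monorepo
module.exports = require('@xen-orchestra/babel-config')(require('./package.json'), {
  // per-plugin/preset overrides are shallow-merged over the defaults computed above
  '@babel/preset-env': { debug: false },
})
```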
@xen-orchestra/backups-cli/.npmignore (new symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore
@@ -1,336 +1,34 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
// assigned when options are parsed by the main function
|
||||
let merge, remove
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const assert = require('assert')
|
||||
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
|
||||
const flatten = require('lodash/flatten')
|
||||
const getopts = require('getopts')
|
||||
const limitConcurrency = require('limit-concurrency-decorator').default
|
||||
const lockfile = require('proper-lockfile')
|
||||
const pipe = require('promise-toolbox/pipe')
|
||||
const { default: Vhd, mergeVhd } = require('vhd-lib')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
|
||||
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')
|
||||
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
|
||||
const { resolve } = require('path')
|
||||
|
||||
const fs = require('../_fs')
|
||||
|
||||
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will deleted
|
||||
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
let child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
console.warn('Unused parents of VHD', child)
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
console.warn(' ', parent)
|
||||
})
|
||||
merge && console.warn(' merging…')
|
||||
console.warn('')
|
||||
if (merge) {
|
||||
// `mergeVhd` does not work with a stream, either
|
||||
// - make it accept a stream
|
||||
// - or create synthetic VHD which is not a stream
|
||||
if (children.length !== 1) {
|
||||
console.warn('TODO: implement merging multiple children')
|
||||
children.length = 1
|
||||
child = children[0]
|
||||
}
|
||||
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
console.log('merging %s: %s/%s', child, done, total)
|
||||
}
|
||||
}, 10e3)
|
||||
|
||||
await mergeVhd(
|
||||
handler,
|
||||
parent,
|
||||
handler,
|
||||
child,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children),
|
||||
{
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
clearInterval(handle)
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
remove && fs.rename(parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
console.warn('Unused VHD', child)
|
||||
remove && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return remove && handler.unlink(child)
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
const listVhds = pipe([
|
||||
vmDir => vmDir + '/vdis',
|
||||
fs.readdir2,
|
||||
asyncMap(fs.readdir2),
|
||||
flatten,
|
||||
asyncMap(fs.readdir2),
|
||||
flatten,
|
||||
_ => _.filter(_ => _.endsWith('.vhd')),
|
||||
])
|
||||
|
||||
async function handleVm(vmDir) {
|
||||
const vhds = new Set()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(await listVhds(vmDir), async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
vhds.add(path)
|
||||
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
|
||||
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
if (parent in vhdChildren) {
|
||||
const error = new Error('this script does not support multiple VHD children')
|
||||
error.parent = parent
|
||||
error.child1 = vhdChildren[parent]
|
||||
error.child2 = path
|
||||
throw error // should we throw?
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Error while checking VHD', path)
|
||||
console.warn(' ', error)
|
||||
if (error != null && error.code === 'ERR_ASSERTION') {
|
||||
remove && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
remove && (await handler.unlink(path))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
{
|
||||
const deletions = []
|
||||
|
||||
// return true if the VHD has been deleted or is missing
|
||||
const deleteIfOrphan = vhd => {
|
||||
const parent = vhdParents[vhd]
|
||||
if (parent === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
delete vhdParents[vhd]
|
||||
|
||||
deleteIfOrphan(parent)
|
||||
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhd)
|
||||
|
||||
console.warn('Error while checking VHD', vhd)
|
||||
console.warn(' missing parent', parent)
|
||||
remove && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
remove && deletions.push(handler.unlink(vhd))
|
||||
}
|
||||
}
|
||||
|
||||
// > A property that is deleted before it has been visited will not be
|
||||
// > visited later.
|
||||
// >
|
||||
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
|
||||
for (const child in vhdParents) {
|
||||
deleteIfOrphan(child)
|
||||
}
|
||||
|
||||
await Promise.all(deletions)
|
||||
}
|
||||
|
||||
const [jsons, xvas, xvaSums] = await fs
|
||||
.readdir2(vmDir)
|
||||
.then(entries => [
|
||||
entries.filter(_ => _.endsWith('.json')),
|
||||
new Set(entries.filter(_ => _.endsWith('.xva'))),
|
||||
entries.filter(_ => _.endsWith('.xva.cheksum')),
|
||||
])
|
||||
|
||||
await asyncMap(xvas, async path => {
|
||||
// check is not good enough to delete the file, the best we can do is report
|
||||
// it
|
||||
if (!(await isValidXva(path))) {
|
||||
console.warn('Potential broken XVA', path)
|
||||
console.warn('')
|
||||
}
|
||||
})
|
||||
|
||||
const unusedVhds = new Set(vhds)
|
||||
const unusedXvas = new Set(xvas)
|
||||
|
||||
// compile the list of unused XVAs and VHDs, and remove backup metadata which
|
||||
// reference a missing XVA/VHD
|
||||
await asyncMap(jsons, async json => {
|
||||
const metadata = JSON.parse(await fs.readFile(json))
|
||||
const { mode } = metadata
|
||||
if (mode === 'full') {
|
||||
const linkedXva = resolve(vmDir, metadata.xva)
|
||||
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
console.warn(' missing file', linkedXva)
|
||||
remove && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
remove && (await handler.unlink(json))
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = (() => {
|
||||
const { vhds } = metadata
|
||||
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
|
||||
})()
|
||||
|
||||
// FIXME: find better approach by keeping as much of the backup as
|
||||
// possible (existing disks) even if one disk is missing
|
||||
if (linkedVhds.every(_ => vhds.has(_))) {
|
||||
linkedVhds.forEach(_ => unusedVhds.delete(_))
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
|
||||
console.warn(' %i/%i missing VHDs', missingVhds.length, linkedVhds.length)
|
||||
missingVhds.forEach(vhd => {
|
||||
console.warn(' ', vhd)
|
||||
})
|
||||
remove && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
remove && (await handler.unlink(json))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
  // TODO: parallelize by vm/job/vdi
  const unusedVhdsDeletion = []
  {
    // VHD chains (as list from child to ancestor) to merge indexed by last
    // ancestor
    const vhdChainsToMerge = { __proto__: null }

    const toCheck = new Set(unusedVhds)

    const getUsedChildChainOrDelete = vhd => {
      if (vhd in vhdChainsToMerge) {
        const chain = vhdChainsToMerge[vhd]
        delete vhdChainsToMerge[vhd]
        return chain
      }

      if (!unusedVhds.has(vhd)) {
        return [vhd]
      }

      // no longer needs to be checked
      toCheck.delete(vhd)

      const child = vhdChildren[vhd]
      if (child !== undefined) {
        const chain = getUsedChildChainOrDelete(child)
        if (chain !== undefined) {
          chain.push(vhd)
          return chain
        }
      }

      console.warn('Unused VHD', vhd)
      remove && console.warn('  deleting…')
      console.warn('')
      remove && unusedVhdsDeletion.push(handler.unlink(vhd))
    }

    toCheck.forEach(vhd => {
      vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
    })

    Object.keys(vhdChainsToMerge).forEach(key => {
      const chain = vhdChainsToMerge[key]
      if (chain !== undefined) {
        unusedVhdsDeletion.push(mergeVhdChain(chain))
      }
    })
  }

  await Promise.all([
    unusedVhdsDeletion,
    asyncMap(unusedXvas, path => {
      console.warn('Unused XVA', path)
      remove && console.warn('  deleting…')
      console.warn('')
      return remove && handler.unlink(path)
    }),
    asyncMap(xvaSums, path => {
      // no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
      if (!xvas.has(path.slice(0, -'.checksum'.length))) {
        console.warn('Unused XVA checksum', path)
        remove && console.warn('  deleting…')
        console.warn('')
        return remove && handler.unlink(path)
      }
    }),
  ])
}

// -----------------------------------------------------------------------------
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))

module.exports = async function main(args) {
  const opts = getopts(args, {
  const { _, fix, remove, merge } = getopts(args, {
    alias: {
      fix: 'f',
      remove: 'r',
      merge: 'm',
    },
    boolean: ['merge', 'remove'],
    boolean: ['fix', 'merge', 'remove'],
    default: {
      merge: false,
      remove: false,
    },
  })

  ;({ remove, merge } = opts)
  await asyncMap(opts._, async vmDir => {
  await asyncMap(_, async vmDir => {
    vmDir = resolve(vmDir)

    // TODO: implement this in `xo-server`, not easy because not compatible with
    // `@xen-orchestra/fs`.
    const release = await lockfile.lock(vmDir)
    try {
      await handleVm(vmDir)
      await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
    } catch (error) {
      console.error('handleVm', vmDir, error)
    } finally {
      await release()
      console.error('adapter.cleanVm', vmDir, error)
    }
  })
}

@@ -5,11 +5,12 @@ require('./_composeCommands')({
  get main() {
    return require('./commands/clean-vms')
  },
  usage: `[--merge] [--remove] xo-vm-backups/*
  usage: `[--fix] [--merge] [--remove] xo-vm-backups/*

Detects and repairs issues with VM backups.

Options:
  -f, --fix     Fix metadata issues (like size)
  -m, --merge   Merge (or continue merging) VHD files that are unused
  -r, --remove  Remove unused, incomplete, orphan, or corrupted files
`,

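// For reference, a hypothetical programmatic invocation of the command documented
// above; the require subpath and the backup directory are assumptions, adapt them:
//
//   const cleanVms = require('@xen-orchestra/backups-cli/commands/clean-vms')
//   cleanVms(['--fix', '--merge', '--remove', '/mnt/backups/xo-vm-backups/<vm-uuid>']).catch(console.error)
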
@@ -7,23 +7,16 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.7.0",
    "@xen-orchestra/fs": "^0.13.0",
    "@xen-orchestra/backups": "^0.16.0",
    "@xen-orchestra/fs": "^0.19.1",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "limit-concurrency-decorator": "^0.4.0",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.17.0",
    "proper-lockfile": "^4.1.1",
    "vhd-lib": "^1.0.0"
    "promise-toolbox": "^0.20.0"
  },
  "engines": {
    "node": ">=7.10.1"
  },
  "files": [
    "commands",
    "*.js"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
  "name": "@xen-orchestra/backups-cli",
  "repository": {
@@ -34,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.4.0",
  "version": "0.6.1",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
@xen-orchestra/backups/.npmignore (symbolic link, 1 line)
@@ -0,0 +1 @@
../../scripts/npmignore
@@ -1,15 +1,14 @@
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const limitConcurrency = require('limit-concurrency-decorator').default
|
||||
const using = require('promise-toolbox/using')
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const { compileTemplate } = require('@xen-orchestra/template')
|
||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
||||
|
||||
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern')
|
||||
const { PoolMetadataBackup } = require('./_PoolMetadataBackup')
|
||||
const { Task } = require('./Task')
|
||||
const { VmBackup } = require('./_VmBackup')
|
||||
const { XoMetadataBackup } = require('./_XoMetadataBackup')
|
||||
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
|
||||
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { VmBackup } = require('./_VmBackup.js')
|
||||
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
@@ -87,7 +86,7 @@ exports.Backup = class Backup {
|
||||
throw new Error('no retentions corresponding to the metadata modes found')
|
||||
}
|
||||
|
||||
await using(
|
||||
await Disposable.use(
|
||||
Disposable.all(
|
||||
poolIds.map(id =>
|
||||
this._getRecord('pool', id).catch(error => {
|
||||
@@ -196,7 +195,7 @@ exports.Backup = class Backup {
|
||||
...settings[schedule.id],
|
||||
}
|
||||
|
||||
await using(
|
||||
await Disposable.use(
|
||||
Disposable.all(
|
||||
extractIdsFromSimplePattern(job.srs).map(id =>
|
||||
this._getRecord('SR', id).catch(error => {
|
||||
@@ -242,7 +241,7 @@ exports.Backup = class Backup {
|
||||
|
||||
const handleVm = vmUuid =>
|
||||
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
|
||||
using(this._getRecord('VM', vmUuid), vm =>
|
||||
Disposable.use(this._getRecord('VM', vmUuid), vm =>
|
||||
new VmBackup({
|
||||
config,
|
||||
getSnapshotNameLabel,
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
const assert = require('assert')
|
||||
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { importDeltaVm } = require('./_deltaVm')
|
||||
const { Task } = require('./Task')
|
||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
||||
const { importDeltaVm } = require('./_deltaVm.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
||||
|
||||
exports.ImportVmBackup = class ImportVmBackup {
|
||||
constructor({ adapter, metadata, srUuid, xapi }) {
|
||||
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses } = {} }) {
|
||||
this._adapter = adapter
|
||||
this._importDeltaVmSettings = { newMacAddresses }
|
||||
this._metadata = metadata
|
||||
this._srUuid = srUuid
|
||||
this._xapi = xapi
|
||||
@@ -17,13 +19,17 @@ exports.ImportVmBackup = class ImportVmBackup {
|
||||
const metadata = this._metadata
|
||||
const isFull = metadata.mode === 'full'
|
||||
|
||||
const sizeContainer = { size: 0 }
|
||||
|
||||
let backup
|
||||
if (isFull) {
|
||||
backup = await adapter.readFullVmBackup(metadata)
|
||||
watchStreamSize(backup, sizeContainer)
|
||||
} else {
|
||||
assert.strictEqual(metadata.mode, 'delta')
|
||||
|
||||
backup = await adapter.readDeltaVmBackup(metadata)
|
||||
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
|
||||
}
|
||||
|
||||
return Task.run(
|
||||
@@ -37,6 +43,7 @@ exports.ImportVmBackup = class ImportVmBackup {
|
||||
const vmRef = isFull
|
||||
? await xapi.VM_import(backup, srRef)
|
||||
: await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
|
||||
...this._importDeltaVmSettings,
|
||||
detectBase: false,
|
||||
})
|
||||
|
||||
@@ -50,10 +57,10 @@ exports.ImportVmBackup = class ImportVmBackup {
|
||||
])
|
||||
|
||||
return {
|
||||
size: metadata.size,
|
||||
size: sizeContainer.size,
|
||||
id: await xapi.getField('VM', vmRef, 'uuid'),
|
||||
}
|
||||
}
|
||||
).catch(() => {}) // errors are handled by logs
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,22 +1,24 @@
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const fromEvent = require('promise-toolbox/fromEvent')
|
||||
const pDefer = require('promise-toolbox/defer')
|
||||
const pump = require('pump')
|
||||
const using = require('promise-toolbox/using')
|
||||
const { basename, dirname, join, normalize, resolve } = require('path')
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const fromCallback = require('promise-toolbox/fromCallback.js')
|
||||
const fromEvent = require('promise-toolbox/fromEvent.js')
|
||||
const pDefer = require('promise-toolbox/defer.js')
|
||||
const { dirname, join, normalize, resolve } = require('path')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
|
||||
const { deduped } = require('@vates/disposable/deduped')
|
||||
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdSynthetic } = require('vhd-lib')
|
||||
const { deduped } = require('@vates/disposable/deduped.js')
|
||||
const { execFile } = require('child_process')
|
||||
const { readdir, stat } = require('fs-extra')
|
||||
const { v4: uuidv4 } = require('uuid')
|
||||
const { ZipFile } = require('yazl')
|
||||
|
||||
const { BACKUP_DIR } = require('./_getVmBackupDir')
|
||||
const { getTmpDir } = require('./_getTmpDir')
|
||||
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions')
|
||||
const { lvs, pvs } = require('./_lvm')
|
||||
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
|
||||
const { cleanVm } = require('./_cleanVm.js')
|
||||
const { getTmpDir } = require('./_getTmpDir.js')
|
||||
const { isMetadataFile } = require('./_backupType.js')
|
||||
const { isValidXva } = require('./_isValidXva.js')
|
||||
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
|
||||
const { lvs, pvs } = require('./_lvm.js')
|
||||
|
||||
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
||||
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
|
||||
@@ -24,13 +26,10 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
|
||||
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
||||
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
|
||||
|
||||
const { warn } = createLogger('xo:proxy:backups:RemoteAdapter')
|
||||
const { warn } = createLogger('xo:backups:RemoteAdapter')
|
||||
|
||||
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
||||
|
||||
const isMetadataFile = filename => filename.endsWith('.json')
|
||||
const isVhdFile = filename => filename.endsWith('.vhd')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
|
||||
@@ -67,8 +66,8 @@ const debounceResourceFactory = factory =>
|
||||
return this._debounceResource(factory.apply(this, arguments))
|
||||
}
|
||||
|
||||
exports.RemoteAdapter = class RemoteAdapter {
|
||||
constructor(handler, { debounceResource, dirMode }) {
|
||||
class RemoteAdapter {
|
||||
constructor(handler, { debounceResource = res => res, dirMode } = {}) {
|
||||
this._debounceResource = debounceResource
|
||||
this._dirMode = dirMode
|
||||
this._handler = handler
|
||||
@@ -78,48 +77,6 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
return this._handler
|
||||
}
|
||||
|
||||
async _deleteVhd(path) {
|
||||
const handler = this._handler
|
||||
const vhds = await asyncMapSettled(
|
||||
await handler.list(dirname(path), {
|
||||
filter: isVhdFile,
|
||||
prependDir: true,
|
||||
}),
|
||||
async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
return {
|
||||
footer: vhd.footer,
|
||||
header: vhd.header,
|
||||
path,
|
||||
}
|
||||
} catch (error) {
|
||||
// Do not fail on corrupted VHDs (usually uncleaned temporary files),
|
||||
// they are probably inconsequent to the backup process and should not
|
||||
// fail it.
|
||||
warn(`BackupNg#_deleteVhd ${path}`, { error })
|
||||
}
|
||||
}
|
||||
)
|
||||
const base = basename(path)
|
||||
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
|
||||
if (child === undefined) {
|
||||
await handler.unlink(path)
|
||||
return 0
|
||||
}
|
||||
|
||||
try {
|
||||
const childPath = child.path
|
||||
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
|
||||
await handler.rename(path, childPath)
|
||||
return mergedDataSize
|
||||
} catch (error) {
|
||||
handler.unlink(path).catch(warn)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async _findPartition(devicePath, partitionId) {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
const partition = partitions.find(_ => _.id === partitionId)
|
||||
@@ -204,7 +161,7 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
}
|
||||
|
||||
_listLvmLogicalVolumes(devicePath, partition, results = []) {
|
||||
return using(this._getLvmPhysicalVolume(devicePath, partition), async path => {
|
||||
return Disposable.use(this._getLvmPhysicalVolume(devicePath, partition), async path => {
|
||||
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], path)
|
||||
const partitionId = partition !== undefined ? partition.id : ''
|
||||
lvs.forEach((lv, i) => {
|
||||
@@ -235,7 +192,7 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
|
||||
fetchPartitionFiles(diskId, partitionId, paths) {
|
||||
const { promise, reject, resolve } = pDefer()
|
||||
using(
|
||||
Disposable.use(
|
||||
async function* () {
|
||||
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
|
||||
const zip = new ZipFile()
|
||||
@@ -254,16 +211,9 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
|
||||
async deleteDeltaVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
let mergedDataSize = 0
|
||||
await asyncMapSettled(backups, ({ _filename, vhds }) =>
|
||||
Promise.all([
|
||||
handler.unlink(_filename),
|
||||
asyncMap(Object.values(vhds), async _ => {
|
||||
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
|
||||
}),
|
||||
])
|
||||
)
|
||||
return mergedDataSize
|
||||
|
||||
// unused VHDs will be detected by `cleanVm`
|
||||
await asyncMapSettled(backups, ({ _filename }) => VhdAbstract.unlink(handler, _filename))
|
||||
}
|
||||
|
||||
async deleteMetadataBackup(backupId) {
|
||||
@@ -362,6 +312,17 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
|
||||
}
|
||||
|
||||
  // this is where the storage-format logic (by fs type / user settings) will be plugged in
  //
  // if the file is named *.vhd => plain VHD
  // if the file is named *.alias.vhd => alias to a VHD
  getVhdFileName(baseName) {
    if (this._handler.type === 's3') {
      return `${baseName}.alias.vhd` // we want an alias to a VHD directory
    }
    return `${baseName}.vhd`
  }
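  // Usage sketch of the naming rule above (handler types other than 's3' are assumed
  // to keep plain VHD files):
  //
  //   adapter.getVhdFileName('20210101T000000Z')
  //   // → '20210101T000000Z.alias.vhd' on an S3 remote (alias to a VHD directory)
  //   // → '20210101T000000Z.vhd'       on file/NFS/SMB remotes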
|
||||
|
||||
async listAllVmBackups() {
|
||||
const handler = this._handler
|
||||
|
||||
@@ -375,7 +336,7 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
}
|
||||
|
||||
listPartitionFiles(diskId, partitionId, path) {
|
||||
return using(this.getPartition(diskId, partitionId), async rootPath => {
|
||||
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
|
||||
path = resolveSubpath(rootPath, path)
|
||||
|
||||
const entriesMap = {}
|
||||
@@ -395,7 +356,7 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
}
|
||||
|
||||
listPartitions(diskId) {
|
||||
return using(this.getDisk(diskId), async devicePath => {
|
||||
return Disposable.use(this.getDisk(diskId), async devicePath => {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
|
||||
if (partitions.length === 0) {
|
||||
@@ -506,22 +467,79 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
|
||||
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
|
||||
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
|
||||
const handler = this._handler
|
||||
input = await input
|
||||
const tmpPath = `${dirname(path)}/.${basename(path)}`
|
||||
const output = await handler.createOutputStream(tmpPath, {
|
||||
let dataPath = path
|
||||
|
||||
if (path.endsWith('.alias.vhd')) {
|
||||
await createVhdDirectoryFromStream(handler, `${dirname(path)}/data/${uuidv4()}.vhd`, input, {
|
||||
concurrency: 16,
|
||||
async validator() {
|
||||
await input.task
|
||||
return validator.apply(this, arguments)
|
||||
},
|
||||
})
|
||||
await VhdAbstract.createAlias(handler, path, dataPath)
|
||||
} else {
|
||||
await this.outputStream(dataPath, input, { checksum, validator })
|
||||
}
|
||||
}
|
||||
|
||||
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
|
||||
await this._handler.outputStream(path, input, {
|
||||
checksum,
|
||||
dirMode: this._dirMode,
|
||||
async validator() {
|
||||
await input.task
|
||||
return validator.apply(this, arguments)
|
||||
},
|
||||
})
|
||||
try {
|
||||
await Promise.all([fromCallback(pump, input, output), output.checksumWritten, input.task])
|
||||
await validator(tmpPath)
|
||||
await handler.rename(tmpPath, path, { checksum })
|
||||
} catch (error) {
|
||||
await handler.unlink(tmpPath, { checksum })
|
||||
throw error
|
||||
}
|
||||
|
||||
async _createSyntheticStream(handler, paths) {
|
||||
let disposableVhds = []
|
||||
|
||||
// if it's a path : open all hierarchy of parent
|
||||
if (typeof paths === 'string') {
|
||||
let vhd,
|
||||
vhdPath = paths
|
||||
do {
|
||||
const disposable = await openVhd(handler, vhdPath)
|
||||
vhd = disposable.value
|
||||
disposableVhds.push(disposable)
|
||||
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
|
||||
} while (vhd.footer.diskType !== Constants.DISK_TYPES.DYNAMIC)
|
||||
} else {
|
||||
// only open the list of path given
|
||||
disposableVhds = paths.map(path => openVhd(handler, path))
|
||||
}
|
||||
|
||||
// I don't want the vhds to be disposed on return
|
||||
// but only when the stream is done ( or failed )
|
||||
const disposables = await Disposable.all(disposableVhds)
|
||||
const vhds = disposables.value
|
||||
|
||||
let disposed = false
|
||||
const disposeOnce = async () => {
|
||||
if (!disposed) {
|
||||
disposed = true
|
||||
|
||||
try {
|
||||
await disposables.dispose()
|
||||
} catch (error) {
|
||||
warn('_createSyntheticStream: failed to dispose VHDs', { error })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const synthetic = new VhdSynthetic(vhds)
|
||||
await synthetic.readHeaderAndFooter()
|
||||
await synthetic.readBlockAllocationTable()
|
||||
const stream = await synthetic.stream()
|
||||
stream.on('end', disposeOnce)
|
||||
stream.on('close', disposeOnce)
|
||||
stream.on('error', disposeOnce)
|
||||
return stream
|
||||
}
|
||||
|
||||
async readDeltaVmBackup(metadata) {
|
||||
@@ -530,8 +548,8 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
const dir = dirname(metadata._filename)
|
||||
|
||||
const streams = {}
|
||||
await asyncMapSettled(Object.entries(vdis), async ([id, vdi]) => {
|
||||
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
|
||||
await asyncMapSettled(Object.keys(vdis), async id => {
|
||||
streams[`${id}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[id]))
|
||||
})
|
||||
|
||||
return {
|
||||
@@ -552,3 +570,16 @@ exports.RemoteAdapter = class RemoteAdapter {
|
||||
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
|
||||
}
|
||||
}
|
||||
|
||||
Object.assign(RemoteAdapter.prototype, {
|
||||
cleanVm(vmDir, { lock = true } = {}) {
|
||||
if (lock) {
|
||||
return Disposable.use(this._handler.lock(vmDir), () => cleanVm.apply(this, arguments))
|
||||
} else {
|
||||
return cleanVm.apply(this, arguments)
|
||||
}
|
||||
},
|
||||
isValidXva,
|
||||
})
|
||||
|
||||
exports.RemoteAdapter = RemoteAdapter
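// A hedged usage sketch of the adapter exported above; the remote URL and the VM
// directory are assumptions, adapt them to your own remote.
async function exampleCleanVm() {
  const { getHandler } = require('@xen-orchestra/fs')
  const handler = getHandler({ url: 'file:///mnt/backups' })
  await handler.sync()
  const adapter = new RemoteAdapter(handler)
  await adapter.cleanVm('xo-vm-backups/<vm-uuid>', {
    remove: true,
    merge: true,
    onLog: message => console.warn(message),
  })
}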
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
|
||||
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup')
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
|
||||
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')
|
||||
|
||||
exports.RestoreMetadataBackup = class RestoreMetadataBackup {
|
||||
constructor({ backupId, handler, xapi }) {
|
||||
@@ -15,7 +15,7 @@ exports.RestoreMetadataBackup = class RestoreMetadataBackup {
|
||||
|
||||
if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
|
||||
return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
|
||||
task: xapi.createTask('Import pool metadata'),
|
||||
task: xapi.task_create('Import pool metadata'),
|
||||
})
|
||||
} else {
|
||||
return String(await handler.readFile(`${backupId}/data.json`))
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
const CancelToken = require('promise-toolbox/CancelToken.js')
|
||||
const Zone = require('node-zone')
|
||||
|
||||
const { SyncThenable } = require('./_syncThenable')
|
||||
|
||||
const logAfterEnd = () => {
|
||||
throw new Error('task has already ended')
|
||||
}
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
// Create a serializable object from an error.
|
||||
//
|
||||
// Otherwise some fields might be non-enumerable and missing from logs.
|
||||
@@ -19,156 +20,132 @@ const serializeError = error =>
|
||||
stack: error.stack,
|
||||
}
|
||||
: error
|
||||
exports.serializeError = serializeError
|
||||
|
||||
class TaskLogger {
|
||||
constructor(logFn, parentId) {
|
||||
this._log = logFn
|
||||
this._parentId = parentId
|
||||
this._taskId = undefined
|
||||
const $$task = Symbol('@xen-orchestra/backups/Task')
|
||||
|
||||
class Task {
|
||||
static get cancelToken() {
|
||||
const task = Zone.current.data[$$task]
|
||||
return task !== undefined ? task.#cancelToken : CancelToken.none
|
||||
}
|
||||
|
||||
get taskId() {
|
||||
const taskId = this._taskId
|
||||
if (taskId === undefined) {
|
||||
throw new Error('start the task first')
|
||||
}
|
||||
return taskId
|
||||
static run(opts, fn) {
|
||||
return new this(opts).run(fn, true)
|
||||
}
|
||||
|
||||
// create a subtask
|
||||
fork() {
|
||||
return new TaskLogger(this._log, this.taskId)
|
||||
}
|
||||
|
||||
info(message, data) {
|
||||
return this._log({
|
||||
data,
|
||||
event: 'info',
|
||||
message,
|
||||
taskId: this.taskId,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
|
||||
run(message, data, fn) {
|
||||
if (arguments.length === 2) {
|
||||
fn = data
|
||||
data = undefined
|
||||
static wrapFn(opts, fn) {
|
||||
// compatibility with @decorateWith
|
||||
if (typeof fn !== 'function') {
|
||||
;[fn, opts] = [opts, fn]
|
||||
}
|
||||
|
||||
return SyncThenable.tryUnwrap(
|
||||
SyncThenable.fromFunction(() => {
|
||||
if (this._taskId !== undefined) {
|
||||
throw new Error('task has already started')
|
||||
}
|
||||
|
||||
this._taskId = Math.random().toString(36).slice(2)
|
||||
|
||||
return this._log({
|
||||
data,
|
||||
event: 'start',
|
||||
message,
|
||||
parentId: this._parentId,
|
||||
taskId: this.taskId,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
})
|
||||
.then(fn)
|
||||
.then(
|
||||
result => {
|
||||
const log = this._log
|
||||
this._log = logAfterEnd
|
||||
return SyncThenable.resolve(
|
||||
log({
|
||||
event: 'end',
|
||||
result,
|
||||
status: 'success',
|
||||
taskId: this.taskId,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
).then(() => result)
|
||||
},
|
||||
error => {
|
||||
const log = this._log
|
||||
this._log = logAfterEnd
|
||||
return SyncThenable.resolve(
|
||||
log({
|
||||
event: 'end',
|
||||
result: serializeError(error),
|
||||
status: 'failure',
|
||||
taskId: this.taskId,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
).then(() => {
|
||||
throw error
|
||||
})
|
||||
}
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
warning(message, data) {
|
||||
return this._log({
|
||||
data,
|
||||
event: 'warning',
|
||||
message,
|
||||
taskId: this.taskId,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
|
||||
wrapFn(fn, message, data) {
|
||||
const logger = this
|
||||
return function () {
|
||||
const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
|
||||
|
||||
return logger.run(evaluate(message), evaluate(data), () => fn.apply(this, arguments))
|
||||
return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const $$task = Symbol('current task logger')
|
||||
#cancelToken
|
||||
#id = Math.random().toString(36).slice(2)
|
||||
#onLog
|
||||
#zone
|
||||
|
||||
const getCurrent = () => Zone.current.data[$$task]
|
||||
|
||||
const Task = {
|
||||
info(message, data) {
|
||||
const task = getCurrent()
|
||||
if (task !== undefined) {
|
||||
return task.info(message, data)
|
||||
}
|
||||
},
|
||||
|
||||
run({ name, data, onLog }, fn) {
|
||||
let parentId
|
||||
constructor({ name, data, onLog }) {
|
||||
let parentCancelToken, parentId
|
||||
if (onLog === undefined) {
|
||||
const parent = getCurrent()
|
||||
const parent = Zone.current.data[$$task]
|
||||
if (parent === undefined) {
|
||||
return fn()
|
||||
onLog = noop
|
||||
} else {
|
||||
onLog = log => parent.#onLog(log)
|
||||
parentCancelToken = parent.#cancelToken
|
||||
parentId = parent.#id
|
||||
}
|
||||
onLog = parent._log
|
||||
parentId = parent.taskId
|
||||
}
|
||||
|
||||
const task = new TaskLogger(onLog, parentId)
|
||||
const zone = Zone.current.fork('task')
|
||||
zone.data[$$task] = task
|
||||
return task.run(name, data, zone.wrap(fn))
|
||||
},
|
||||
const zone = Zone.current.fork('@xen-orchestra/backups/Task')
|
||||
zone.data[$$task] = this
|
||||
this.#zone = zone
|
||||
|
||||
const { cancel, token } = CancelToken.source(parentCancelToken && [parentCancelToken])
|
||||
this.#cancelToken = token
|
||||
this.cancel = cancel
|
||||
|
||||
this.#onLog = onLog
|
||||
|
||||
this.#log('start', {
|
||||
data,
|
||||
message: name,
|
||||
parentId,
|
||||
})
|
||||
}
|
||||
|
||||
failure(error) {
|
||||
this.#end('failure', serializeError(error))
|
||||
}
|
||||
|
||||
info(message, data) {
|
||||
this.#log('info', { data, message })
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a function in the context of this task
|
||||
*
|
||||
* In case of error, the task will be failed.
|
||||
*
|
||||
* @typedef Result
|
||||
* @param {() => Result)} fn
|
||||
* @param {boolean} last - Whether the task should succeed if there is no error
|
||||
* @returns Result
|
||||
*/
|
||||
run(fn, last = false) {
|
||||
return this.#zone.run(() => {
|
||||
try {
|
||||
const result = fn()
|
||||
let then
|
||||
if (result != null && typeof (then = result.then) === 'function') {
|
||||
then.call(result, last && (value => this.success(value)), error => this.failure(error))
|
||||
} else if (last) {
|
||||
this.success(result)
|
||||
}
|
||||
return result
|
||||
} catch (error) {
|
||||
this.failure(error)
|
||||
throw error
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
success(value) {
|
||||
this.#end('success', value)
|
||||
}
|
||||
|
||||
warning(message, data) {
|
||||
const task = getCurrent()
|
||||
if (task !== undefined) {
|
||||
return task.warning(message, data)
|
||||
}
|
||||
},
|
||||
this.#log('warning', { data, message })
|
||||
}
|
||||
|
||||
wrapFn({ name, data, onLog }, fn) {
|
||||
wrapFn(fn, last) {
|
||||
const task = this
|
||||
return function () {
|
||||
const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
|
||||
return Task.run({ name: evaluate(name), data: evaluate(data), onLog }, () => fn.apply(this, arguments))
|
||||
return task.run(() => fn.apply(this, arguments), last)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
#end(status, result) {
|
||||
this.#log('end', { result, status })
|
||||
this.#onLog = logAfterEnd
|
||||
}
|
||||
|
||||
#log(event, props) {
|
||||
this.#onLog({
|
||||
...props,
|
||||
event,
|
||||
taskId: this.#id,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
}
|
||||
exports.Task = Task

for (const method of ['info', 'warning']) {
  Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
}
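// A hedged usage sketch of the Task API defined above; the onLog handler and the
// returned value are illustrative only.
async function exampleTask() {
  const result = await Task.run(
    { name: 'example task', onLog: log => console.log(log.event, log.message ?? '', log.status ?? '') },
    async () => {
      Task.info('doing some work')
      return 42 // reported in the final 'end' log with status 'success'
    }
  )
  return result // 42
}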
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { Task } = require('./Task')
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
|
||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
||||
const { Task } = require('./Task.js')
|
||||
|
||||
const PATH_DB_DUMP = '/pool/xmldbdump'
|
||||
exports.PATH_DB_DUMP = PATH_DB_DUMP
|
||||
@@ -21,7 +21,7 @@ exports.PoolMetadataBackup = class PoolMetadataBackup {
|
||||
_exportPoolMetadata() {
|
||||
const xapi = this._pool.$xapi
|
||||
return xapi.getResource(PATH_DB_DUMP, {
|
||||
task: xapi.createTask('Export pool metadata'),
|
||||
task: xapi.task_create('Export pool metadata'),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,22 +1,31 @@
|
||||
const findLast = require('lodash/findLast')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const keyBy = require('lodash/keyBy')
|
||||
const mapValues = require('lodash/mapValues')
|
||||
const assert = require('assert')
|
||||
const findLast = require('lodash/findLast.js')
|
||||
const groupBy = require('lodash/groupBy.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const keyBy = require('lodash/keyBy.js')
|
||||
const mapValues = require('lodash/mapValues.js')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { defer } = require('golike-defer')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { ContinuousReplicationWriter } = require('./_ContinuousReplicationWriter')
|
||||
const { DeltaBackupWriter } = require('./_DeltaBackupWriter')
|
||||
const { DisasterRecoveryWriter } = require('./_DisasterRecoveryWriter')
|
||||
const { exportDeltaVm } = require('./_deltaVm')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')
|
||||
const { FullBackupWriter } = require('./_FullBackupWriter')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { Task } = require('./Task')
|
||||
const { watchStreamSize } = require('./_watchStreamSize')
|
||||
const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
|
||||
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
|
||||
const { exportDeltaVm } = require('./_deltaVm.js')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
|
||||
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
|
||||
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
|
||||
const { getOldEntries } = require('./_getOldEntries.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
||||
|
||||
const { debug, warn } = createLogger('xo:proxy:backups:VmBackup')
|
||||
const { debug, warn } = createLogger('xo:backups:VmBackup')
|
||||
|
||||
const asyncEach = async (iterable, fn, thisArg = iterable) => {
|
||||
for (const item of iterable) {
|
||||
await fn.call(thisArg, item)
|
||||
}
|
||||
}
|
||||
|
||||
const forkDeltaExport = deltaExport =>
|
||||
Object.create(deltaExport, {
|
||||
@@ -62,12 +71,12 @@ exports.VmBackup = class VmBackup {
|
||||
|
||||
// Create writers
|
||||
{
|
||||
const writers = []
|
||||
const writers = new Set()
|
||||
this._writers = writers
|
||||
|
||||
const [BackupWriter, ReplicationWriter] = this._isDelta
|
||||
? [DeltaBackupWriter, ContinuousReplicationWriter]
|
||||
: [FullBackupWriter, DisasterRecoveryWriter]
|
||||
? [DeltaBackupWriter, DeltaReplicationWriter]
|
||||
: [FullBackupWriter, FullReplicationWriter]
|
||||
|
||||
const allSettings = job.settings
|
||||
|
||||
@@ -77,7 +86,7 @@ exports.VmBackup = class VmBackup {
|
||||
...allSettings[remoteId],
|
||||
}
|
||||
if (targetSettings.exportRetention !== 0) {
|
||||
writers.push(new BackupWriter(this, remoteId, targetSettings))
|
||||
writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
|
||||
}
|
||||
})
|
||||
srs.forEach(sr => {
|
||||
@@ -86,12 +95,43 @@ exports.VmBackup = class VmBackup {
|
||||
...allSettings[sr.uuid],
|
||||
}
|
||||
if (targetSettings.copyRetention !== 0) {
|
||||
writers.push(new ReplicationWriter(this, sr, targetSettings))
|
||||
writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
|
||||
async _callWriters(fn, warnMessage, parallel = true) {
|
||||
const writers = this._writers
|
||||
const n = writers.size
|
||||
if (n === 0) {
|
||||
return
|
||||
}
|
||||
if (n === 1) {
|
||||
const [writer] = writers
|
||||
try {
|
||||
await fn(writer)
|
||||
} catch (error) {
|
||||
writers.delete(writer)
|
||||
throw error
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
|
||||
try {
|
||||
await fn(writer)
|
||||
} catch (error) {
|
||||
this.delete(writer)
|
||||
warn(warnMessage, { error, writer: writer.constructor.name })
|
||||
}
|
||||
})
|
||||
if (writers.size === 0) {
|
||||
throw new Error('all targets have failed, step: ' + warnMessage)
|
||||
}
|
||||
}
|
||||
|
||||
// ensure the VM itself does not have any backup metadata which would be
|
||||
// copied on manual snapshots and interfere with the backup jobs
|
||||
async _cleanMetadata() {
|
||||
@@ -114,16 +154,17 @@ exports.VmBackup = class VmBackup {
|
||||
|
||||
const settings = this._settings
|
||||
|
||||
const doSnapshot = this._isDelta || vm.power_state === 'Running' || settings.snapshotRetention !== 0
|
||||
const doSnapshot =
|
||||
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
|
||||
if (doSnapshot) {
|
||||
await Task.run({ name: 'snapshot' }, async () => {
|
||||
if (!settings.bypassVdiChainsCheck) {
|
||||
await vm.$assertHealthyVdiChains()
|
||||
}
|
||||
|
||||
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot'](
|
||||
this._getSnapshotNameLabel(vm)
|
||||
)
|
||||
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
|
||||
name_label: this._getSnapshotNameLabel(vm),
|
||||
})
|
||||
this.timestamp = Date.now()
|
||||
|
||||
await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
|
||||
@@ -146,29 +187,28 @@ exports.VmBackup = class VmBackup {
|
||||
async _copyDelta() {
|
||||
const { exportedVm } = this
|
||||
const baseVm = this._baseVm
|
||||
const fullVdisRequired = this._fullVdisRequired
|
||||
|
||||
const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0
|
||||
|
||||
await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')
|
||||
|
||||
const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
|
||||
fullVdisRequired: this._fullVdisRequired,
|
||||
fullVdisRequired,
|
||||
})
|
||||
const sizeContainers = mapValues(deltaExport.streams, watchStreamSize)
|
||||
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
|
||||
|
||||
const timestamp = Date.now()
|
||||
|
||||
await asyncMap(this._writers, async writer => {
|
||||
try {
|
||||
await writer.run({
|
||||
await this._callWriters(
|
||||
writer =>
|
||||
writer.transfer({
|
||||
deltaExport: forkDeltaExport(deltaExport),
|
||||
sizeContainers,
|
||||
timestamp,
|
||||
})
|
||||
} catch (error) {
|
||||
warn('copy failure', {
|
||||
error,
|
||||
target: writer.target,
|
||||
vm: this.vm,
|
||||
})
|
||||
}
|
||||
})
|
||||
}),
|
||||
'writer.transfer()'
|
||||
)
|
||||
|
||||
this._baseVm = exportedVm
|
||||
|
||||
@@ -192,6 +232,8 @@ exports.VmBackup = class VmBackup {
|
||||
speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
|
||||
size,
|
||||
})
|
||||
|
||||
await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
|
||||
}
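// Quick sanity check of the speed formula above: `size` is in bytes and `duration`
// in milliseconds, so (size * 1e3) / 1024 / 1024 / duration yields MiB per second;
// e.g. 1 GiB in 20 s: (1073741824 * 1e3) / 1024 / 1024 / 20000 ≈ 51.2 MiB/s.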
|
||||
|
||||
async _copyFull() {
|
||||
@@ -204,21 +246,15 @@ exports.VmBackup = class VmBackup {
|
||||
|
||||
const timestamp = Date.now()
|
||||
|
||||
await asyncMap(this._writers, async writer => {
|
||||
try {
|
||||
await writer.run({
|
||||
await this._callWriters(
|
||||
writer =>
|
||||
writer.run({
|
||||
sizeContainer,
|
||||
stream: forkStreamUnpipe(stream),
|
||||
timestamp,
|
||||
})
|
||||
} catch (error) {
|
||||
warn('copy failure', {
|
||||
error,
|
||||
target: writer.target,
|
||||
vm: this.vm,
|
||||
})
|
||||
}
|
||||
})
|
||||
}),
|
||||
'writer.run()'
|
||||
)
|
||||
|
||||
const { size } = sizeContainer
|
||||
const end = Date.now()
|
||||
@@ -249,17 +285,28 @@ exports.VmBackup = class VmBackup {
|
||||
}
|
||||
|
||||
async _removeUnusedSnapshots() {
|
||||
// TODO: handle all schedules (no longer existing schedules default to 0 retention)
|
||||
|
||||
const { scheduleId } = this
|
||||
const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)
|
||||
|
||||
const jobSettings = this.job.settings
|
||||
const baseVmRef = this._baseVm?.$ref
|
||||
const { config } = this
|
||||
const baseSettings = {
|
||||
...config.defaultSettings,
|
||||
...config.metadata.defaultSettings,
|
||||
...jobSettings[''],
|
||||
}
|
||||
|
||||
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
|
||||
const xapi = this._xapi
|
||||
await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
|
||||
if ($ref !== baseVmRef) {
|
||||
return xapi.VM_destroy($ref)
|
||||
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
|
||||
const settings = {
|
||||
...baseSettings,
|
||||
...jobSettings[scheduleId],
|
||||
...jobSettings[this.vm.uuid],
|
||||
}
|
||||
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
|
||||
if ($ref !== baseVmRef) {
|
||||
return xapi.VM_destroy($ref)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@@ -268,12 +315,14 @@ exports.VmBackup = class VmBackup {
|
||||
|
||||
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
|
||||
if (baseVm === undefined) {
|
||||
debug('no base VM found')
|
||||
return
|
||||
}
|
||||
|
||||
const fullInterval = this._settings.fullInterval
|
||||
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
|
||||
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
|
||||
debug('not using base VM because fullInterval reached')
|
||||
return
|
||||
}
|
||||
|
||||
@@ -288,22 +337,32 @@ exports.VmBackup = class VmBackup {
|
||||
const srcVdi = srcVdis[snapshotOf]
|
||||
if (srcVdi !== undefined) {
|
||||
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
|
||||
} else {
|
||||
debug('no base VDI found', {
|
||||
vdi: srcVdi.uuid,
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
const presentBaseVdis = new Map(baseUuidToSrcVdi)
|
||||
const writers = this._writers
|
||||
for (let i = 0, n = writers.length; presentBaseVdis.size !== 0 && i < n; ++i) {
|
||||
await writers[i].checkBaseVdis(presentBaseVdis, baseVm)
|
||||
}
|
||||
|
||||
if (presentBaseVdis.size === 0) {
|
||||
return
|
||||
}
|
||||
await this._callWriters(
|
||||
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
|
||||
'writer.checkBaseVdis()',
|
||||
false
|
||||
)
|
||||
|
||||
const fullVdisRequired = new Set()
|
||||
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
|
||||
if (!presentBaseVdis.has(baseUuid)) {
|
||||
if (presentBaseVdis.has(baseUuid)) {
|
||||
debug('found base VDI', {
|
||||
base: baseUuid,
|
||||
vdi: srcVdi.uuid,
|
||||
})
|
||||
} else {
|
||||
debug('missing base VDI', {
|
||||
base: baseUuid,
|
||||
vdi: srcVdi.uuid,
|
||||
})
|
||||
fullVdisRequired.add(srcVdi.uuid)
|
||||
}
|
||||
})
|
||||
@@ -312,7 +371,19 @@ exports.VmBackup = class VmBackup {
|
||||
this._fullVdisRequired = fullVdisRequired
|
||||
}
|
||||
|
||||
async run() {
|
||||
run = defer(this.run)
|
||||
async run($defer) {
|
||||
const settings = this._settings
|
||||
assert(
|
||||
!settings.offlineBackup || settings.snapshotRetention === 0,
|
||||
'offlineBackup is not compatible with snapshotRetention'
|
||||
)
|
||||
|
||||
await this._callWriters(async writer => {
|
||||
await writer.beforeBackup()
|
||||
$defer(() => writer.afterBackup())
|
||||
}, 'writer.beforeBackup()')
|
||||
|
||||
await this._fetchJobSnapshots()
|
||||
|
||||
if (this._isDelta) {
|
||||
@@ -322,7 +393,7 @@ exports.VmBackup = class VmBackup {
|
||||
await this._cleanMetadata()
|
||||
await this._removeUnusedSnapshots()
|
||||
|
||||
const { _settings: settings, vm } = this
|
||||
const { vm } = this
|
||||
const isRunning = vm.power_state === 'Running'
|
||||
const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
|
||||
if (startAfter) {
|
||||
@@ -335,7 +406,7 @@ exports.VmBackup = class VmBackup {
|
||||
ignoreErrors.call(vm.$callAsync('start', false, false))
|
||||
}
|
||||
|
||||
if (this._writers.length !== 0) {
|
||||
if (this._writers.size !== 0) {
|
||||
await (this._isDelta ? this._copyDelta() : this._copyFull())
|
||||
}
|
||||
} finally {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
|
||||
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { Task } = require('./Task')
|
||||
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')
|
||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
||||
const { Task } = require('./Task.js')
|
||||
|
||||
exports.XoMetadataBackup = class XoMetadataBackup {
|
||||
constructor({ config, job, remoteAdapters, schedule, settings }) {
|
||||
|
||||
@xen-orchestra/backups/_backupType.js (normal file, 4 lines)
@@ -0,0 +1,4 @@
exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.cheksum')
@xen-orchestra/backups/_backupWorker.js (normal file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
|
||||
require('@xen-orchestra/log').createLogger('xo:backups:worker')
|
||||
)
|
||||
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const { compose } = require('@vates/compose')
|
||||
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
|
||||
const { deduped } = require('@vates/disposable/deduped.js')
|
||||
const { getHandler } = require('@xen-orchestra/fs')
|
||||
const { parseDuration } = require('@vates/parse-duration')
|
||||
const { Xapi } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { Backup } = require('./Backup.js')
|
||||
const { RemoteAdapter } = require('./RemoteAdapter.js')
|
||||
const { Task } = require('./Task.js')
|
||||
|
||||
class BackupWorker {
|
||||
#config
|
||||
#job
|
||||
#recordToXapi
|
||||
#remoteOptions
|
||||
#remotes
|
||||
#schedule
|
||||
#xapiOptions
|
||||
#xapis
|
||||
|
||||
constructor({ config, job, recordToXapi, remoteOptions, remotes, resourceCacheDelay, schedule, xapiOptions, xapis }) {
|
||||
this.#config = config
|
||||
this.#job = job
|
||||
this.#recordToXapi = recordToXapi
|
||||
this.#remoteOptions = remoteOptions
|
||||
this.#remotes = remotes
|
||||
this.#schedule = schedule
|
||||
this.#xapiOptions = xapiOptions
|
||||
this.#xapis = xapis
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
debounceResource.defaultDelay = parseDuration(resourceCacheDelay)
|
||||
this.debounceResource = debounceResource
|
||||
}
|
||||
|
||||
run() {
|
||||
return new Backup({
|
||||
config: this.#config,
|
||||
getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
|
||||
getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
|
||||
const xapiId = this.#recordToXapi[uuid]
|
||||
if (xapiId === undefined) {
|
||||
throw new Error('no XAPI associated to ' + uuid)
|
||||
}
|
||||
|
||||
const xapi = yield this.getXapi(this.#xapis[xapiId])
|
||||
return xapi.getRecordByUuid(type, uuid)
|
||||
}).bind(this),
|
||||
job: this.#job,
|
||||
schedule: this.#schedule,
|
||||
}).run()
|
||||
}
|
||||
|
||||
getAdapter = Disposable.factory(this.getAdapter)
|
||||
getAdapter = deduped(this.getAdapter, remote => [remote.url])
|
||||
getAdapter = compose(this.getAdapter, function (resource) {
|
||||
return this.debounceResource(resource)
|
||||
})
|
||||
async *getAdapter(remote) {
|
||||
const handler = getHandler(remote, this.#remoteOptions)
|
||||
await handler.sync()
|
||||
try {
|
||||
yield new RemoteAdapter(handler, {
|
||||
debounceResource: this.debounceResource,
|
||||
dirMode: this.#config.dirMode,
|
||||
})
|
||||
} finally {
|
||||
await handler.forget()
|
||||
}
|
||||
}
|
||||
|
||||
getXapi = Disposable.factory(this.getXapi)
|
||||
getXapi = deduped(this.getXapi, ({ url }) => [url])
|
||||
getXapi = compose(this.getXapi, function (resource) {
|
||||
return this.debounceResource(resource)
|
||||
})
|
||||
async *getXapi({ credentials: { username: user, password }, ...opts }) {
|
||||
const xapi = new Xapi({
|
||||
...this.#xapiOptions,
|
||||
...opts,
|
||||
auth: {
|
||||
user,
|
||||
password,
|
||||
},
|
||||
})
|
||||
|
||||
await xapi.connect()
|
||||
try {
|
||||
await xapi.objectsFetched
|
||||
|
||||
yield xapi
|
||||
} finally {
|
||||
await xapi.disconnect()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Received message:
|
||||
//
|
||||
// Message {
|
||||
// action: 'run'
|
||||
// data: object
|
||||
// runWithLogs: boolean
|
||||
// }
|
||||
//
|
||||
// Sent message:
|
||||
//
|
||||
// Message {
|
||||
// type: 'log' | 'result'
|
||||
// data?: object
|
||||
// status?: 'success' | 'failure'
|
||||
// result?: any
|
||||
// }
|
||||
process.on('message', async message => {
|
||||
if (message.action === 'run') {
|
||||
const backupWorker = new BackupWorker(message.data)
|
||||
try {
|
||||
const result = message.runWithLogs
|
||||
? await Task.run(
|
||||
{
|
||||
name: 'backup run',
|
||||
onLog: data =>
|
||||
process.send({
|
||||
data,
|
||||
type: 'log',
|
||||
}),
|
||||
},
|
||||
() => backupWorker.run()
|
||||
)
|
||||
: await backupWorker.run()
|
||||
|
||||
process.send({
|
||||
type: 'result',
|
||||
result,
|
||||
status: 'success',
|
||||
})
|
||||
} catch (error) {
|
||||
process.send({
|
||||
type: 'result',
|
||||
result: error,
|
||||
status: 'failure',
|
||||
})
|
||||
} finally {
|
||||
await ignoreErrors.call(backupWorker.debounceResource.flushAll())
|
||||
process.disconnect()
|
||||
}
|
||||
}
|
||||
})
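// A hedged parent-side sketch of the protocol documented above; it would live in the
// calling process, not in this worker file, and the fork path is an assumption:
//
//   const { fork } = require('child_process')
//   const worker = fork(require.resolve('@xen-orchestra/backups/_backupWorker.js'))
//   worker.on('message', message => {
//     if (message.type === 'log') console.log('log', message.data)
//     else if (message.type === 'result') console.log(message.status, message.result)
//   })
//   worker.send({ action: 'run', runWithLogs: true, data: { /* config, job, remotes, xapis… */ } })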
|
||||
@@ -1,5 +1,5 @@
|
||||
const cancelable = require('promise-toolbox/cancelable')
|
||||
const CancelToken = require('promise-toolbox/CancelToken')
|
||||
const cancelable = require('promise-toolbox/cancelable.js')
|
||||
const CancelToken = require('promise-toolbox/CancelToken.js')
|
||||
|
||||
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
|
||||
//
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
const Vhd = require('vhd-lib').default
|
||||
|
||||
exports.checkVhd = async function checkVhd(handler, path) {
|
||||
await new Vhd(handler, path).readHeaderAndFooter()
|
||||
}
|
||||
@xen-orchestra/backups/_cleanVm.integ.spec.js (normal file, 390 lines)
@@ -0,0 +1,390 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
const rimraf = require('rimraf')
|
||||
const tmp = require('tmp')
|
||||
const fs = require('fs-extra')
|
||||
const { getHandler } = require('@xen-orchestra/fs')
|
||||
const { pFromCallback } = require('promise-toolbox')
|
||||
const crypto = require('crypto')
|
||||
const { RemoteAdapter } = require('./RemoteAdapter')
|
||||
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
|
||||
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
|
||||
|
||||
let tempDir, adapter, handler, jobId, vdiId, basePath
|
||||
|
||||
jest.setTimeout(60000)
|
||||
|
||||
beforeEach(async () => {
|
||||
tempDir = await pFromCallback(cb => tmp.dir(cb))
|
||||
handler = getHandler({ url: `file://${tempDir}` })
|
||||
await handler.sync()
|
||||
adapter = new RemoteAdapter(handler)
|
||||
jobId = uniqueId()
|
||||
vdiId = uniqueId()
|
||||
basePath = `vdis/${jobId}/${vdiId}`
|
||||
await fs.mkdirp(`${tempDir}/${basePath}`)
|
||||
})
|
||||
|
||||
afterEach(async () => {
|
||||
await pFromCallback(cb => rimraf(tempDir, cb))
|
||||
await handler.forget()
|
||||
})
|
||||
|
||||
const uniqueId = () => crypto.randomBytes(16).toString('hex')
|
||||
|
||||
async function generateVhd(path, opts = {}) {
|
||||
let vhd
|
||||
|
||||
const dataPath = opts.useAlias ? path + '.data' : path
|
||||
if (opts.mode === 'directory') {
|
||||
await handler.mkdir(dataPath)
|
||||
vhd = new VhdDirectory(handler, dataPath)
|
||||
} else {
|
||||
const fd = await handler.openFile(dataPath, 'wx')
|
||||
vhd = new VhdFile(handler, fd)
|
||||
}
|
||||
|
||||
vhd.header = { ...VHDHEADER, ...opts.header }
|
||||
vhd.footer = { ...VHDFOOTER, ...opts.footer }
|
||||
vhd.footer.uuid = Buffer.from(crypto.randomBytes(16))
|
||||
|
||||
if (vhd.header.parentUnicodeName) {
|
||||
vhd.footer.diskType = Constants.DISK_TYPES.DIFFERENCING
|
||||
} else {
|
||||
vhd.footer.diskType = Constants.DISK_TYPES.DYNAMIC
|
||||
}
|
||||
|
||||
if (opts.useAlias === true) {
|
||||
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
|
||||
}
|
||||
|
||||
await vhd.writeBlockAllocationTable()
|
||||
await vhd.writeHeader()
|
||||
await vhd.writeFooter()
|
||||
return vhd
|
||||
}
|
||||
|
||||
test('It remove broken vhd', async () => {
|
||||
// TODO: also test a directory and an alias
|
||||
|
||||
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
|
||||
expect((await handler.list(basePath)).length).toEqual(1)
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
loggued += message
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: false, onLog })
|
||||
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
|
||||
// not removed
|
||||
expect((await handler.list(basePath)).length).toEqual(1)
|
||||
// really remove it
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
expect((await handler.list(basePath)).length).toEqual(0)
|
||||
})
|
||||
|
||||
test('it remove vhd with missing or multiple ancestors', async () => {
|
||||
// one with a broken parent
|
||||
await generateVhd(`${basePath}/abandonned.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'gone.vhd',
|
||||
parentUid: Buffer.from(crypto.randomBytes(16)),
|
||||
},
|
||||
})
|
||||
|
||||
// one orphan, which is a full vhd, no parent
|
||||
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
|
||||
// a child to the orphan
|
||||
await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
// clean
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
loggued += message + '\n'
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
|
||||
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
|
||||
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
|
||||
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
|
||||
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
|
||||
|
||||
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
|
||||
})
|
||||
|
||||
test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
|
||||
// create a metadata file marking child and orphan as ok
|
||||
await handler.writeFile(
|
||||
`metadata.json`,
|
||||
JSON.stringify({
|
||||
mode: 'delta',
|
||||
vhds: [
|
||||
`${basePath}/orphan.vhd`,
|
||||
`${basePath}/child.vhd`,
|
||||
// abandonned.json is not here
|
||||
],
|
||||
})
|
||||
)
|
||||
|
||||
await generateVhd(`${basePath}/abandonned.vhd`)
|
||||
|
||||
// one orphan, which is a full vhd, no parent
|
||||
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
|
||||
|
||||
// a child to the orphan
|
||||
await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
loggued += message + '\n'
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
let matched = loggued.match(/deleting unused VHD /g) || []
|
||||
expect(matched.length).toEqual(1) // only one vhd should have been deleted
|
||||
matched = loggued.match(/abandonned.vhd is unused/g) || []
|
||||
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
|
||||
|
||||
// a missing VHD causes clean to remove all VHDs
|
||||
await handler.writeFile(
|
||||
`metadata.json`,
|
||||
JSON.stringify({
|
||||
mode: 'delta',
|
||||
vhds: [
|
||||
`${basePath}/deleted.vhd`, // in metadata but not in vhds
|
||||
`${basePath}/orphan.vhd`,
|
||||
`${basePath}/child.vhd`,
|
||||
// abandonned.json is not here
|
||||
],
|
||||
}),
|
||||
{ flags: 'w' }
|
||||
)
|
||||
loggued = ''
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
matched = loggued.match(/deleting unused VHD /g) || []
|
||||
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
|
||||
})
|
||||
|
||||
test('it merges delta of non destroyed chain', async () => {
|
||||
await handler.writeFile(
|
||||
`metadata.json`,
|
||||
JSON.stringify({
|
||||
mode: 'delta',
|
||||
size: 209920,
|
||||
vhds: [
|
||||
`${basePath}/grandchild.vhd`, // grand child should not be merged
|
||||
`${basePath}/child.vhd`,
|
||||
// orphan is not here, it should be merged into child
|
||||
],
|
||||
})
|
||||
)
|
||||
|
||||
// one orphan, which is a full vhd, no parent
|
||||
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
|
||||
// a child to the orphan
|
||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
// a grand child
|
||||
await generateVhd(`${basePath}/grandchild.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'child.vhd',
|
||||
parentUid: child.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
loggued += message + '\n'
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
expect(loggued).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused\n`)
|
||||
loggued = ''
|
||||
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
|
||||
const [unused, merging] = loggued.split('\n')
|
||||
expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
|
||||
expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
|
||||
|
||||
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
|
||||
|
||||
// only check deletion
|
||||
const remainingVhds = await handler.list(basePath)
|
||||
expect(remainingVhds.length).toEqual(2)
|
||||
expect(remainingVhds.includes('child.vhd')).toEqual(true)
|
||||
expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
|
||||
})
|
||||
|
||||
test('it finishes an unterminated merge', async () => {
|
||||
await handler.writeFile(
|
||||
`metadata.json`,
|
||||
JSON.stringify({
|
||||
mode: 'delta',
|
||||
size: 209920,
|
||||
vhds: [
|
||||
`${basePath}/orphan.vhd`,
|
||||
`${basePath}/child.vhd`,
|
||||
// both are referenced, but the merge of child into orphan was interrupted (see the .merge.json file below)
|
||||
],
|
||||
})
|
||||
)
|
||||
|
||||
// one orphan, which is a full vhd, no parent
|
||||
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
|
||||
// a child to the orphan
|
||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
// a merge in progress file
|
||||
await handler.writeFile(
|
||||
`${basePath}/.orphan.vhd.merge.json`,
|
||||
JSON.stringify({
|
||||
parent: {
|
||||
header: orphan.header.checksum,
|
||||
},
|
||||
child: {
|
||||
header: child.header.checksum,
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
// an unfinished merge
|
||||
await adapter.cleanVm('/', { remove: true, merge: true })
|
||||
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
|
||||
|
||||
// only check deletion
|
||||
const remainingVhds = await handler.list(basePath)
|
||||
expect(remainingVhds.length).toEqual(1)
|
||||
expect(remainingVhds.includes('child.vhd')).toEqual(true)
|
||||
})
|
||||
|
||||
// each of the VHDs can be a file, a directory, an alias to a file or an alias to a directory
|
||||
// the messages and resulting files should be identical to the output with plain VHD files, which is tested independently
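// a hedged sketch of the alias layout these combinations assume (names are illustrative): the
// alias is a tiny file pointing to the actual data, created with the same helper used below,
//   await VhdAbstract.createAlias(handler, `${basePath}/child.vhd.alias.vhd`, `${basePath}/child.vhd.data`)
// only the `.alias.vhd` entry is referenced by metadata.json; the `.data` part may be a plain
// file or a directory-format VHD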
|
||||
|
||||
describe('tests multiple combinations', () => {
|
||||
for (const useAlias of [true, false]) {
|
||||
for (const vhdMode of ['file', 'directory']) {
|
||||
test(`alias : ${useAlias}, mode: ${vhdMode}`, async () => {
|
||||
// a broken VHD
|
||||
const brokenVhdDataPath = basePath + (useAlias ? 'broken.data' : 'broken.vhd')
|
||||
if (vhdMode === 'directory') {
|
||||
await handler.mkdir(brokenVhdDataPath)
|
||||
} else {
|
||||
await handler.writeFile(brokenVhdDataPath, 'notreallyavhd')
|
||||
}
|
||||
if (useAlias) {
|
||||
await VhdAbstract.createAlias(handler, 'broken.alias.vhd', brokenVhdDataPath)
|
||||
}
|
||||
|
||||
// a VHD not referenced in the metadata
|
||||
await generateVhd(`${basePath}/nonreference.vhd`, { useAlias, mode: vhdMode })
|
||||
// an abandoned delta VHD without its parent
|
||||
await generateVhd(`${basePath}/abandonned.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'gone.vhd',
|
||||
parentUid: crypto.randomBytes(16),
|
||||
},
|
||||
})
|
||||
// an ancestor of a vhd present in metadata
|
||||
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
})
|
||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUid: ancestor.footer.uuid,
|
||||
},
|
||||
})
|
||||
// a grandchild VHD in metadata
|
||||
await generateVhd(`${basePath}/grandchild.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUid: child.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
// an older parent whose merge into clean.vhd was interrupted
|
||||
const cleanAncestor = await generateVhd(`${basePath}/cleanAncestor.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
})
|
||||
// a clean vhd in metadata
|
||||
const clean = await generateVhd(`${basePath}/clean.vhd`, {
|
||||
useAlias,
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUid: cleanAncestor.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
await handler.writeFile(
|
||||
`${basePath}/.cleanAncestor.vhd${useAlias ? '.alias.vhd' : ''}.merge.json`,
|
||||
JSON.stringify({
|
||||
parent: {
|
||||
header: cleanAncestor.header.checksum,
|
||||
},
|
||||
child: {
|
||||
header: clean.header.checksum,
|
||||
},
|
||||
})
|
||||
)
|
||||
|
||||
// the metadata file
|
||||
await handler.writeFile(
|
||||
`metadata.json`,
|
||||
JSON.stringify({
|
||||
mode: 'delta',
|
||||
vhds: [
|
||||
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grandchild should not be merged
|
||||
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
|
||||
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
|
||||
],
|
||||
})
|
||||
)
|
||||
await adapter.cleanVm('/', { remove: true, merge: true })
|
||||
|
||||
// broken, non-referenced and abandonned VHDs should be deleted (alias and data)
|
||||
// ancestor and child should be merged
|
||||
// grandchild and clean VHD should not have changed
|
||||
const survivors = await handler.list(basePath)
|
||||
// console.log(survivors)
|
||||
if (useAlias) {
|
||||
// the goal of the alias: do not move a full folder
|
||||
expect(survivors).toContain('ancestor.vhd.data')
|
||||
expect(survivors).toContain('grandchild.vhd.data')
|
||||
expect(survivors).toContain('cleanAncestor.vhd.data')
|
||||
expect(survivors).toContain('clean.vhd.alias.vhd')
|
||||
expect(survivors).toContain('child.vhd.alias.vhd')
|
||||
expect(survivors).toContain('grandchild.vhd.alias.vhd')
|
||||
expect(survivors.length).toEqual(6)
|
||||
} else {
|
||||
expect(survivors).toContain('clean.vhd')
|
||||
expect(survivors).toContain('child.vhd')
|
||||
expect(survivors).toContain('grandchild.vhd')
|
||||
expect(survivors.length).toEqual(3)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
392
@xen-orchestra/backups/_cleanVm.js
Normal file
@@ -0,0 +1,392 @@
|
||||
const assert = require('assert')
|
||||
const sum = require('lodash/sum')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPES } = Constants
|
||||
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
|
||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
||||
|
||||
const { Task } = require('./Task.js')
|
||||
const { Disposable } = require('promise-toolbox')
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will be deleted
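// hypothetical example: with chain = ['vm/vdis/job/vdi/delta.vhd', 'vm/vdis/job/vdi/full.vhd'],
//   await mergeVhdChain(chain, { handler, onLog: console.log, remove: true, merge: true })
// merges delta.vhd into full.vhd, renames full.vhd to delta.vhd and, because `remove` is set,
// unlinks any intermediate VHDs of a longer chain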
|
||||
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
let child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
onLog(`the parent ${parent} of the child ${child} is unused`)
|
||||
})
|
||||
|
||||
if (merge) {
|
||||
// `mergeVhd` does not work with a stream; to merge multiple children, either
|
||||
// - make it accept a stream
|
||||
// - or create synthetic VHD which is not a stream
|
||||
if (children.length !== 1) {
|
||||
// TODO: implement merging multiple children
|
||||
children.length = 1
|
||||
child = children[0]
|
||||
}
|
||||
|
||||
onLog(`merging ${child} into ${parent}`)
|
||||
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
onLog(`merging ${child}: ${done}/${total}`)
|
||||
}
|
||||
}, 10e3)
|
||||
|
||||
const mergedSize = await mergeVhd(
|
||||
handler,
|
||||
parent,
|
||||
handler,
|
||||
child,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children),
|
||||
{
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
clearInterval(handle)
|
||||
|
||||
await Promise.all([
|
||||
VhdAbstract.rename(handler, parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
onLog(`the VHD ${child} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused VHD ${child}`)
|
||||
return VhdAbstract.unlink(handler, child)
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
return mergedSize
|
||||
}
|
||||
}
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const INTERRUPTED_VHDS_REG = /^(?:(.+)\/)?\.(.+)\.merge.json$/
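// e.g. '<vdi dir>/.orphan.vhd.merge.json' matches: group 1 is the directory, group 2 the VHD
// name, so it is recorded as an interrupted merge of '<vdi dir>/orphan.vhd'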
|
||||
const listVhds = async (handler, vmDir) => {
|
||||
const vhds = []
|
||||
const interruptedVhds = new Set()
|
||||
|
||||
await asyncMap(
|
||||
await handler.list(`${vmDir}/vdis`, {
|
||||
ignoreMissing: true,
|
||||
prependDir: true,
|
||||
}),
|
||||
async jobDir =>
|
||||
asyncMap(
|
||||
await handler.list(jobDir, {
|
||||
prependDir: true,
|
||||
}),
|
||||
async vdiDir => {
|
||||
const list = await handler.list(vdiDir, {
|
||||
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
|
||||
prependDir: true,
|
||||
})
|
||||
|
||||
list.forEach(file => {
|
||||
const res = INTERRUPTED_VHDS_REG.exec(file)
|
||||
if (res === null) {
|
||||
vhds.push(file)
|
||||
} else {
|
||||
const [, dir, file] = res
|
||||
interruptedVhds.add(`${dir}/${file}`)
|
||||
}
|
||||
})
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
return { vhds, interruptedVhds }
|
||||
}
|
||||
|
||||
const defaultMergeLimiter = limitConcurrency(1)
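// merges are serialized by default; a caller may pass its own limiter, e.g. (hypothetical):
//   adapter.cleanVm(vmDir, { remove: true, merge: true, mergeLimiter: limitConcurrency(2), onLog: console.log })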
|
||||
|
||||
exports.cleanVm = async function cleanVm(
|
||||
vmDir,
|
||||
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
|
||||
) {
|
||||
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
|
||||
|
||||
const handler = this._handler
|
||||
|
||||
const vhds = new Set()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
const vhdsList = await listVhds(handler, vmDir)
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(vhdsList.vhds, async path => {
|
||||
try {
|
||||
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !vhdsList.interruptedVhds.has(path) }), vhd => {
|
||||
vhds.add(path)
|
||||
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
|
||||
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
if (parent in vhdChildren) {
|
||||
const error = new Error('this script does not support multiple VHD children')
|
||||
error.parent = parent
|
||||
error.child1 = vhdChildren[parent]
|
||||
error.child2 = path
|
||||
throw error // should we throw?
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
onLog(`error while checking the VHD with path ${path}`, { error })
|
||||
if (error?.code === 'ERR_ASSERTION' && remove) {
|
||||
onLog(`deleting broken ${path}`)
|
||||
return VhdAbstract.unlink(handler, path)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// @todo : add check for data folder of alias not referenced in a valid alias
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
{
|
||||
const deletions = []
|
||||
|
||||
// return true if the VHD has been deleted or is missing
|
||||
const deleteIfOrphan = vhdPath => {
|
||||
const parent = vhdParents[vhdPath]
|
||||
if (parent === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
delete vhdParents[vhdPath]
|
||||
|
||||
deleteIfOrphan(parent)
|
||||
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhdPath)
|
||||
|
||||
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
|
||||
if (remove) {
|
||||
onLog(`deleting orphan VHD ${vhdPath}`)
|
||||
deletions.push(VhdAbstract.unlink(handler, vhdPath))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// > A property that is deleted before it has been visited will not be
|
||||
// > visited later.
|
||||
// >
|
||||
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
|
||||
for (const child in vhdParents) {
|
||||
deleteIfOrphan(child)
|
||||
}
|
||||
|
||||
await Promise.all(deletions)
|
||||
}
|
||||
|
||||
const jsons = []
|
||||
const xvas = new Set()
|
||||
const xvaSums = []
|
||||
const entries = await handler.list(vmDir, {
|
||||
prependDir: true,
|
||||
})
|
||||
entries.forEach(path => {
|
||||
if (isMetadataFile(path)) {
|
||||
jsons.push(path)
|
||||
} else if (isXvaFile(path)) {
|
||||
xvas.add(path)
|
||||
} else if (isXvaSumFile(path)) {
|
||||
xvaSums.push(path)
|
||||
}
|
||||
})
|
||||
|
||||
await asyncMap(xvas, async path => {
|
||||
// check is not good enough to delete the file, the best we can do is report
|
||||
// it
|
||||
if (!(await this.isValidXva(path))) {
|
||||
onLog(`the XVA with path ${path} is potentially broken`)
|
||||
}
|
||||
})
|
||||
|
||||
const unusedVhds = new Set(vhds)
|
||||
const unusedXvas = new Set(xvas)
|
||||
|
||||
// compile the list of unused XVAs and VHDs, and remove backup metadata which
|
||||
// references a missing XVA/VHD
|
||||
await asyncMap(jsons, async json => {
|
||||
const metadata = JSON.parse(await handler.readFile(json))
|
||||
const { mode } = metadata
|
||||
let size
|
||||
if (mode === 'full') {
|
||||
const linkedXva = resolve('/', vmDir, metadata.xva)
|
||||
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
|
||||
size = await handler.getSize(linkedXva).catch(error => {
|
||||
onLog(`failed to get size of ${json}`, { error })
|
||||
})
|
||||
} else {
|
||||
onLog(`the XVA linked to the metadata ${json} is missing`)
|
||||
if (remove) {
|
||||
onLog(`deleting incomplete backup ${json}`)
|
||||
await handler.unlink(json)
|
||||
}
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = (() => {
|
||||
const { vhds } = metadata
|
||||
return Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
||||
})()
|
||||
// FIXME: find better approach by keeping as much of the backup as
|
||||
// possible (existing disks) even if one disk is missing
|
||||
if (linkedVhds.every(_ => vhds.has(_))) {
|
||||
linkedVhds.forEach(_ => unusedVhds.delete(_))
|
||||
|
||||
// checking the size of a vhd directory is costly
|
||||
// 1 HTTP query per 1000 blocks
|
||||
// we only check the size if all the VHDs are VhdFiles
|
||||
|
||||
const shouldComputeSize = linkedVhds.every(vhd => vhd instanceof VhdFile)
|
||||
if (shouldComputeSize) {
|
||||
try {
|
||||
await Disposable.use(Disposable.all(linkedVhds.map(vhdPath => openVhd(handler, vhdPath))), async vhds => {
|
||||
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
|
||||
size = sum(sizes)
|
||||
})
|
||||
} catch (error) {
|
||||
onLog(`failed to get size of ${json}`, { error })
|
||||
}
|
||||
}
|
||||
} else {
|
||||
onLog(`Some VHDs linked to the metadata ${json} are missing`)
|
||||
if (remove) {
|
||||
onLog(`deleting incomplete backup ${json}`)
|
||||
await handler.unlink(json)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const metadataSize = metadata.size
|
||||
if (size !== undefined && metadataSize !== size) {
|
||||
onLog(`incorrect size in metadata: ${metadataSize ?? 'none'} instead of ${size}`)
|
||||
|
||||
// don't update if the stored size is greater than the size of the found files,
|
||||
// it can indicate a problem
|
||||
if (fixMetadata && (metadataSize === undefined || metadataSize < size)) {
|
||||
try {
|
||||
metadata.size = size
|
||||
await handler.writeFile(json, JSON.stringify(metadata), { flags: 'w' })
|
||||
} catch (error) {
|
||||
onLog(`failed to update size in backup metadata ${json}`, { error })
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// TODO: parallelize by vm/job/vdi
|
||||
const unusedVhdsDeletion = []
|
||||
const toMerge = []
|
||||
{
|
||||
// VHD chains (as lists from child to ancestor) to merge, indexed by last
|
||||
// ancestor
|
||||
const vhdChainsToMerge = { __proto__: null }
|
||||
|
||||
const toCheck = new Set(unusedVhds)
|
||||
|
||||
const getUsedChildChainOrDelete = vhd => {
|
||||
if (vhd in vhdChainsToMerge) {
|
||||
const chain = vhdChainsToMerge[vhd]
|
||||
delete vhdChainsToMerge[vhd]
|
||||
return chain
|
||||
}
|
||||
|
||||
if (!unusedVhds.has(vhd)) {
|
||||
return [vhd]
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
toCheck.delete(vhd)
|
||||
|
||||
const child = vhdChildren[vhd]
|
||||
if (child !== undefined) {
|
||||
const chain = getUsedChildChainOrDelete(child)
|
||||
if (chain !== undefined) {
|
||||
chain.push(vhd)
|
||||
return chain
|
||||
}
|
||||
}
|
||||
|
||||
onLog(`the VHD ${vhd} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused VHD ${vhd}`)
|
||||
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
|
||||
}
|
||||
}
|
||||
|
||||
toCheck.forEach(vhd => {
|
||||
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
|
||||
})
|
||||
|
||||
// merge interrupted VHDs
|
||||
vhdsList.interruptedVhds.forEach(parent => {
|
||||
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
|
||||
})
|
||||
|
||||
Object.values(vhdChainsToMerge).forEach(chain => {
|
||||
if (chain !== undefined) {
|
||||
toMerge.push(chain)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
const doMerge = () => {
|
||||
const promise = asyncMap(toMerge, async chain => limitedMergeVhdChain(chain, { handler, onLog, remove, merge }))
|
||||
return merge ? promise.then(sizes => ({ size: sum(sizes) })) : promise
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
...unusedVhdsDeletion,
|
||||
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
|
||||
asyncMap(unusedXvas, path => {
|
||||
onLog(`the XVA ${path} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused XVA ${path}`)
|
||||
return handler.unlink(path)
|
||||
}
|
||||
}),
|
||||
asyncMap(xvaSums, path => {
|
||||
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
|
||||
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
|
||||
onLog(`the XVA checksum ${path} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused XVA checksum ${path}`)
|
||||
return handler.unlink(path)
|
||||
}
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
return {
|
||||
// boolean whether some VHDs were merged (or should be merged)
|
||||
merge: toMerge.length !== 0,
|
||||
}
|
||||
}
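For reference, a minimal usage sketch of this export (hedged: the remote URL and the VM directory are placeholders; `getSyncedHandler` and `RemoteAdapter` are the same helpers required by the merge worker further down):
const Disposable = require('promise-toolbox/Disposable')
const { getSyncedHandler } = require('@xen-orchestra/fs')

const { RemoteAdapter } = require('./RemoteAdapter.js')

async function cleanVmExample() {
  // the synced handler is a disposable, so it is released when the callback settles
  await Disposable.use(getSyncedHandler({ url: 'file:///path/to/remote' }), async handler => {
    const adapter = new RemoteAdapter(handler)
    const { merge } = await adapter.cleanVm('/xo-vm-backups/<vm uuid>', {
      remove: true,
      merge: true,
      onLog: console.log,
    })
    console.log('some merges were pending:', merge)
  })
}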
|
||||
@@ -1,14 +1,14 @@
|
||||
const compareVersions = require('compare-versions')
|
||||
const defer = require('golike-defer').default
|
||||
const find = require('lodash/find')
|
||||
const groupBy = require('lodash/groupBy')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const omit = require('lodash/omit')
|
||||
const find = require('lodash/find.js')
|
||||
const groupBy = require('lodash/groupBy.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const omit = require('lodash/omit.js')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { CancelToken } = require('promise-toolbox')
|
||||
const { createVhdStreamWithLength } = require('vhd-lib')
|
||||
const { defer } = require('golike-defer')
|
||||
|
||||
const { cancelableMap } = require('./_cancelableMap')
|
||||
const { cancelableMap } = require('./_cancelableMap.js')
|
||||
|
||||
const TAG_BASE_DELTA = 'xo:base_delta'
|
||||
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
|
||||
@@ -143,7 +143,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
$defer,
|
||||
deltaVm,
|
||||
sr,
|
||||
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {} } = {}
|
||||
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
|
||||
) {
|
||||
const { version } = deltaVm
|
||||
if (compareVersions(version, '1.0.0') < 0) {
|
||||
@@ -202,6 +202,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
blocked_operations: {
|
||||
...vmRecord.blocked_operations,
|
||||
start: 'Importing…',
|
||||
start_on: 'Importing…',
|
||||
},
|
||||
ha_always_run: false,
|
||||
is_a_template: false,
|
||||
@@ -213,6 +214,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
},
|
||||
{
|
||||
bios_strings: vmRecord.bios_strings,
|
||||
generateMacSeed: newMacAddresses,
|
||||
suspend_VDI: suspendVdi?.$ref,
|
||||
}
|
||||
)
|
||||
@@ -304,9 +306,6 @@ exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
}
|
||||
}),
|
||||
|
||||
// Wait for VDI export tasks (if any) termination.
|
||||
Promise.all(Object.values(streams).map(stream => stream.task)),
|
||||
|
||||
// Create VIFs.
|
||||
asyncMap(Object.values(deltaVm.vifs), vif => {
|
||||
let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)
|
||||
@@ -325,11 +324,16 @@ exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
}
|
||||
|
||||
if (network) {
|
||||
return xapi.VIF_create({
|
||||
...vif,
|
||||
network: network.$ref,
|
||||
VM: vmRef,
|
||||
})
|
||||
return xapi.VIF_create(
|
||||
{
|
||||
...vif,
|
||||
network: network.$ref,
|
||||
VM: vmRef,
|
||||
},
|
||||
{
|
||||
MAC: newMacAddresses ? undefined : vif.MAC,
|
||||
}
|
||||
)
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const { join } = require('path')
|
||||
const { mkdir, rmdir } = require('fs-extra')
|
||||
const { tmpdir } = require('os')
|
||||
@@ -10,7 +10,7 @@ exports.getTmpDir = async function getTmpDir() {
|
||||
const path = join(tmpdir(), Math.random().toString(36).slice(2))
|
||||
try {
|
||||
await mkdir(path)
|
||||
return new Disposable(path, () => rmdir(path))
|
||||
return new Disposable(() => rmdir(path), path)
|
||||
} catch (error) {
|
||||
if (i === MAX_ATTEMPTS) {
|
||||
throw error
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
const assert = require('assert')
|
||||
const fs = require('fs-extra')
|
||||
|
||||
const isGzipFile = async fd => {
|
||||
const isGzipFile = async (handler, fd) => {
|
||||
// https://tools.ietf.org/html/rfc1952.html#page-5
|
||||
const magicNumber = Buffer.allocUnsafe(2)
|
||||
assert.strictEqual((await fs.read(fd, magicNumber, 0, magicNumber.length, 0)).bytesRead, magicNumber.length)
|
||||
|
||||
assert.strictEqual((await handler.read(fd, magicNumber, 0)).bytesRead, magicNumber.length)
|
||||
return magicNumber[0] === 31 && magicNumber[1] === 139
|
||||
}
|
||||
|
||||
@@ -21,32 +21,33 @@ const isGzipFile = async fd => {
|
||||
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
|
||||
//
|
||||
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
|
||||
const isValidTar = async (size, fd) => {
|
||||
const isValidTar = async (handler, size, fd) => {
|
||||
if (size <= 1024 || size % 512 !== 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
const buf = Buffer.allocUnsafe(1024)
|
||||
assert.strictEqual((await fs.read(fd, buf, 0, buf.length, size - buf.length)).bytesRead, buf.length)
|
||||
assert.strictEqual((await handler.read(fd, buf, size - buf.length)).bytesRead, buf.length)
|
||||
return buf.every(_ => _ === 0)
|
||||
}
|
||||
|
||||
// TODO: find a heuristic for compressed files
|
||||
const isValidXva = async path => {
|
||||
async function isValidXva(path) {
|
||||
const handler = this._handler
|
||||
try {
|
||||
const fd = await fs.open(path, 'r')
|
||||
const fd = await handler.openFile(path, 'r')
|
||||
try {
|
||||
const { size } = await fs.fstat(fd)
|
||||
const size = await handler.getSize(fd)
|
||||
if (size < 20) {
|
||||
// neither a valid gzip nor a valid tar
|
||||
return false
|
||||
}
|
||||
|
||||
return (await isGzipFile(fd))
|
||||
return (await isGzipFile(handler, fd))
|
||||
? true // gzip files cannot be validated at this time
|
||||
: await isValidTar(size, fd)
|
||||
: await isValidTar(handler, size, fd)
|
||||
} finally {
|
||||
fs.close(fd).catch(noop)
|
||||
handler.closeFile(fd).catch(noop)
|
||||
}
|
||||
} catch (error) {
|
||||
// never throw, log and report as valid to avoid side effects
|
||||
@@ -1,9 +1,9 @@
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const fromCallback = require('promise-toolbox/fromCallback.js')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { createParser } = require('parse-pairs')
|
||||
const { execFile } = require('child_process')
|
||||
|
||||
const { debug } = createLogger('xo:proxy:api')
|
||||
const { debug } = createLogger('xo:backups:listPartitions')
|
||||
|
||||
const IGNORED_PARTITION_TYPES = {
|
||||
// https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const fromCallback = require('promise-toolbox/fromCallback.js')
|
||||
const { createParser } = require('parse-pairs')
|
||||
const { execFile } = require('child_process')
|
||||
|
||||
@@ -7,23 +7,25 @@ const { execFile } = require('child_process')
|
||||
const parse = createParser({
|
||||
keyTransform: key => key.slice(5).toLowerCase(),
|
||||
})
|
||||
const makeFunction = command => async (fields, ...args) => {
|
||||
const info = await fromCallback(execFile, command, [
|
||||
'--noheading',
|
||||
'--nosuffix',
|
||||
'--nameprefixes',
|
||||
'--unbuffered',
|
||||
'--units',
|
||||
'b',
|
||||
'-o',
|
||||
String(fields),
|
||||
...args,
|
||||
])
|
||||
return info
|
||||
.trim()
|
||||
.split(/\r?\n/)
|
||||
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
|
||||
}
|
||||
const makeFunction =
|
||||
command =>
|
||||
async (fields, ...args) => {
|
||||
const info = await fromCallback(execFile, command, [
|
||||
'--noheading',
|
||||
'--nosuffix',
|
||||
'--nameprefixes',
|
||||
'--unbuffered',
|
||||
'--units',
|
||||
'b',
|
||||
'-o',
|
||||
String(fields),
|
||||
...args,
|
||||
])
|
||||
return info
|
||||
.trim()
|
||||
.split(/\r?\n/)
|
||||
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
|
||||
}
|
||||
|
||||
exports.lvs = makeFunction('lvs')
|
||||
exports.pvs = makeFunction('pvs')
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
function fulfilledThen(cb) {
|
||||
return typeof cb === 'function' ? SyncThenable.fromFunction(cb, this.value) : this
|
||||
}
|
||||
|
||||
function rejectedThen(_, cb) {
|
||||
return typeof cb === 'function' ? SyncThenable.fromFunction(cb, this.value) : this
|
||||
}
|
||||
|
||||
class SyncThenable {
|
||||
static resolve(value) {
|
||||
if (value != null && typeof value.then === 'function') {
|
||||
return value
|
||||
}
|
||||
|
||||
return new this(false, value)
|
||||
}
|
||||
|
||||
static fromFunction(fn, ...arg) {
|
||||
try {
|
||||
return this.resolve(fn(...arg))
|
||||
} catch (error) {
|
||||
return this.reject(error)
|
||||
}
|
||||
}
|
||||
|
||||
static reject(reason) {
|
||||
return new this(true, reason)
|
||||
}
|
||||
|
||||
// unwrap if it's a SyncThenable
|
||||
static tryUnwrap(value) {
|
||||
if (value instanceof this) {
|
||||
if (value.then === rejectedThen) {
|
||||
throw value.value
|
||||
}
|
||||
return value.value
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
constructor(rejected, value) {
|
||||
this.then = rejected ? rejectedThen : fulfilledThen
|
||||
this.value = value
|
||||
}
|
||||
}
|
||||
exports.SyncThenable = SyncThenable
|
||||
@@ -1,5 +1,4 @@
|
||||
exports.watchStreamSize = function watchStreamSize(stream) {
|
||||
const container = { size: 0 }
|
||||
exports.watchStreamSize = function watchStreamSize(stream, container = { size: 0 }) {
|
||||
stream.on('data', data => {
|
||||
container.size += data.length
|
||||
})
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
const mapValues = require('lodash/mapValues')
|
||||
const mapValues = require('lodash/mapValues.js')
|
||||
const { dirname } = require('path')
|
||||
|
||||
function formatVmBackup(backup) {
|
||||
|
||||
69
@xen-orchestra/backups/merge-worker/cli.js
Executable file
@@ -0,0 +1,69 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const { catchGlobalErrors } = require('@xen-orchestra/log/configure.js')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { getSyncedHandler } = require('@xen-orchestra/fs')
|
||||
const { join } = require('path')
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const min = require('lodash/min')
|
||||
|
||||
const { getVmBackupDir } = require('../_getVmBackupDir.js')
|
||||
const { RemoteAdapter } = require('../RemoteAdapter.js')
|
||||
|
||||
const { CLEAN_VM_QUEUE } = require('./index.js')
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
catchGlobalErrors(createLogger('xo:backups:mergeWorker'))
|
||||
|
||||
const { fatal, info, warn } = createLogger('xo:backups:mergeWorker')
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
const main = Disposable.wrap(async function* main(args) {
|
||||
const handler = yield getSyncedHandler({ url: 'file://' + process.cwd() })
|
||||
|
||||
yield handler.lock(CLEAN_VM_QUEUE)
|
||||
|
||||
const adapter = new RemoteAdapter(handler)
|
||||
|
||||
const listRetry = async () => {
|
||||
const timeoutResolver = resolve => setTimeout(resolve, 10e3)
|
||||
for (let i = 0; i < 10; ++i) {
|
||||
const entries = await handler.list(CLEAN_VM_QUEUE)
|
||||
if (entries.length !== 0) {
|
||||
return entries
|
||||
}
|
||||
await new Promise(timeoutResolver)
|
||||
}
|
||||
}
|
||||
|
||||
let taskFiles
|
||||
while ((taskFiles = await listRetry()) !== undefined) {
|
||||
const taskFileBasename = min(taskFiles)
|
||||
const taskFile = join(CLEAN_VM_QUEUE, '_' + taskFileBasename)
|
||||
|
||||
// move this task to the end
|
||||
await handler.rename(join(CLEAN_VM_QUEUE, taskFileBasename), taskFile)
|
||||
try {
|
||||
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
|
||||
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
|
||||
|
||||
handler.unlink(taskFile).catch(error => warn('deleting task failure', { error }))
|
||||
} catch (error) {
|
||||
warn('failure handling task', { error })
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
info('starting')
|
||||
main(process.argv.slice(2)).then(
|
||||
() => {
|
||||
info('bye :-)')
|
||||
},
|
||||
error => {
|
||||
fatal(error)
|
||||
|
||||
process.exit(1)
|
||||
}
|
||||
)
|
||||
25
@xen-orchestra/backups/merge-worker/index.js
Normal file
@@ -0,0 +1,25 @@
|
||||
const { join, resolve } = require('path')
|
||||
const { spawn } = require('child_process')
|
||||
const { check } = require('proper-lockfile')
|
||||
|
||||
const CLEAN_VM_QUEUE = (exports.CLEAN_VM_QUEUE = '/xo-vm-backups/.queue/clean-vm/')
|
||||
|
||||
const CLI_PATH = resolve(__dirname, 'cli.js')
|
||||
exports.run = async function runMergeWorker(remotePath) {
|
||||
try {
|
||||
// TODO: find a way to acquire the lock and then pass it down to the worker
|
||||
if (await check(join(remotePath, CLEAN_VM_QUEUE))) {
|
||||
// already locked, don't start another worker
|
||||
return
|
||||
}
|
||||
|
||||
spawn(CLI_PATH, {
|
||||
cwd: remotePath,
|
||||
detached: true,
|
||||
stdio: 'inherit',
|
||||
}).unref()
|
||||
} catch (error) {
|
||||
// we usually don't want to throw if the merge worker failed to start
|
||||
return error
|
||||
}
|
||||
}
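A hedged sketch of how a caller might start the worker once task files have been written under `CLEAN_VM_QUEUE` (the require path and the mount path are assumptions; `run` resolves with the error instead of throwing when the worker cannot be spawned):
const { run: runMergeWorker } = require('@xen-orchestra/backups/merge-worker')

async function startMergeWorker(remotePath) {
  // remotePath is the local mount point of the backup remote, e.g. '/run/xo-server/mounts/<remote id>'
  const error = await runMergeWorker(remotePath)
  if (error !== undefined) {
    console.warn('merge worker could not be started', error)
  }
}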
|
||||
@@ -8,35 +8,39 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.7.0",
|
||||
"version": "0.16.0",
|
||||
"engines": {
|
||||
"node": ">=14.5"
|
||||
"node": ">=14.6"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/disposable": "^0.1.0",
|
||||
"@vates/multi-key-map": "^0.1.0",
|
||||
"@vates/compose": "^2.0.0",
|
||||
"@vates/disposable": "^0.1.1",
|
||||
"@vates/parse-duration": "^0.1.1",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"@xen-orchestra/fs": "^0.19.1",
|
||||
"@xen-orchestra/log": "^0.3.0",
|
||||
"@xen-orchestra/template": "^0.1.0",
|
||||
"compare-versions": "^3.6.0",
|
||||
"compare-versions": "^4.0.1",
|
||||
"d3-time-format": "^3.0.0",
|
||||
"end-of-stream": "^1.4.4",
|
||||
"ensure-array": "^1.0.0",
|
||||
"fs-extra": "^9.0.0",
|
||||
"fs-extra": "^10.0.0",
|
||||
"golike-defer": "^0.5.1",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"limit-concurrency-decorator": "^0.5.0",
|
||||
"lodash": "^4.17.20",
|
||||
"node-zone": "^0.4.0",
|
||||
"parse-pairs": "^1.1.0",
|
||||
"promise-toolbox": "^0.17.0",
|
||||
"vhd-lib": "^1.0.0",
|
||||
"promise-toolbox": "^0.20.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"pump": "^3.0.0",
|
||||
"uuid": "^8.3.2",
|
||||
"vhd-lib": "^2.0.1",
|
||||
"yazl": "^2.5.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@xen-orchestra/xapi": "^0.4.4"
|
||||
"@xen-orchestra/xapi": "^0.8.4"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
const { DIR_XO_CONFIG_BACKUPS, DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
|
||||
const { DIR_XO_CONFIG_BACKUPS, DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
|
||||
|
||||
exports.parseMetadataBackupId = function parseMetadataBackupId(backupId) {
|
||||
const [dir, ...rest] = backupId.split('/')
|
||||
|
||||
38
@xen-orchestra/backups/runBackupWorker.js
Normal file
@@ -0,0 +1,38 @@
|
||||
const path = require('path')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { fork } = require('child_process')
|
||||
|
||||
const { warn } = createLogger('xo:backups:backupWorker')
|
||||
|
||||
const PATH = path.resolve(__dirname, '_backupWorker.js')
|
||||
|
||||
exports.runBackupWorker = function runBackupWorker(params, onLog) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const worker = fork(PATH)
|
||||
|
||||
worker.on('exit', code => reject(new Error(`worker exited with code ${code}`)))
|
||||
worker.on('error', reject)
|
||||
|
||||
worker.on('message', message => {
|
||||
try {
|
||||
if (message.type === 'result') {
|
||||
if (message.status === 'success') {
|
||||
resolve(message.result)
|
||||
} else {
|
||||
reject(message.result)
|
||||
}
|
||||
} else if (message.type === 'log') {
|
||||
onLog(message.data)
|
||||
}
|
||||
} catch (error) {
|
||||
warn(error)
|
||||
}
|
||||
})
|
||||
|
||||
worker.send({
|
||||
action: 'run',
|
||||
data: params,
|
||||
runWithLogs: onLog !== undefined,
|
||||
})
|
||||
})
|
||||
}
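A hedged caller sketch (the shape of `params` is whatever `_backupWorker.js` consumes; it is forwarded untouched):
const { runBackupWorker } = require('@xen-orchestra/backups/runBackupWorker')

async function runJobInWorker(params) {
  // the callback receives the `{ type: 'log', data }` messages forwarded by the worker
  return runBackupWorker(params, log => console.log(log))
}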
|
||||
92
@xen-orchestra/backups/tests.fixtures.js
Normal file
@@ -0,0 +1,92 @@
|
||||
// a valid footer of a 2
|
||||
exports.VHDFOOTER = {
|
||||
cookie: 'conectix',
|
||||
features: 2,
|
||||
fileFormatVersion: 65536,
|
||||
dataOffset: 512,
|
||||
timestamp: 0,
|
||||
creatorApplication: 'caml',
|
||||
creatorVersion: 1,
|
||||
creatorHostOs: 0,
|
||||
originalSize: 53687091200,
|
||||
currentSize: 53687091200,
|
||||
diskGeometry: { cylinders: 25700, heads: 16, sectorsPerTrackCylinder: 255 },
|
||||
diskType: 3,
|
||||
checksum: 4294962945,
|
||||
uuid: Buffer.from('d8dbcad85265421e8b298d99c2eec551', 'utf-8'),
|
||||
saved: '',
|
||||
hidden: '',
|
||||
reserved: '',
|
||||
}
|
||||
exports.VHDHEADER = {
|
||||
cookie: 'cxsparse',
|
||||
dataOffset: undefined,
|
||||
tableOffset: 2048,
|
||||
headerVersion: 65536,
|
||||
maxTableEntries: 25600,
|
||||
blockSize: 2097152,
|
||||
checksum: 4294964241,
|
||||
parentUuid: null,
|
||||
parentTimestamp: 0,
|
||||
reserved1: 0,
|
||||
parentUnicodeName: '',
|
||||
parentLocatorEntry: [
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
{
|
||||
platformCode: 0,
|
||||
platformDataSpace: 0,
|
||||
platformDataLength: 0,
|
||||
reserved: 0,
|
||||
platformDataOffset: 0,
|
||||
},
|
||||
],
|
||||
reserved2: '',
|
||||
}
|
||||
@@ -1,40 +1,26 @@
|
||||
const assert = require('assert')
|
||||
const map = require('lodash/map')
|
||||
const mapValues = require('lodash/mapValues')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const map = require('lodash/map.js')
|
||||
const mapValues = require('lodash/mapValues.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { chainVhd, checkVhdChain, default: Vhd } = require('vhd-lib')
|
||||
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { dirname } = require('path')
|
||||
|
||||
const { checkVhd } = require('./_checkVhd')
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { getVmBackupDir } = require('./_getVmBackupDir')
|
||||
const { packUuid } = require('./_packUuid')
|
||||
const { Task } = require('./Task')
|
||||
const { formatFilenameDate } = require('../_filenameDate.js')
|
||||
const { getOldEntries } = require('../_getOldEntries.js')
|
||||
const { getVmBackupDir } = require('../_getVmBackupDir.js')
|
||||
const { Task } = require('../Task.js')
|
||||
|
||||
const { warn } = createLogger('xo:proxy:backups:DeltaBackupWriter')
|
||||
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
|
||||
const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
|
||||
const { checkVhd } = require('./_checkVhd.js')
|
||||
const { packUuid } = require('./_packUuid.js')
|
||||
const { Disposable } = require('promise-toolbox')
|
||||
|
||||
exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
constructor(backup, remoteId, settings) {
|
||||
this._adapter = backup.remoteAdapters[remoteId]
|
||||
this._backup = backup
|
||||
this._settings = settings
|
||||
|
||||
this.run = Task.wrapFn(
|
||||
{
|
||||
name: 'export',
|
||||
data: ({ deltaExport }) => ({
|
||||
id: remoteId,
|
||||
isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
|
||||
type: 'remote',
|
||||
}),
|
||||
},
|
||||
this.run
|
||||
)
|
||||
}
|
||||
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
|
||||
|
||||
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
|
||||
async checkBaseVdis(baseUuidToSrcVdi) {
|
||||
const { handler } = this._adapter
|
||||
const backup = this._backup
|
||||
@@ -52,13 +38,13 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
await asyncMap(vhds, async path => {
|
||||
try {
|
||||
await checkVhdChain(handler, path)
|
||||
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
|
||||
await Disposable.use(
|
||||
openVhd(handler, path),
|
||||
vhd => (found = found || vhd.footer.uuid.equals(packUuid(baseUuid)))
|
||||
)
|
||||
} catch (error) {
|
||||
warn('checkBaseVdis', { error })
|
||||
await ignoreErrors.call(handler.unlink(path))
|
||||
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
@@ -70,23 +56,37 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
})
|
||||
}
|
||||
|
||||
async run({ timestamp, deltaExport, sizeContainers }) {
|
||||
async beforeBackup() {
|
||||
await super.beforeBackup()
|
||||
return this._cleanVm({ merge: true })
|
||||
}
|
||||
|
||||
prepare({ isFull }) {
|
||||
// create the task related to this export and ensure all methods are called in this context
|
||||
const task = new Task({
|
||||
name: 'export',
|
||||
data: {
|
||||
id: this._remoteId,
|
||||
isFull,
|
||||
type: 'remote',
|
||||
},
|
||||
})
|
||||
this.transfer = task.wrapFn(this.transfer)
|
||||
this.cleanup = task.wrapFn(this.cleanup, true)
|
||||
|
||||
return task.run(() => this._prepare())
|
||||
}
|
||||
|
||||
async _prepare() {
|
||||
const adapter = this._adapter
|
||||
const backup = this._backup
|
||||
const settings = this._settings
|
||||
const { scheduleId, vm } = this._backup
|
||||
|
||||
const { job, scheduleId, vm } = backup
|
||||
|
||||
const jobId = job.id
|
||||
const handler = adapter.handler
|
||||
const backupDir = getVmBackupDir(vm.uuid)
|
||||
|
||||
// TODO: clean VM backup directory
|
||||
|
||||
const oldBackups = getOldEntries(
|
||||
const oldEntries = getOldEntries(
|
||||
settings.exportRetention - 1,
|
||||
await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
|
||||
)
|
||||
this._oldEntries = oldEntries
|
||||
|
||||
// FIXME: implement optimized multiple VHDs merging with synthetic
|
||||
// delta
|
||||
@@ -98,21 +98,42 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
// The old backups will be eventually merged in future runs of the
|
||||
// job.
|
||||
const { maxMergedDeltasPerRun } = this._settings
|
||||
if (oldBackups.length > maxMergedDeltasPerRun) {
|
||||
oldBackups.length = maxMergedDeltasPerRun
|
||||
if (oldEntries.length > maxMergedDeltasPerRun) {
|
||||
oldEntries.length = maxMergedDeltasPerRun
|
||||
}
|
||||
|
||||
const deleteOldBackups = () =>
|
||||
Task.run({ name: 'merge' }, async () => {
|
||||
let size = 0
|
||||
// delete sequentially from newest to oldest to avoid unnecessary merges
|
||||
for (let i = oldBackups.length; i-- > 0; ) {
|
||||
size += await adapter.deleteDeltaVmBackups([oldBackups[i]])
|
||||
}
|
||||
return {
|
||||
size,
|
||||
}
|
||||
})
|
||||
if (settings.deleteFirst) {
|
||||
await this._deleteOldEntries()
|
||||
}
|
||||
}
|
||||
|
||||
async cleanup() {
|
||||
if (!this._settings.deleteFirst) {
|
||||
await this._deleteOldEntries()
|
||||
}
|
||||
}
|
||||
|
||||
async _deleteOldEntries() {
|
||||
const adapter = this._adapter
|
||||
const oldEntries = this._oldEntries
|
||||
|
||||
// delete sequentially from newest to oldest to avoid unnecessary merges
|
||||
for (let i = oldEntries.length; i-- > 0; ) {
|
||||
await adapter.deleteDeltaVmBackups([oldEntries[i]])
|
||||
}
|
||||
}
|
||||
|
||||
async _transfer({ timestamp, deltaExport, sizeContainers }) {
|
||||
const adapter = this._adapter
|
||||
const backup = this._backup
|
||||
|
||||
const { job, scheduleId, vm } = backup
|
||||
|
||||
const jobId = job.id
|
||||
const handler = adapter.handler
|
||||
const backupDir = getVmBackupDir(vm.uuid)
|
||||
|
||||
// TODO: clean VM backup directory
|
||||
|
||||
const basename = formatFilenameDate(timestamp)
|
||||
const vhds = mapValues(
|
||||
@@ -124,7 +145,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
// don't do delta for it
|
||||
vdi.uuid
|
||||
: vdi.$snapshot_of$uuid
|
||||
}/${basename}.vhd`
|
||||
}/${adapter.getVhdFileName(basename)}`
|
||||
)
|
||||
|
||||
const metadataFilename = `${backupDir}/${basename}.json`
|
||||
@@ -142,11 +163,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
vmSnapshot: this._backup.exportedVm,
|
||||
}
|
||||
|
||||
const { deleteFirst } = settings
|
||||
if (deleteFirst) {
|
||||
await deleteOldBackups()
|
||||
}
|
||||
|
||||
const { size } = await Task.run({ name: 'transfer' }, async () => {
|
||||
await Promise.all(
|
||||
map(deltaExport.vdis, async (vdi, id) => {
|
||||
@@ -173,7 +189,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
await checkVhd(handler, parentPath)
|
||||
}
|
||||
|
||||
await adapter.outputStream(path, deltaExport.streams[`${id}.vhd`], {
|
||||
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
||||
// no checksum for VHDs, because they will be invalidated by
|
||||
// merges and chainings
|
||||
checksum: false,
|
||||
@@ -185,11 +201,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
}
|
||||
|
||||
// set the correct UUID in the VHD
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
vhd.footer.uuid = packUuid(vdi.uuid)
|
||||
await vhd.readBlockAllocationTable() // required by writeFooter()
|
||||
await vhd.writeFooter()
|
||||
await Disposable.use(openVhd(handler, path), async vhd => {
|
||||
vhd.footer.uuid = packUuid(vdi.uuid)
|
||||
await vhd.readBlockAllocationTable() // required by writeFooter()
|
||||
await vhd.writeFooter()
|
||||
})
|
||||
})
|
||||
)
|
||||
return {
|
||||
@@ -201,10 +217,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter {
|
||||
dirMode: backup.config.dirMode,
|
||||
})
|
||||
|
||||
if (!deleteFirst) {
|
||||
await deleteOldBackups()
|
||||
}
|
||||
|
||||
// TODO: run cleanup?
|
||||
}
|
||||
}
|
||||
@@ -1,32 +1,17 @@
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { importDeltaVm, TAG_COPY_SRC } = require('./_deltaVm')
|
||||
const { listReplicatedVms } = require('./_listReplicatedVms')
|
||||
const { Task } = require('./Task')
|
||||
const { formatFilenameDate } = require('../_filenameDate.js')
|
||||
const { getOldEntries } = require('../_getOldEntries.js')
|
||||
const { importDeltaVm, TAG_COPY_SRC } = require('../_deltaVm.js')
|
||||
const { Task } = require('../Task.js')
|
||||
|
||||
exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
|
||||
constructor(backup, sr, settings) {
|
||||
this._backup = backup
|
||||
this._settings = settings
|
||||
this._sr = sr
|
||||
|
||||
this.run = Task.wrapFn(
|
||||
{
|
||||
name: 'export',
|
||||
data: ({ deltaExport }) => ({
|
||||
id: sr.uuid,
|
||||
isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
|
||||
type: 'SR',
|
||||
}),
|
||||
},
|
||||
this.run
|
||||
)
|
||||
}
|
||||
const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
|
||||
const { MixinReplicationWriter } = require('./_MixinReplicationWriter.js')
|
||||
const { listReplicatedVms } = require('./_listReplicatedVms.js')
|
||||
|
||||
exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinReplicationWriter(AbstractDeltaWriter) {
|
||||
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
|
||||
const sr = this._sr
|
||||
const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
|
||||
@@ -51,26 +36,53 @@ exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
|
||||
}
|
||||
}
|
||||
|
||||
async run({ timestamp, deltaExport, sizeContainers }) {
|
||||
const sr = this._sr
|
||||
prepare({ isFull }) {
|
||||
// create the task related to this export and ensure all methods are called in this context
|
||||
const task = new Task({
|
||||
name: 'export',
|
||||
data: {
|
||||
id: this._sr.uuid,
|
||||
isFull,
|
||||
type: 'SR',
|
||||
},
|
||||
})
|
||||
this.transfer = task.wrapFn(this.transfer)
|
||||
this.cleanup = task.wrapFn(this.cleanup, true)
|
||||
|
||||
return task.run(() => this._prepare())
|
||||
}
|
||||
|
||||
async _prepare() {
|
||||
const settings = this._settings
|
||||
const { uuid: srUuid, $xapi: xapi } = this._sr
|
||||
const { scheduleId, vm } = this._backup
|
||||
|
||||
// delete previous interrupted copies
|
||||
ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => vm.$destroy))
|
||||
|
||||
this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
|
||||
|
||||
if (settings.deleteFirst) {
|
||||
await this._deleteOldEntries()
|
||||
}
|
||||
}
|
||||
|
||||
async cleanup() {
|
||||
if (!this._settings.deleteFirst) {
|
||||
await this._deleteOldEntries()
|
||||
}
|
||||
}
|
||||
|
||||
async _deleteOldEntries() {
|
||||
return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
|
||||
}
|
||||
|
||||
async _transfer({ timestamp, deltaExport, sizeContainers }) {
|
||||
const sr = this._sr
|
||||
const { job, scheduleId, vm } = this._backup
|
||||
|
||||
const { uuid: srUuid, $xapi: xapi } = sr
|
||||
|
||||
// delete previous interrupted copies
|
||||
ignoreErrors.call(
|
||||
asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => xapi.VM_destroy(vm.$ref))
|
||||
)
|
||||
|
||||
const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
|
||||
|
||||
const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
|
||||
const { deleteFirst } = settings
|
||||
if (deleteFirst) {
|
||||
await deleteOldBackups()
|
||||
}
|
||||
|
||||
let targetVmRef
|
||||
await Task.run({ name: 'transfer' }, async () => {
|
||||
targetVmRef = await importDeltaVm(
|
||||
@@ -94,9 +106,11 @@ exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
|
||||
targetVm.ha_restart_priority !== '' &&
|
||||
Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
|
||||
targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
|
||||
targetVm.update_blocked_operations(
|
||||
'start',
|
||||
'Start operation for this vm is blocked, clone it if you want to use it.'
|
||||
asyncMap(['start', 'start_on'], op =>
|
||||
targetVm.update_blocked_operations(
|
||||
op,
|
||||
'Start operation for this vm is blocked, clone it if you want to use it.'
|
||||
)
|
||||
),
|
||||
targetVm.update_other_config({
|
||||
'xo:backup:sr': srUuid,
|
||||
@@ -108,9 +122,5 @@ exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
|
||||
'xo:backup:vm': vm.uuid,
|
||||
}),
|
||||
])
|
||||
|
||||
if (!deleteFirst) {
|
||||
await deleteOldBackups()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,20 +1,20 @@
|
||||
const { formatFilenameDate } = require('./_filenameDate')
|
||||
const { getOldEntries } = require('./_getOldEntries')
|
||||
const { getVmBackupDir } = require('./_getVmBackupDir')
|
||||
const { isValidXva } = require('./isValidXva')
|
||||
const { Task } = require('./Task')
|
||||
const { formatFilenameDate } = require('../_filenameDate.js')
|
||||
const { getOldEntries } = require('../_getOldEntries.js')
|
||||
const { getVmBackupDir } = require('../_getVmBackupDir.js')
|
||||
const { Task } = require('../Task.js')
|
||||
|
||||
exports.FullBackupWriter = class FullBackupWriter {
|
||||
constructor(backup, remoteId, settings) {
|
||||
this._backup = backup
|
||||
this._remoteId = remoteId
|
||||
this._settings = settings
|
||||
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
|
||||
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
|
||||
|
||||
exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(AbstractFullWriter) {
|
||||
constructor(props) {
|
||||
super(props)
|
||||
|
||||
this.run = Task.wrapFn(
|
||||
{
|
||||
name: 'export',
|
||||
data: {
|
||||
id: remoteId,
|
||||
id: props.remoteId,
|
||||
type: 'remote',
|
||||
|
||||
// necessary?
|
||||
@@ -25,14 +25,13 @@ exports.FullBackupWriter = class FullBackupWriter {
|
||||
)
|
||||
}
|
||||
|
||||
async run({ timestamp, sizeContainer, stream }) {
|
||||
async _run({ timestamp, sizeContainer, stream }) {
|
||||
const backup = this._backup
|
||||
const remoteId = this._remoteId
|
||||
const settings = this._settings
|
||||
|
||||
const { job, scheduleId, vm } = backup
|
||||
|
||||
const adapter = backup.remoteAdapters[remoteId]
|
||||
const adapter = this._adapter
|
||||
const handler = adapter.handler
|
||||
const backupDir = getVmBackupDir(vm.uuid)
|
||||
|
||||
@@ -68,11 +67,7 @@ exports.FullBackupWriter = class FullBackupWriter {
|
||||
|
||||
await Task.run({ name: 'transfer' }, async () => {
|
||||
await adapter.outputStream(dataFilename, stream, {
|
||||
validator: tmpPath => {
|
||||
if (handler._getFilePath !== undefined) {
|
||||
return isValidXva(handler._getFilePath('/' + tmpPath))
|
||||
}
|
||||
},
|
||||
validator: tmpPath => adapter.isValidXva(tmpPath),
|
||||
})
|
||||
return { size: sizeContainer.size }
|
||||
})
|
||||
@@ -1,23 +1,24 @@
-const ignoreErrors = require('promise-toolbox/ignoreErrors')
-const { asyncMapSettled } = require('@xen-orchestra/async-map')
+const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
+const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
 const { formatDateTime } = require('@xen-orchestra/xapi')
 
-const { formatFilenameDate } = require('./_filenameDate')
-const { getOldEntries } = require('./_getOldEntries')
-const { listReplicatedVms } = require('./_listReplicatedVms')
-const { Task } = require('./Task')
+const { formatFilenameDate } = require('../_filenameDate.js')
+const { getOldEntries } = require('../_getOldEntries.js')
+const { Task } = require('../Task.js')
 
-exports.DisasterRecoveryWriter = class DisasterRecoveryWriter {
-  constructor(backup, sr, settings) {
-    this._backup = backup
-    this._settings = settings
-    this._sr = sr
+const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
+const { MixinReplicationWriter } = require('./_MixinReplicationWriter.js')
+const { listReplicatedVms } = require('./_listReplicatedVms.js')
+
+exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplicationWriter(AbstractFullWriter) {
+  constructor(props) {
+    super(props)
 
     this.run = Task.wrapFn(
       {
         name: 'export',
         data: {
-          id: sr.uuid,
+          id: props.sr.uuid,
           type: 'SR',
 
           // necessary?
@@ -28,7 +29,7 @@ exports.DisasterRecoveryWriter = class DisasterRecoveryWriter {
     )
   }
 
-  async run({ timestamp, sizeContainer, stream }) {
+  async _run({ timestamp, sizeContainer, stream }) {
     const sr = this._sr
     const settings = this._settings
     const { job, scheduleId, vm } = this._backup
@@ -63,9 +64,11 @@ exports.DisasterRecoveryWriter = class DisasterRecoveryWriter {
     const targetVm = await xapi.getRecord('VM', targetVmRef)
 
     await Promise.all([
-      targetVm.update_blocked_operations(
-        'start',
-        'Start operation for this vm is blocked, clone it if you want to use it.'
+      asyncMap(['start', 'start_on'], op =>
+        targetVm.update_blocked_operations(
+          op,
+          'Start operation for this vm is blocked, clone it if you want to use it.'
+        )
       ),
       targetVm.update_other_config({
        'xo:backup:sr': srUuid,
26  @xen-orchestra/backups/writers/_AbstractDeltaWriter.js  Normal file
@@ -0,0 +1,26 @@
const { AbstractWriter } = require('./_AbstractWriter.js')

exports.AbstractDeltaWriter = class AbstractDeltaWriter extends AbstractWriter {
  checkBaseVdis(baseUuidToSrcVdi, baseVm) {
    throw new Error('Not implemented')
  }

  cleanup() {
    throw new Error('Not implemented')
  }

  prepare({ isFull }) {
    throw new Error('Not implemented')
  }

  async transfer({ timestamp, deltaExport, sizeContainers }) {
    try {
      return await this._transfer({ timestamp, deltaExport, sizeContainers })
    } finally {
      // ensure all streams are properly closed
      for (const stream of Object.values(deltaExport.streams)) {
        stream.destroy()
      }
    }
  }
}
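For context, a minimal sketch of how a concrete delta writer could sit on top of this abstract class; the subclass name and method bodies below are invented for illustration and are not part of this diff.

```js
// Hypothetical subclass, for illustration only; not code from this diff.
const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')

class NullDeltaWriter extends AbstractDeltaWriter {
  checkBaseVdis(baseUuidToSrcVdi) {
    // pretend no base VDI can be reused, forcing a full export
    baseUuidToSrcVdi.clear()
  }

  cleanup() {}

  prepare({ isFull }) {}

  // transfer() is inherited: it calls this._transfer() and then destroys
  // every stream in deltaExport.streams, even if _transfer() throws.
  async _transfer({ timestamp, deltaExport, sizeContainers }) {
    return { size: 0 }
  }
}
```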
12  @xen-orchestra/backups/writers/_AbstractFullWriter.js  Normal file
@@ -0,0 +1,12 @@
const { AbstractWriter } = require('./_AbstractWriter.js')

exports.AbstractFullWriter = class AbstractFullWriter extends AbstractWriter {
  async run({ timestamp, sizeContainer, stream }) {
    try {
      return await this._run({ timestamp, sizeContainer, stream })
    } finally {
      // ensure stream is properly closed
      stream.destroy()
    }
  }
}
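A minimal usage sketch, assuming a subclass that simply consumes the export stream; the class below is hypothetical and only illustrates that subclasses implement _run() while the base run() guarantees the stream is destroyed.

```js
// Hypothetical subclass, for illustration only; not code from this diff.
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')

class DevNullFullWriter extends AbstractFullWriter {
  // Callers invoke run(); the base class forwards to _run() and takes care
  // of destroying `stream` in its finally block.
  async _run({ timestamp, sizeContainer, stream }) {
    for await (const chunk of stream) {
      // discard the chunk; a real writer would persist it somewhere
    }
    return { size: sizeContainer.size }
  }
}
```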
10  @xen-orchestra/backups/writers/_AbstractWriter.js  Normal file
@@ -0,0 +1,10 @@
exports.AbstractWriter = class AbstractWriter {
  constructor({ backup, settings }) {
    this._backup = backup
    this._settings = settings
  }

  beforeBackup() {}

  afterBackup() {}
}
51  @xen-orchestra/backups/writers/_MixinBackupWriter.js  Normal file
@@ -0,0 +1,51 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')

const { BACKUP_DIR, getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const { formatFilenameDate } = require('../_filenameDate.js')

const { warn } = createLogger('xo:backups:MixinBackupWriter')

exports.MixinBackupWriter = (BaseClass = Object) =>
  class MixinBackupWriter extends BaseClass {
    #lock
    #vmBackupDir

    constructor({ remoteId, ...rest }) {
      super(rest)

      this._adapter = rest.backup.remoteAdapters[remoteId]
      this._remoteId = remoteId

      this.#vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
    }

    _cleanVm(options) {
      return this._adapter
        .cleanVm(this.#vmBackupDir, { ...options, fixMetadata: true, onLog: warn, lock: false })
        .catch(warn)
    }

    async beforeBackup() {
      const { handler } = this._adapter
      const vmBackupDir = this.#vmBackupDir
      await handler.mktree(vmBackupDir)
      this.#lock = await handler.lock(vmBackupDir)
    }

    async afterBackup() {
      const { disableMergeWorker } = this._backup.config

      const { merge } = await this._cleanVm({ remove: true, merge: disableMergeWorker })
      await this.#lock.dispose()

      // merge worker only compatible with local remotes
      const { handler } = this._adapter
      if (merge && !disableMergeWorker && typeof handler._getRealPath === 'function') {
        await handler.outputFile(join(MergeWorker.CLEAN_VM_QUEUE, formatFilenameDate(new Date())), this._backup.vm.uuid)
        const remotePath = handler._getRealPath()
        await MergeWorker.run(remotePath)
      }
    }
  }
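A sketch of how this mixin is meant to compose with the abstract writers, matching the pattern the diff uses for FullReplicationWriter (MixinReplicationWriter over AbstractFullWriter); the concrete class and its body below are illustrative only.

```js
// Illustrative composition; the real FullBackupWriter in this diff is assumed
// to follow the same pattern but is not reproduced here.
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
const { MixinBackupWriter } = require('./_MixinBackupWriter.js')

class ExampleRemoteWriter extends MixinBackupWriter(AbstractFullWriter) {
  async _run({ timestamp, sizeContainer, stream }) {
    // this._adapter and this._remoteId were set by MixinBackupWriter,
    // this._backup and this._settings by AbstractWriter.
    const adapter = this._adapter
    // ... stream the export through the adapter ...
    return { size: sizeContainer.size }
  }
}

// The orchestrating code can then rely on the shared lifecycle hooks:
//   await writer.beforeBackup()  // mktree + lock the VM backup dir
//   await writer.run({ ... })    // transfer; the stream is always destroyed
//   await writer.afterBackup()   // cleanVm, release lock, maybe queue the merge worker
```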
@@ -0,0 +1,8 @@
exports.MixinReplicationWriter = (BaseClass = Object) =>
  class MixinReplicationWriter extends BaseClass {
    constructor({ sr, ...rest }) {
      super(rest)

      this._sr = sr
    }
  }
6  @xen-orchestra/backups/writers/_checkVhd.js  Normal file
@@ -0,0 +1,6 @@
const openVhd = require('vhd-lib').openVhd
const Disposable = require('promise-toolbox/Disposable')

exports.checkVhd = async function checkVhd(handler, path) {
  await Disposable.use(openVhd(handler, path), () => {})
}
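A brief usage sketch: the wrapper name and its arguments below are assumptions, only the call to checkVhd comes from this diff.

```js
// Hypothetical caller: `handler` is a remote handler and `path` a VHD path
// on that remote; both names are assumed for illustration.
const { checkVhd } = require('./_checkVhd.js')

async function assertVhdReadable(handler, path) {
  // opens the VHD via vhd-lib and disposes it immediately;
  // rejects if the file cannot be parsed as a VHD
  await checkVhd(handler, path)
}
```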
1  @xen-orchestra/cr-seed-cli/.npmignore  Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
@@ -1,7 +1,7 @@
 #!/usr/bin/env node
 
-const defer = require('golike-defer').default
 const { Ref, Xapi } = require('xen-api')
+const { defer } = require('golike-defer')
 
 const pkg = require('./package.json')
 
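For reference, a minimal sketch of the import style this hunk switches to, assuming the usual golike-defer pattern where the wrapped function receives $defer as its first argument; the helper and resource names are made up.

```js
// Minimal sketch, not code from this diff.
const { defer } = require('golike-defer')

const main = defer(async ($defer, args) => {
  const resource = await acquireSomething(args) // hypothetical helper
  $defer(() => resource.release())              // cleanup runs once main() settles

  // ... use resource ...
})
```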
@@ -77,7 +77,11 @@ ${cliName} v${pkg.version}
       'xo:backup:sr': tgtSr.uuid,
       'xo:copy_of': srcSnapshotUuid,
     }),
-    tgtVm.update_blocked_operations('start', 'Start operation for this vm is blocked, clone it if you want to use it.'),
+    Promise.all(
+      ['start', 'start_on'].map(op =>
+        tgtVm.update_blocked_operations(op, 'Start operation for this vm is blocked, clone it if you want to use it.')
+      )
+    ),
     Promise.all(
       userDevices.map(userDevice => {
         const srcDisk = srcDisks[userDevice]
@@ -18,7 +18,7 @@
   "preferGlobal": true,
   "dependencies": {
     "golike-defer": "^0.5.1",
-    "xen-api": "^0.30.0"
+    "xen-api": "^0.35.1"
   },
   "scripts": {
     "postversion": "npm publish"
1  @xen-orchestra/cron/.eslintrc.js  Symbolic link
@@ -0,0 +1 @@
../../scripts/babel-eslintrc.js
@@ -1,24 +0,0 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map

/examples/
example.js
example.js.map
*.example.js
*.example.js.map

/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map

/test/
/tests/
*.spec.js
*.spec.js.map

__snapshots__/
1  @xen-orchestra/cron/.npmignore  Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
Some files were not shown because too many files have changed in this diff.