Compare commits
672 Commits
nr-nbd-pro ... nr-fix-S3-
22
.eslintrc.js
@@ -1,13 +1,5 @@
|
||||
module.exports = {
|
||||
extends: [
|
||||
'plugin:eslint-comments/recommended',
|
||||
|
||||
'standard',
|
||||
'standard-jsx',
|
||||
'prettier',
|
||||
'prettier/standard',
|
||||
'prettier/react',
|
||||
],
|
||||
extends: ['plugin:eslint-comments/recommended', 'standard', 'standard-jsx', 'prettier'],
|
||||
globals: {
|
||||
__DEV__: true,
|
||||
$Dict: true,
|
||||
@@ -21,19 +13,13 @@ module.exports = {
|
||||
|
||||
overrides: [
|
||||
{
|
||||
files: ['cli.js', '*-cli.js', '**/*cli*/**/*.js'],
|
||||
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
|
||||
rules: {
|
||||
'no-console': 'off',
|
||||
},
|
||||
},
|
||||
],
|
||||
|
||||
parser: 'babel-eslint',
|
||||
parserOptions: {
|
||||
ecmaFeatures: {
|
||||
legacyDecorators: true,
|
||||
},
|
||||
},
|
||||
rules: {
|
||||
// disabled because XAPI objects are using camel case
|
||||
camelcase: ['off'],
|
||||
@@ -48,9 +34,5 @@ module.exports = {
|
||||
'lines-between-class-members': 'off',
|
||||
|
||||
'no-console': ['error', { allow: ['warn', 'error'] }],
|
||||
'no-var': 'error',
|
||||
'node/no-extraneous-import': 'error',
|
||||
'node/no-extraneous-require': 'error',
|
||||
'prefer-const': 'error',
|
||||
},
|
||||
}
|
||||
|
||||
10
.gitignore
vendored
@@ -4,20 +4,24 @@
|
||||
/lerna-debug.log
|
||||
/lerna-debug.log.*
|
||||
|
||||
/@vates/*/dist/
|
||||
/@vates/*/node_modules/
|
||||
/@xen-orchestra/*/dist/
|
||||
/@xen-orchestra/*/node_modules/
|
||||
/packages/*/dist/
|
||||
/packages/*/node_modules/
|
||||
|
||||
/@xen-orchestra/proxy/src/app/mixins/index.mjs
|
||||
|
||||
/packages/vhd-cli/src/commands/index.js
|
||||
|
||||
/packages/xen-api/examples/node_modules/
|
||||
/packages/xen-api/plot.dat
|
||||
|
||||
/packages/xo-server/.xo-server.*
|
||||
/packages/xo-server/src/api/index.js
|
||||
/packages/xo-server/src/xapi/mixins/index.js
|
||||
/packages/xo-server/src/xo-mixins/index.js
|
||||
/packages/xo-server/src/api/index.mjs
|
||||
/packages/xo-server/src/xapi/mixins/index.mjs
|
||||
/packages/xo-server/src/xo-mixins/index.mjs
|
||||
|
||||
/packages/xo-server-auth-ldap/ldap.cache.conf
|
||||
|
||||
|
||||
@@ -3,4 +3,9 @@ module.exports = {
|
||||
jsxSingleQuote: true,
|
||||
semi: false,
|
||||
singleQuote: true,
|
||||
|
||||
// 2020-11-24: Requested by nraynaud and approved by the rest of the team
|
||||
//
|
||||
// https://team.vates.fr/vates/pl/a1i8af1b9id7pgzm3jcg4toacy
|
||||
printWidth: 120,
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
language: node_js
|
||||
node_js:
|
||||
- 12
|
||||
- 14
|
||||
|
||||
# Use containers.
|
||||
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
|
||||
|
||||
1
@vates/coalesce-calls/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -20,9 +20,6 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"files": [
|
||||
"index.js"
|
||||
],
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
|
||||
1
@vates/compose/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
81
@vates/compose/README.md
Normal file
@@ -0,0 +1,81 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/compose
|
||||
|
||||
[](https://npmjs.org/package/@vates/compose)  [](https://bundlephobia.com/result?p=@vates/compose) [](https://npmjs.org/package/@vates/compose)
|
||||
|
||||
> Compose functions from left to right
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/compose):
|
||||
|
||||
```
|
||||
> npm install --save @vates/compose
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import { compose } from '@vates/compose'
|
||||
|
||||
const add2 = x => x + 2
|
||||
const mul3 = x => x * 3
|
||||
|
||||
// const f = x => mul3(add2(x))
|
||||
const f = compose(add2, mul3)
|
||||
|
||||
console.log(f(5))
|
||||
// → 21
|
||||
```
|
||||
|
||||
> The call context (`this`) of the composed function is forwarded to all functions.
|
||||
|
||||
The first function is called with all arguments of the composed function:
|
||||
|
||||
```js
|
||||
const add = (x, y) => x + y
|
||||
const mul3 = x => x * 3
|
||||
|
||||
// const f = (x, y) => mul3(add(x, y))
|
||||
const f = compose(add, mul3)
|
||||
|
||||
console.log(f(4, 5))
|
||||
// → 27
|
||||
```
|
||||
|
||||
Functions may also be passed in an array:
|
||||
|
||||
```js
|
||||
const f = compose([add2, mul3])
|
||||
```
|
||||
|
||||
Options can be passed as the first parameter:
|
||||
|
||||
```js
|
||||
const f = compose(
|
||||
{
|
||||
// compose async functions
|
||||
async: true,
|
||||
|
||||
// compose from right to left
|
||||
right: true,
|
||||
},
|
||||
[add2, mul3]
|
||||
)
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcome, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
48
@vates/compose/USAGE.md
Normal file
@@ -0,0 +1,48 @@
|
||||
```js
|
||||
import { compose } from '@vates/compose'
|
||||
|
||||
const add2 = x => x + 2
|
||||
const mul3 = x => x * 3
|
||||
|
||||
// const f = x => mul3(add2(x))
|
||||
const f = compose(add2, mul3)
|
||||
|
||||
console.log(f(5))
|
||||
// → 21
|
||||
```
|
||||
|
||||
> The call context (`this`) of the composed function is forwarded to all functions.
|
||||
|
||||
The first function is called with all arguments of the composed function:
|
||||
|
||||
```js
|
||||
const add = (x, y) => x + y
|
||||
const mul3 = x => x * 3
|
||||
|
||||
// const f = (x, y) => mul3(add(x, y))
|
||||
const f = compose(add, mul3)
|
||||
|
||||
console.log(f(4, 5))
|
||||
// → 27
|
||||
```
|
||||
|
||||
Functions may also be passed in an array:
|
||||
|
||||
```js
|
||||
const f = compose([add2, mul3])
|
||||
```
|
||||
|
||||
Options can be passed as the first parameter:
|
||||
|
||||
```js
|
||||
const f = compose(
|
||||
{
|
||||
// compose async functions
|
||||
async: true,
|
||||
|
||||
// compose from right to left
|
||||
right: true,
|
||||
},
|
||||
[add2, mul3]
|
||||
)
|
||||
```
|
||||
46
@vates/compose/index.js
Normal file
@@ -0,0 +1,46 @@
|
||||
'use strict'
|
||||
|
||||
const defaultOpts = { async: false, right: false }
|
||||
|
||||
exports.compose = function compose(opts, fns) {
|
||||
if (Array.isArray(opts)) {
|
||||
fns = opts
|
||||
opts = defaultOpts
|
||||
} else if (typeof opts === 'object') {
|
||||
opts = Object.assign({}, defaultOpts, opts)
|
||||
if (!Array.isArray(fns)) {
|
||||
fns = Array.prototype.slice.call(arguments, 1)
|
||||
}
|
||||
} else {
|
||||
fns = Array.from(arguments)
|
||||
opts = defaultOpts
|
||||
}
|
||||
|
||||
const n = fns.length
|
||||
if (n === 0) {
|
||||
throw new TypeError('at least one function must be passed')
|
||||
}
|
||||
if (n === 1) {
|
||||
return fns[0]
|
||||
}
|
||||
|
||||
if (opts.right) {
|
||||
fns.reverse()
|
||||
}
|
||||
|
||||
return opts.async
|
||||
? async function () {
|
||||
let value = await fns[0].apply(this, arguments)
|
||||
for (let i = 1; i < n; ++i) {
|
||||
value = await fns[i].call(this, value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
: function () {
|
||||
let value = fns[0].apply(this, arguments)
|
||||
for (let i = 1; i < n; ++i) {
|
||||
value = fns[i].call(this, value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
}
|
||||
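As an extra illustration of the `async` option handled by the implementation above, here is a hedged usage sketch (the helper names are invented for the example):

```js
const { compose } = require('@vates/compose')

// placeholder async step followed by a sync step
const fetchUser = async id => ({ id, name: 'Alice' })
const extractName = user => user.name

const getUserName = compose({ async: true }, fetchUser, extractName)

getUserName(1).then(name => console.log(name)) // → 'Alice'
```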
66
@vates/compose/index.spec.js
Normal file
@@ -0,0 +1,66 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
const { compose } = require('./')
|
||||
|
||||
const add2 = x => x + 2
|
||||
const mul3 = x => x * 3
|
||||
|
||||
describe('compose()', () => {
|
||||
it('throws when no functions are passed', () => {
|
||||
expect(() => compose()).toThrow(TypeError)
|
||||
expect(() => compose([])).toThrow(TypeError)
|
||||
})
|
||||
|
||||
it('applies from left to right', () => {
|
||||
expect(compose(add2, mul3)(5)).toBe(21)
|
||||
})
|
||||
|
||||
it('accepts functions in an array', () => {
|
||||
expect(compose([add2, mul3])(5)).toBe(21)
|
||||
})
|
||||
|
||||
it('can apply from right to left', () => {
|
||||
expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
|
||||
})
|
||||
|
||||
it('accepts options with functions in an array', () => {
|
||||
expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
|
||||
})
|
||||
|
||||
it('can compose async functions', async () => {
|
||||
expect(
|
||||
await compose(
|
||||
{ async: true },
|
||||
async x => x + 2,
|
||||
async x => x * 3
|
||||
)(5)
|
||||
).toBe(21)
|
||||
})
|
||||
|
||||
it('forwards all args to first function', () => {
|
||||
expect.assertions(1)
|
||||
|
||||
const expectedArgs = [Math.random(), Math.random()]
|
||||
compose(
|
||||
(...args) => {
|
||||
expect(args).toEqual(expectedArgs)
|
||||
},
|
||||
// add a second function to avoid the one function special case
|
||||
Function.prototype
|
||||
)(...expectedArgs)
|
||||
})
|
||||
|
||||
it('forwards context to all functions', () => {
|
||||
expect.assertions(2)
|
||||
|
||||
const expectedThis = {}
|
||||
compose(
|
||||
function () {
|
||||
expect(this).toBe(expectedThis)
|
||||
},
|
||||
function () {
|
||||
expect(this).toBe(expectedThis)
|
||||
}
|
||||
).call(expectedThis)
|
||||
})
|
||||
})
|
||||
24
@vates/compose/package.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@vates/compose",
|
||||
"description": "Compose functions from left to right",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/compose",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/compose",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "2.0.0",
|
||||
"engines": {
|
||||
"node": ">=7.6"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
}
|
||||
1
@vates/decorate-with/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
1
@vates/disposable/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
89
@vates/disposable/README.md
Normal file
@@ -0,0 +1,89 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/disposable
|
||||
|
||||
[](https://npmjs.org/package/@vates/disposable)  [](https://bundlephobia.com/result?p=@vates/disposable) [](https://npmjs.org/package/@vates/disposable)
|
||||
|
||||
> Utilities for disposables
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/disposable):
|
||||
|
||||
```
|
||||
> npm install --save @vates/disposable
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
This library contains utilities for disposables as defined by the [`promise-toolbox` library](https://github.com/JsCommunity/promise-toolbox#resource-management).
|
||||
|
||||
### `deduped(fn, keyFn)`
|
||||
|
||||
Creates a new function that wraps `fn`: instead of creating a new disposable on each call, it returns copies of the same one when `keyFn` returns the same keys.
|
||||
|
||||
Those copies contain the same value and can be disposed independently; the source disposable is only disposed when all copies have been disposed.
|
||||
|
||||
`keyFn` is called with the same context and arguments as the wrapping function and must return an array of keys, which are used to identify which disposables should be grouped together.
|
||||
|
||||
```js
|
||||
import { deduped } from '@vates/disposable/deduped'
|
||||
|
||||
// the connection to the given host is established once, on the first call, then shared with subsequent calls
const getConnection = deduped(async function (host) {
  const connection = new Connection(host)
  return new Disposable(connection, () => connection.close())
}, host => [host])
|
||||
```
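A hedged sketch of the independent-dispose behaviour described above, reusing `getConnection` from the previous example (the host name is made up):

```js
const c1 = await getConnection('xapi.example.org')
const c2 = await getConnection('xapi.example.org') // same key → same underlying connection

c1.dispose() // the connection stays open, c2 still holds it
c2.dispose() // last copy disposed → connection.close() is finally called
```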
|
||||
|
||||
### `debounceResource(disposable, delay)`
|
||||
|
||||
Creates a new disposable with the same value and with a delayed disposer.
|
||||
|
||||
When this disposer is called, the source disposable is only disposed after `delay` has elapsed.
|
||||
|
||||
```js
|
||||
import { createDebounceResource } from '@vates/disposable/debounceResource'
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
|
||||
// it will wait for 10 seconds before calling the disposer
|
||||
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
|
||||
```
|
||||
|
||||
### `debounceResource.flushAll()`
|
||||
|
||||
Immediately runs all pending delayed disposers and, for disposables still in use, cancels the delay so they will be disposed as soon as their disposer is called.
|
||||
|
||||
```js
|
||||
import { createDebounceResource } from '@vates/disposable/debounceResource'
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
|
||||
const res1 = await debounceResource(res, 10e3)
|
||||
const res2 = await debounceResource(res, 10e3)
|
||||
const res3 = await debounceResource(res, 10e3)
|
||||
|
||||
res1.dispose()
res2.dispose()
|
||||
// res3 is still in use
|
||||
|
||||
debounceResource.flushAll()
|
||||
// res1 and res2 are immediately disposed
|
||||
// res3 will be disposed immediately when its disposer is called
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcome, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
56
@vates/disposable/USAGE.md
Normal file
@@ -0,0 +1,56 @@
|
||||
This library contains utilities for disposables as defined by the [`promise-toolbox` library](https://github.com/JsCommunity/promise-toolbox#resource-management).
|
||||
|
||||
### `deduped(fn, keyFn)`
|
||||
|
||||
Creates a new function that wraps `fn`: instead of creating a new disposable on each call, it returns copies of the same one when `keyFn` returns the same keys.
|
||||
|
||||
Those copies contain the same value and can be disposed independently; the source disposable is only disposed when all copies have been disposed.
|
||||
|
||||
`keyFn` is called with the same context and arguments as the wrapping function and must return an array of keys, which are used to identify which disposables should be grouped together.
|
||||
|
||||
```js
|
||||
import { deduped } from '@vates/disposable/deduped'
|
||||
|
||||
// the connection to the given host is established once, on the first call, then shared with subsequent calls
const getConnection = deduped(async function (host) {
  const connection = new Connection(host)
  return new Disposable(connection, () => connection.close())
}, host => [host])
|
||||
```
|
||||
|
||||
### `debounceResource(disposable, delay)`
|
||||
|
||||
Creates a new disposable with the same value and with a delayed disposer.
|
||||
|
||||
When this disposer is called, the source disposable is only disposed after `delay` has elapsed.
|
||||
|
||||
```js
|
||||
import { createDebounceResource } from '@vates/disposable/debounceResource'
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
|
||||
// it will wait for 10 seconds before calling the disposer
|
||||
Disposable.use(debounceResource(getConnection(host), 10e3), connection => {})
|
||||
```
|
||||
|
||||
### `debounceResource.flushAll()`
|
||||
|
||||
Immediately runs all pending delayed disposers and, for disposables still in use, cancels the delay so they will be disposed as soon as their disposer is called.
|
||||
|
||||
```js
|
||||
import { createDebounceResource } from '@vates/disposable/debounceResource'
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
|
||||
const res1 = await debounceResource(res, 10e3)
|
||||
const res2 = await debounceResource(res, 10e3)
|
||||
const res3 = await debounceResource(res, 10e3)
|
||||
|
||||
res1.dispose()
res2.dispose()
|
||||
// res3 is still in use
|
||||
|
||||
debounceResource.flushAll()
|
||||
// res1 and res2 are immediately disposed
|
||||
// res3 will be disposed immediately when its disposer is called
|
||||
```
|
||||
56
@vates/disposable/debounceResource.js
Normal file
@@ -0,0 +1,56 @@
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
|
||||
const { warn } = createLogger('vates:disposable:debounceResource')
|
||||
|
||||
exports.createDebounceResource = () => {
|
||||
const flushers = new Set()
|
||||
async function debounceResource(pDisposable, delay = debounceResource.defaultDelay) {
|
||||
if (delay === 0) {
|
||||
return pDisposable
|
||||
}
|
||||
|
||||
const disposable = await pDisposable
|
||||
|
||||
let timeoutId
|
||||
const disposeWrapper = async () => {
|
||||
if (timeoutId !== undefined) {
|
||||
clearTimeout(timeoutId)
|
||||
timeoutId = undefined
|
||||
flushers.delete(flusher)
|
||||
|
||||
try {
|
||||
await disposable.dispose()
|
||||
} catch (error) {
|
||||
warn(error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const flusher = () => {
|
||||
const shouldDisposeNow = timeoutId !== undefined
|
||||
if (shouldDisposeNow) {
|
||||
return disposeWrapper()
|
||||
} else {
|
||||
// will dispose ASAP
|
||||
delay = 0
|
||||
}
|
||||
}
|
||||
flushers.add(flusher)
|
||||
|
||||
return {
|
||||
dispose() {
|
||||
timeoutId = setTimeout(disposeWrapper, delay)
|
||||
},
|
||||
value: disposable.value,
|
||||
}
|
||||
}
|
||||
debounceResource.flushAll = () => {
|
||||
// iterate synchronously so that a flusher added while the current ones are being flushed is not removed by the clear() below
|
||||
const promise = asyncMap(flushers, flush => flush())
|
||||
flushers.clear()
|
||||
return promise
|
||||
}
|
||||
|
||||
return debounceResource
|
||||
}
|
||||
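Note that the implementation above falls back to `debounceResource.defaultDelay` when no explicit delay is passed; a hedged sketch of how that property could be used (`getResource` and the one-minute value are arbitrary):

```js
const { createDebounceResource } = require('@vates/disposable/debounceResource')

const debounceResource = createDebounceResource()
debounceResource.defaultDelay = 60e3 // keep resources alive for one minute after dispose()

// no delay passed → the default above is used
const resource = await debounceResource(getResource())
```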
29
@vates/disposable/debounceResource.spec.js
Normal file
@@ -0,0 +1,29 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
const { createDebounceResource } = require('./debounceResource')
|
||||
|
||||
jest.useFakeTimers()
|
||||
|
||||
describe('debounceResource()', () => {
|
||||
it('calls the resource disposer after 10 seconds', async () => {
|
||||
const debounceResource = createDebounceResource()
|
||||
const delay = 10e3
|
||||
const dispose = jest.fn()
|
||||
|
||||
const resource = await debounceResource(
|
||||
Promise.resolve({
|
||||
value: '',
|
||||
dispose,
|
||||
}),
|
||||
delay
|
||||
)
|
||||
|
||||
resource.dispose()
|
||||
|
||||
expect(dispose).not.toBeCalled()
|
||||
|
||||
jest.advanceTimersByTime(delay)
|
||||
|
||||
expect(dispose).toBeCalled()
|
||||
})
|
||||
})
|
||||
52
@vates/disposable/deduped.js
Normal file
@@ -0,0 +1,52 @@
|
||||
const ensureArray = require('ensure-array')
|
||||
const { MultiKeyMap } = require('@vates/multi-key-map')
|
||||
|
||||
function State(factory) {
|
||||
this.factory = factory
|
||||
this.users = 0
|
||||
}
|
||||
|
||||
const call = fn => fn()
|
||||
|
||||
exports.deduped = (factory, keyFn = (...args) => args) =>
|
||||
(function () {
|
||||
const states = new MultiKeyMap()
|
||||
return function () {
|
||||
const keys = ensureArray(keyFn.apply(this, arguments))
|
||||
let state = states.get(keys)
|
||||
if (state === undefined) {
|
||||
const result = factory.apply(this, arguments)
|
||||
|
||||
const createFactory = disposable => {
|
||||
const wrapper = {
|
||||
dispose() {
|
||||
if (--state.users === 0) {
|
||||
states.delete(keys)
|
||||
return disposable.dispose()
|
||||
}
|
||||
},
|
||||
value: disposable.value,
|
||||
}
|
||||
|
||||
return () => {
|
||||
return wrapper
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof result.then !== 'function') {
|
||||
state = new State(createFactory(result))
|
||||
} else {
|
||||
result.catch(() => {
|
||||
states.delete(keys)
|
||||
})
|
||||
const pFactory = result.then(createFactory)
|
||||
state = new State(() => pFactory.then(call))
|
||||
}
|
||||
|
||||
states.set(keys, state)
|
||||
}
|
||||
|
||||
++state.users
|
||||
return state.factory()
|
||||
}
|
||||
})()
|
||||
76
@vates/disposable/deduped.spec.js
Normal file
@@ -0,0 +1,76 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
const { deduped } = require('./deduped')
|
||||
|
||||
describe('deduped()', () => {
|
||||
it('calls the resource function only once', async () => {
|
||||
const value = {}
|
||||
const getResource = jest.fn(async () => ({
|
||||
value,
|
||||
dispose: Function.prototype,
|
||||
}))
|
||||
|
||||
const dedupedGetResource = deduped(getResource)
|
||||
|
||||
const { value: v1 } = await dedupedGetResource()
|
||||
const { value: v2 } = await dedupedGetResource()
|
||||
|
||||
expect(getResource).toHaveBeenCalledTimes(1)
|
||||
expect(v1).toBe(value)
|
||||
expect(v2).toBe(value)
|
||||
})
|
||||
|
||||
it('only disposes the source disposable when all its copies are disposed', async () => {
|
||||
const dispose = jest.fn()
|
||||
const getResource = async () => ({
|
||||
value: '',
|
||||
dispose,
|
||||
})
|
||||
|
||||
const dedupedGetResource = deduped(getResource)
|
||||
|
||||
const { dispose: d1 } = await dedupedGetResource()
|
||||
const { dispose: d2 } = await dedupedGetResource()
|
||||
|
||||
d1()
|
||||
|
||||
expect(dispose).not.toHaveBeenCalled()
|
||||
|
||||
d2()
|
||||
|
||||
expect(dispose).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
|
||||
it('works with sync factory', () => {
|
||||
const value = {}
|
||||
const dispose = jest.fn()
|
||||
const dedupedGetResource = deduped(() => ({ value, dispose }))
|
||||
|
||||
const d1 = dedupedGetResource()
|
||||
expect(d1.value).toBe(value)
|
||||
|
||||
const d2 = dedupedGetResource()
|
||||
expect(d2.value).toBe(value)
|
||||
|
||||
d1.dispose()
|
||||
|
||||
expect(dispose).not.toHaveBeenCalled()
|
||||
|
||||
d2.dispose()
|
||||
|
||||
expect(dispose).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
|
||||
it('no race condition on dispose before async acquisition', async () => {
|
||||
const dispose = jest.fn()
|
||||
const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))
|
||||
|
||||
const d1 = await dedupedGetResource()
|
||||
|
||||
dedupedGetResource()
|
||||
|
||||
d1.dispose()
|
||||
|
||||
expect(dispose).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
30
@vates/disposable/package.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@vates/disposable",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/disposable",
|
||||
"description": "Utilities for disposables",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/disposable",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "0.1.1",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/multi-key-map": "^0.1.0",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"ensure-array": "^1.0.0"
|
||||
}
|
||||
}
|
||||
1
@vates/multi-key-map/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
53
@vates/multi-key-map/README.md
Normal file
@@ -0,0 +1,53 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/multi-key-map
|
||||
|
||||
[](https://npmjs.org/package/@vates/multi-key-map)  [](https://bundlephobia.com/result?p=@vates/multi-key-map) [](https://npmjs.org/package/@vates/multi-key-map)
|
||||
|
||||
> Create a map whose values are associated with multiple keys
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):
|
||||
|
||||
```
|
||||
> npm install --save @vates/multi-key-map
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import { MultiKeyMap } from '@vates/multi-key-map'
|
||||
|
||||
const map = new MultiKeyMap()
|
||||
|
||||
const OBJ = {}
|
||||
map.set([], 0)
|
||||
map.set(['foo'], 1)
|
||||
map.set(['foo', 'bar'], 2)
|
||||
map.set(['bar', 'foo'], 3)
|
||||
map.set([OBJ], 4)
|
||||
map.set([{}], 5)
|
||||
|
||||
map.get([]) // 0
|
||||
map.get(['foo']) // 1
|
||||
map.get(['foo', 'bar']) // 2
|
||||
map.get(['bar', 'foo']) // 3
|
||||
map.get([OBJ]) // 4
|
||||
map.get([{}]) // undefined
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcome, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
20
@vates/multi-key-map/USAGE.md
Normal file
@@ -0,0 +1,20 @@
|
||||
```js
|
||||
import { MultiKeyMap } from '@vates/multi-key-map'
|
||||
|
||||
const map = new MultiKeyMap()
|
||||
|
||||
const OBJ = {}
|
||||
map.set([], 0)
|
||||
map.set(['foo'], 1)
|
||||
map.set(['foo', 'bar'], 2)
|
||||
map.set(['bar', 'foo'], 3)
|
||||
map.set([OBJ], 4)
|
||||
map.set([{}], 5)
|
||||
|
||||
map.get([]) // 0
|
||||
map.get(['foo']) // 1
|
||||
map.get(['foo', 'bar']) // 2
|
||||
map.get(['bar', 'foo']) // 3
|
||||
map.get([OBJ]) // 4
|
||||
map.get([{}]) // undefined
|
||||
```
|
||||
@@ -67,7 +67,7 @@ function set(node, i, keys, value) {
|
||||
return node
|
||||
}
|
||||
|
||||
export default class MultiKeyMap {
|
||||
exports.MultiKeyMap = class MultiKeyMap {
|
||||
constructor() {
|
||||
// each node is either a value or a Node if it contains children
|
||||
this._root = undefined
|
||||
@@ -1,6 +1,6 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import MultiKeyMap from './_MultiKeyMap'
|
||||
const { MultiKeyMap } = require('./')
|
||||
|
||||
describe('MultiKeyMap', () => {
|
||||
it('works', () => {
|
||||
28
@vates/multi-key-map/package.json
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@vates/multi-key-map",
|
||||
"description": "Create map with values affected to multiple keys",
|
||||
"keywords": [
|
||||
"cache",
|
||||
"map"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/multi-key-map",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/multi-key-map",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "0.1.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
}
|
||||
1
@vates/parse-duration/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -44,4 +44,4 @@ You may:
|
||||
|
||||
## License
|
||||
|
||||
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
|
||||
@@ -6,7 +6,7 @@ exports.parseDuration = value => {
|
||||
}
|
||||
const duration = ms(value)
|
||||
if (duration === undefined) {
|
||||
throw new TypeError(`not a valid duration: ${duration}`)
|
||||
throw new TypeError(`not a valid duration: ${value}`)
|
||||
}
|
||||
return duration
|
||||
}
|
||||
|
||||
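For context, a hedged sketch of how the corrected `parseDuration` behaves (assuming, as the unshown first lines of the function suggest, that numeric values are returned unchanged):

```js
const { parseDuration } = require('@vates/parse-duration')

parseDuration(60000) // → 60000, numbers are passed through
parseDuration('2 days') // → 172800000, strings are parsed by the `ms` library
parseDuration('foo') // → throws TypeError: not a valid duration: foo
```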
@@ -18,8 +18,8 @@
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"version": "0.1.0",
|
||||
"license": "ISC",
|
||||
"version": "0.1.1",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
|
||||
1
@vates/read-chunk/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -1,27 +1,30 @@
|
||||
exports.readChunk = (stream, size) =>
|
||||
new Promise((resolve, reject) => {
|
||||
function onEnd() {
|
||||
resolve(null)
|
||||
removeListeners()
|
||||
}
|
||||
function onError(error) {
|
||||
reject(error)
|
||||
removeListeners()
|
||||
}
|
||||
function onReadable() {
|
||||
const data = stream.read(size)
|
||||
if (data !== null) {
|
||||
resolve(data)
|
||||
removeListeners()
|
||||
}
|
||||
}
|
||||
function removeListeners() {
|
||||
stream.removeListener('end', onEnd)
|
||||
stream.removeListener('error', onError)
|
||||
stream.removeListener('readable', onReadable)
|
||||
}
|
||||
stream.on('end', onEnd)
|
||||
stream.on('error', onError)
|
||||
stream.on('readable', onReadable)
|
||||
onReadable()
|
||||
})
|
||||
const readChunk = (stream, size) =>
|
||||
size === 0
|
||||
? Promise.resolve(Buffer.alloc(0))
|
||||
: new Promise((resolve, reject) => {
|
||||
function onEnd() {
|
||||
resolve(null)
|
||||
removeListeners()
|
||||
}
|
||||
function onError(error) {
|
||||
reject(error)
|
||||
removeListeners()
|
||||
}
|
||||
function onReadable() {
|
||||
const data = stream.read(size)
|
||||
if (data !== null) {
|
||||
resolve(data)
|
||||
removeListeners()
|
||||
}
|
||||
}
|
||||
function removeListeners() {
|
||||
stream.removeListener('end', onEnd)
|
||||
stream.removeListener('error', onError)
|
||||
stream.removeListener('readable', onReadable)
|
||||
}
|
||||
stream.on('end', onEnd)
|
||||
stream.on('error', onError)
|
||||
stream.on('readable', onReadable)
|
||||
onReadable()
|
||||
})
|
||||
exports.readChunk = readChunk
|
||||
|
||||
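A hedged usage sketch of the reworked `readChunk` above, reading fixed-size chunks until the stream ends (the file path is made up):

```js
const { createReadStream } = require('fs')
const { readChunk } = require('@vates/read-chunk')

async function* chunks(path, size) {
  const stream = createReadStream(path)
  let chunk
  while ((chunk = await readChunk(stream, size)) !== null) {
    yield chunk // Buffer of at most `size` bytes, shorter at the end of the stream
  }
}
```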
43
@vates/read-chunk/index.spec.js
Normal file
@@ -0,0 +1,43 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
const { Readable } = require('stream')
|
||||
|
||||
const { readChunk } = require('./')
|
||||
|
||||
const makeStream = it => Readable.from(it, { objectMode: false })
|
||||
makeStream.obj = Readable.from
|
||||
|
||||
describe('readChunk', () => {
|
||||
it('returns null if stream is empty', async () => {
|
||||
expect(await readChunk(makeStream([]))).toBe(null)
|
||||
})
|
||||
|
||||
describe('with binary stream', () => {
|
||||
it('returns the first chunk of data', async () => {
|
||||
expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
|
||||
})
|
||||
|
||||
it('returns a chunk of the specified size (smaller than first)', async () => {
|
||||
expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
|
||||
})
|
||||
|
||||
it('returns a chunk of the specified size (larger than first)', async () => {
|
||||
expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
|
||||
})
|
||||
|
||||
it('returns less data if stream ends', async () => {
|
||||
expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
|
||||
})
|
||||
|
||||
it('returns an empty buffer if the specified size is 0', async () => {
|
||||
expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
|
||||
})
|
||||
})
|
||||
|
||||
describe('with object stream', () => {
|
||||
it('returns the first chunk of data verbatim', async () => {
|
||||
const chunks = [{}, {}]
|
||||
expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -19,7 +19,7 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.1.1",
|
||||
"version": "0.1.2",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
|
||||
1
@vates/toggle-scripts/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
59
@vates/toggle-scripts/README.md
Normal file
@@ -0,0 +1,59 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/toggle-scripts
|
||||
|
||||
[](https://npmjs.org/package/@vates/toggle-scripts)  [](https://bundlephobia.com/result?p=@vates/toggle-scripts) [](https://npmjs.org/package/@vates/toggle-scripts)
|
||||
|
||||
> Easily enable/disable scripts in package.json
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):
|
||||
|
||||
```
|
||||
> npm install --save @vates/toggle-scripts
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
Usage: toggle-scripts options...
|
||||
|
||||
Easily enable/disable scripts in package.json
|
||||
|
||||
Options
|
||||
+<script> Enable the script <script>, ie remove the prefix `_`
|
||||
-<script> Disable the script <script>, ie prefix it with `_`
|
||||
|
||||
Examples
|
||||
toggle-scripts +postinstall +preuninstall
|
||||
toggle-scripts -postinstall -preuninstall
|
||||
```
|
||||
|
||||
For example, if you want the `postinstall` hook to run only in dev:
|
||||
|
||||
```json
|
||||
// package.json
|
||||
{
|
||||
"scripts": {
|
||||
"postinstall": "<some dev only command>",
|
||||
"prepublishOnly": "toggle-scripts -postinstall",
|
||||
"postpublish": "toggle-scripts +postinstall"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcome, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
26
@vates/toggle-scripts/USAGE.md
Normal file
@@ -0,0 +1,26 @@
|
||||
```
|
||||
Usage: toggle-scripts options...
|
||||
|
||||
Easily enable/disable scripts in package.json
|
||||
|
||||
Options
|
||||
+<script> Enable the script <script>, ie remove the prefix `_`
|
||||
-<script> Disable the script <script>, ie prefix it with `_`
|
||||
|
||||
Examples
|
||||
toggle-scripts +postinstall +preuninstall
|
||||
toggle-scripts -postinstall -preuninstall
|
||||
```
|
||||
|
||||
For example, if you want the `postinstall` hook to run only in dev:
|
||||
|
||||
```json
|
||||
// package.json
|
||||
{
|
||||
"scripts": {
|
||||
"postinstall": "<some dev only command>",
|
||||
"prepublishOnly": "toggle-scripts -postinstall",
|
||||
"postpublish": "toggle-scripts +postinstall"
|
||||
}
|
||||
}
|
||||
```
|
||||
60
@vates/toggle-scripts/index.js
Executable file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const fs = require('fs')
|
||||
|
||||
const mapKeys = (object, iteratee) => {
|
||||
const result = {}
|
||||
for (const key of Object.keys(object)) {
|
||||
result[iteratee(key, object)] = object[key]
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
const args = process.argv.slice(2)
|
||||
if (args.length === 0) {
|
||||
const { description, name, version } = require('./package.json')
|
||||
const bin = 'toggle-scripts'
|
||||
process.stdout.write(`Usage: ${bin} options...
|
||||
|
||||
${description}
|
||||
|
||||
Options
|
||||
+<script> Enable the script <script>, ie remove the prefix \`_\`
|
||||
-<script> Disable the script <script>, ie prefix it with \`_\`
|
||||
|
||||
Examples
|
||||
${bin} +postinstall +preuninstall
|
||||
${bin} -postinstall -preuninstall
|
||||
|
||||
${name} v${version}
|
||||
`)
|
||||
process.exit()
|
||||
}
|
||||
|
||||
const plan = { __proto__: null }
|
||||
for (const arg of args) {
|
||||
const action = arg[0]
|
||||
const script = arg.slice(1)
|
||||
|
||||
if (action === '+') {
|
||||
plan['_' + script] = script
|
||||
} else if (action === '-') {
|
||||
plan[script] = '_' + script
|
||||
} else {
|
||||
throw new Error('invalid param: ' + arg)
|
||||
}
|
||||
}
|
||||
|
||||
const pkgPath = process.env.npm_package_json || './package.json'
|
||||
const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'))
|
||||
pkg.scripts = mapKeys(pkg.scripts, (name, scripts) => {
|
||||
const newName = plan[name]
|
||||
if (newName === undefined) {
|
||||
return name
|
||||
}
|
||||
if (newName in scripts) {
|
||||
throw new Error('script already defined: ' + name)
|
||||
}
|
||||
return newName
|
||||
})
|
||||
fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + '\n')
|
||||
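To make the renaming performed by the CLI above concrete, a hedged before/after sketch of a `scripts` section (the command placeholder comes from the README example):

```json
// before: `npm install` runs the dev-only hook
{ "scripts": { "postinstall": "<some dev only command>" } }

// after `toggle-scripts -postinstall`: the script is disabled by the `_` prefix
{ "scripts": { "_postinstall": "<some dev only command>" } }
```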
38
@vates/toggle-scripts/package.json
Normal file
@@ -0,0 +1,38 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@vates/toggle-scripts",
|
||||
"description": "Easily enable/disable scripts in package.json",
|
||||
"keywords": [
|
||||
"dev",
|
||||
"disable",
|
||||
"enable",
|
||||
"lifecycle",
|
||||
"npm",
|
||||
"package.json",
|
||||
"pinst",
|
||||
"postinstall",
|
||||
"script",
|
||||
"scripts",
|
||||
"toggle"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/toggle-scripts",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/toggle-scripts",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "1.0.0",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"bin": "./index.js",
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
@@ -1,24 +0,0 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
1
@xen-orchestra/async-map/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
[](https://npmjs.org/package/@xen-orchestra/async-map)  [](https://bundlephobia.com/result?p=@xen-orchestra/async-map) [](https://npmjs.org/package/@xen-orchestra/async-map)
|
||||
|
||||
> Similar to Promise.all + lodash.map but wait for all promises to be settled
|
||||
> Promise.all + map for all iterables
|
||||
|
||||
## Install
|
||||
|
||||
@@ -16,10 +16,61 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
### `asyncMap(iterable, iteratee, thisArg = iterable)`
|
||||
|
||||
const array = await asyncMap(collection, iteratee)
|
||||
Similar to `Promise.all + Array#map` for all iterables: calls `iteratee` for each item in `iterable`, and returns a promise of an array containing the awaited result of each call to `iteratee`.
|
||||
|
||||
It rejects as soon as the first call to `iteratee` rejects.
|
||||
|
||||
```js
|
||||
import { asyncMap } from '@xen-orchestra/async-map'
|
||||
|
||||
const array = await asyncMap(iterable, iteratee, thisArg)
|
||||
```
|
||||
|
||||
It can be used with any iterables (`Array`, `Map`, etc.):
|
||||
|
||||
```js
|
||||
const map = new Map()
|
||||
map.set('foo', 42)
|
||||
map.set('bar', 3.14)
|
||||
|
||||
const array = await asyncMap(map, async function ([key, value]) {
|
||||
// TODO: do async computation
|
||||
//
|
||||
// the map can be accessed via `this`
|
||||
})
|
||||
```
|
||||
|
||||
#### Use with plain objects
|
||||
|
||||
Plain objects are not iterable, but you can use `Object.keys`, `Object.values` or `Object.entries` to help:
|
||||
|
||||
```js
|
||||
const object = {
|
||||
foo: 42,
|
||||
bar: 3.14,
|
||||
}
|
||||
|
||||
const array = await asyncMap(
|
||||
Object.entries(object),
|
||||
async function ([key, value]) {
|
||||
// TODO: do async computation
|
||||
//
|
||||
// the object can be accessed via `this` because it's been passed as third arg
|
||||
},
|
||||
object
|
||||
)
|
||||
```
|
||||
|
||||
### `asyncMapSettled(iterable, iteratee, thisArg = iterable)`
|
||||
|
||||
Similar to `asyncMap` but waits for all promises to settle before rejecting.
|
||||
|
||||
```js
|
||||
import { asyncMapSettled } from '@xen-orchestra/async-map'
|
||||
|
||||
const array = await asyncMapSettled(iterable, iteratee, thisArg)
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
@@ -1,5 +1,56 @@
|
||||
```js
|
||||
import asyncMap from '@xen-orchestra/async-map'
|
||||
### `asyncMap(iterable, iteratee, thisArg = iterable)`
|
||||
|
||||
const array = await asyncMap(collection, iteratee)
|
||||
Similar to `Promise.all + Array#map` for all iterables: calls `iteratee` for each item in `iterable`, and returns a promise of an array containing the awaited result of each call to `iteratee`.
|
||||
|
||||
It rejects as soon as the first call to `iteratee` rejects.
|
||||
|
||||
```js
|
||||
import { asyncMap } from '@xen-orchestra/async-map'
|
||||
|
||||
const array = await asyncMap(iterable, iteratee, thisArg)
|
||||
```
|
||||
|
||||
It can be used with any iterables (`Array`, `Map`, etc.):
|
||||
|
||||
```js
|
||||
const map = new Map()
|
||||
map.set('foo', 42)
|
||||
map.set('bar', 3.14)
|
||||
|
||||
const array = await asyncMap(map, async function ([key, value]) {
|
||||
// TODO: do async computation
|
||||
//
|
||||
// the map can be accessed via `this`
|
||||
})
|
||||
```
|
||||
|
||||
#### Use with plain objects
|
||||
|
||||
Plain objects are not iterable, but you can use `Object.keys`, `Object.values` or `Object.entries` to help:
|
||||
|
||||
```js
|
||||
const object = {
|
||||
foo: 42,
|
||||
bar: 3.14,
|
||||
}
|
||||
|
||||
const array = await asyncMap(
|
||||
Object.entries(object),
|
||||
async function ([key, value]) {
|
||||
// TODO: do async computation
|
||||
//
|
||||
// the object can be accessed via `this` because it's been passed as third arg
|
||||
},
|
||||
object
|
||||
)
|
||||
```
|
||||
|
||||
### `asyncMapSettled(iterable, iteratee, thisArg = iterable)`
|
||||
|
||||
Similar to `asyncMap` but waits for all promises to settle before rejecting.
|
||||
|
||||
```js
|
||||
import { asyncMapSettled } from '@xen-orchestra/async-map'
|
||||
|
||||
const array = await asyncMapSettled(iterable, iteratee, thisArg)
|
||||
```
|
||||
|
||||
71
@xen-orchestra/async-map/index.js
Normal file
@@ -0,0 +1,71 @@
|
||||
const wrapCall = (fn, arg, thisArg) => {
|
||||
try {
|
||||
return Promise.resolve(fn.call(thisArg, arg))
|
||||
} catch (error) {
|
||||
return Promise.reject(error)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Similar to Promise.all + Array#map but supports all iterables and does not trigger ESLint array-callback-return
|
||||
*
|
||||
* WARNING: Does not handle plain objects
|
||||
*
|
||||
* @template Item,This
|
||||
* @param {Iterable<Item>} iterable
|
||||
* @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
|
||||
* @param {This} [thisArg]
|
||||
* @returns {Promise<Item[]>}
|
||||
*/
|
||||
exports.asyncMap = function asyncMap(iterable, mapFn, thisArg = iterable) {
|
||||
return Promise.all(Array.from(iterable, mapFn, thisArg))
|
||||
}
|
||||
|
||||
/**
|
||||
* Like `asyncMap` but wait for all promises to settle before rejecting
|
||||
*
|
||||
* @template Item,This
|
||||
* @param {Iterable<Item>} iterable
|
||||
* @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
|
||||
* @param {This} [thisArg]
|
||||
* @returns {Promise<Item[]>}
|
||||
*/
|
||||
exports.asyncMapSettled = function asyncMapSettled(iterable, mapFn, thisArg = iterable) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const onError = e => {
|
||||
if (result !== undefined) {
|
||||
error = e
|
||||
result = undefined
|
||||
}
|
||||
if (--n === 0) {
|
||||
reject(error)
|
||||
}
|
||||
}
|
||||
const onValue = (i, value) => {
|
||||
const hasError = result === undefined
|
||||
if (!hasError) {
|
||||
result[i] = value
|
||||
}
|
||||
if (--n === 0) {
|
||||
if (hasError) {
|
||||
reject(error)
|
||||
} else {
|
||||
resolve(result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let n = 0
|
||||
for (const item of iterable) {
|
||||
const i = n++
|
||||
wrapCall(mapFn, item, thisArg).then(value => onValue(i, value), onError)
|
||||
}
|
||||
|
||||
if (n === 0) {
|
||||
return resolve([])
|
||||
}
|
||||
|
||||
let error
|
||||
let result = new Array(n)
|
||||
})
|
||||
}
|
||||
71
@xen-orchestra/async-map/index.spec.js
Normal file
@@ -0,0 +1,71 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
const { asyncMapSettled } = require('./')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
describe('asyncMapSettled', () => {
|
||||
it('works', async () => {
|
||||
const values = [Math.random(), Math.random()]
|
||||
const spy = jest.fn(async v => v * 2)
|
||||
const iterable = new Set(values)
|
||||
|
||||
// returns an array containing the result of each call
|
||||
expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))
|
||||
|
||||
for (let i = 0, n = values.length; i < n; ++i) {
|
||||
// each call receives the current item as its sole argument
|
||||
expect(spy.mock.calls[i]).toEqual([values[i]])
|
||||
|
||||
// each call has `this` bound to the iterable
|
||||
expect(spy.mock.instances[i]).toBe(iterable)
|
||||
}
|
||||
})
|
||||
|
||||
it('can use a specified thisArg', () => {
|
||||
const thisArg = {}
|
||||
const spy = jest.fn()
|
||||
asyncMapSettled(['foo'], spy, thisArg)
|
||||
expect(spy.mock.instances[0]).toBe(thisArg)
|
||||
})
|
||||
|
||||
it('rejects only when all calls are resolved', async () => {
|
||||
const defers = []
|
||||
const promise = asyncMapSettled([1, 2], () => {
|
||||
let resolve, reject
|
||||
// eslint-disable-next-line promise/param-names
|
||||
const promise = new Promise((_resolve, _reject) => {
|
||||
resolve = _resolve
|
||||
reject = _reject
|
||||
})
|
||||
defers.push({ promise, resolve, reject })
|
||||
return promise
|
||||
})
|
||||
|
||||
let hasSettled = false
|
||||
promise.catch(noop).then(() => {
|
||||
hasSettled = true
|
||||
})
|
||||
|
||||
const error = new Error()
|
||||
defers[0].reject(error)
|
||||
|
||||
// wait for all microtasks to settle
|
||||
await new Promise(resolve => setImmediate(resolve))
|
||||
|
||||
expect(hasSettled).toBe(false)
|
||||
|
||||
defers[1].resolve()
|
||||
|
||||
// wait for all microtasks to settle
|
||||
await new Promise(resolve => setImmediate(resolve))
|
||||
|
||||
expect(hasSettled).toBe(true)
|
||||
await expect(promise).rejects.toBe(error)
|
||||
})
|
||||
|
||||
it('rejects when the last promise rejects', async () => {
|
||||
const error = new Error()
|
||||
await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
|
||||
})
|
||||
})
|
||||
@@ -9,14 +9,18 @@
|
||||
// (V1, K) => MaybePromise<V2>
|
||||
// ): Promise<V2[]>
|
||||
|
||||
import map from 'lodash/map'
|
||||
const map = require('lodash/map')
|
||||
|
||||
// Similar to map() + Promise.all() but wait for all promises to
|
||||
// settle before rejecting (with the first error)
|
||||
const asyncMap = (collection, iteratee) => {
|
||||
/**
|
||||
* Similar to map() + Promise.all() but wait for all promises to settle before
|
||||
* rejecting (with the first error)
|
||||
*
|
||||
* @deprecated Does not support iterables, please use the new implementations
|
||||
*/
|
||||
module.exports = function asyncMapLegacy(collection, iteratee) {
|
||||
let then
|
||||
if (collection != null && typeof (then = collection.then) === 'function') {
|
||||
return then.call(collection, collection => asyncMap(collection, iteratee))
|
||||
return then.call(collection, collection => asyncMapLegacy(collection, iteratee))
|
||||
}
|
||||
|
||||
let errorContainer
|
||||
@@ -39,5 +43,3 @@ const asyncMap = (collection, iteratee) => {
|
||||
return values
|
||||
})
|
||||
}
|
||||
|
||||
export { asyncMap as default }
|
||||
@@ -1,10 +1,17 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@xen-orchestra/async-map",
|
||||
"version": "0.0.0",
|
||||
"version": "0.1.2",
|
||||
"license": "ISC",
|
||||
"description": "Similar to Promise.all + lodash.map but wait for all promises to be settled",
|
||||
"keywords": [],
|
||||
"description": "Promise.all + map for all iterables",
|
||||
"keywords": [
|
||||
"array",
|
||||
"async",
|
||||
"iterable",
|
||||
"map",
|
||||
"settled",
|
||||
"typescript"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/async-map",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
@@ -17,36 +24,13 @@
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"bin": {},
|
||||
"files": [
|
||||
"dist/"
|
||||
],
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"@babel/preset-flow": "^7.0.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^7.0.2",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1 @@
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(
|
||||
require('./package.json')
|
||||
)
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
|
||||
|
||||
1
@xen-orchestra/audit-core/.eslintrc.js
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/babel-eslintrc.js
|
||||
@@ -1,24 +0,0 @@
|
||||
/benchmark/
|
||||
/benchmarks/
|
||||
*.bench.js
|
||||
*.bench.js.map
|
||||
|
||||
/examples/
|
||||
example.js
|
||||
example.js.map
|
||||
*.example.js
|
||||
*.example.js.map
|
||||
|
||||
/fixture/
|
||||
/fixtures/
|
||||
*.fixture.js
|
||||
*.fixture.js.map
|
||||
*.fixtures.js
|
||||
*.fixtures.js.map
|
||||
|
||||
/test/
|
||||
/tests/
|
||||
*.spec.js
|
||||
*.spec.js.map
|
||||
|
||||
__snapshots__/
|
||||
1
@xen-orchestra/audit-core/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -26,14 +26,14 @@
|
||||
"@babel/plugin-proposal-decorators": "^7.8.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.0",
|
||||
"@babel/preset-env": "^7.7.4",
|
||||
"cross": "^1.0.0",
|
||||
"cross-env": "^7.0.2",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/decorate-with": "^0.0.1",
|
||||
"@xen-orchestra/log": "^0.2.0",
|
||||
"core-js": "^3.6.4",
|
||||
"golike-defer": "^0.4.1",
|
||||
"lodash": "^4.17.15",
|
||||
"golike-defer": "^0.5.1",
|
||||
"object-hash": "^2.0.1"
|
||||
},
|
||||
"private": false,
|
||||
|
||||
@@ -2,9 +2,10 @@
|
||||
import 'core-js/features/symbol/async-iterator'
|
||||
|
||||
import assert from 'assert'
|
||||
import createLogger from '@xen-orchestra/log'
|
||||
import defer from 'golike-defer'
|
||||
import hash from 'object-hash'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { decorateWith } from '@vates/decorate-with'
|
||||
import { defer } from 'golike-defer'
|
||||
|
||||
const log = createLogger('xo:audit-core')
|
||||
|
||||
@@ -65,7 +66,7 @@ export class AuditCore {
|
||||
this._storage = storage
|
||||
}
|
||||
|
||||
@defer
|
||||
@decorateWith(defer)
|
||||
async add($defer, subject, event, data) {
|
||||
const time = Date.now()
|
||||
$defer(await this._storage.acquireLock())
|
||||
@@ -119,9 +120,7 @@ export class AuditCore {
|
||||
if (record === undefined) {
|
||||
throw new MissingRecordError(newest, nValid)
|
||||
}
|
||||
if (
|
||||
newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))
|
||||
) {
|
||||
if (newest !== createHash(record, newest.slice(1, newest.indexOf('$', 1)))) {
|
||||
throw new AlteredRecordError(newest, nValid, record)
|
||||
}
|
||||
newest = record.previousId
|
||||
@@ -152,7 +151,7 @@ export class AuditCore {
|
||||
}
|
||||
}
|
||||
|
||||
@defer
|
||||
@decorateWith(defer)
|
||||
async deleteRangeAndRewrite($defer, newest, oldest) {
|
||||
assert.notStrictEqual(newest, undefined)
|
||||
assert.notStrictEqual(oldest, undefined)
|
||||
|
||||
@@ -1,12 +1,6 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import {
|
||||
AlteredRecordError,
|
||||
AuditCore,
|
||||
MissingRecordError,
|
||||
NULL_ID,
|
||||
Storage,
|
||||
} from '.'
|
||||
import { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } from '.'
|
||||
|
||||
const asyncIteratorToArray = async asyncIterator => {
|
||||
const array = []
|
||||
@@ -88,16 +82,13 @@ describe('auditCore', () => {
|
||||
it('detects that a record is missing', async () => {
|
||||
const [newestRecord, deletedRecord] = await storeAuditRecords()
|
||||
|
||||
const nValidRecords = await auditCore.checkIntegrity(
|
||||
NULL_ID,
|
||||
newestRecord.id
|
||||
)
|
||||
const nValidRecords = await auditCore.checkIntegrity(NULL_ID, newestRecord.id)
|
||||
expect(nValidRecords).toBe(DATA.length)
|
||||
|
||||
await db.del(deletedRecord.id)
|
||||
await expect(
|
||||
auditCore.checkIntegrity(NULL_ID, newestRecord.id)
|
||||
).rejects.toEqual(new MissingRecordError(deletedRecord.id, 1))
|
||||
await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
|
||||
new MissingRecordError(deletedRecord.id, 1)
|
||||
)
|
||||
})
|
||||
|
||||
it('detects that a record has been altered', async () => {
|
||||
@@ -106,9 +97,7 @@ describe('auditCore', () => {
|
||||
alteredRecord.event = ''
|
||||
await db.put(alteredRecord)
|
||||
|
||||
await expect(
|
||||
auditCore.checkIntegrity(NULL_ID, newestRecord.id)
|
||||
).rejects.toEqual(
|
||||
await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
|
||||
new AlteredRecordError(alteredRecord.id, 1, alteredRecord)
|
||||
)
|
||||
})
|
||||
|
||||
1
@xen-orchestra/babel-config/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -14,58 +14,39 @@ const configs = {
|
||||
'@babel/plugin-proposal-pipeline-operator': {
|
||||
proposal: 'minimal',
|
||||
},
|
||||
'@babel/preset-env'(pkg) {
|
||||
return {
|
||||
debug: !__TEST__,
|
||||
'@babel/preset-env': {
|
||||
debug: !__TEST__,
|
||||
|
||||
// disabled until https://github.com/babel/babel/issues/8323 is resolved
|
||||
// loose: true,
|
||||
// disabled until https://github.com/babel/babel/issues/8323 is resolved
|
||||
// loose: true,
|
||||
|
||||
shippedProposals: true,
|
||||
targets: (() => {
|
||||
let node = (pkg.engines || {}).node
|
||||
if (node !== undefined) {
|
||||
const trimChars = '^=>~'
|
||||
while (trimChars.includes(node[0])) {
|
||||
node = node.slice(1)
|
||||
}
|
||||
}
|
||||
return { browsers: pkg.browserslist, node }
|
||||
})(),
|
||||
}
|
||||
shippedProposals: true,
|
||||
},
|
||||
}
|
||||
|
||||
const getConfig = (key, ...args) => {
|
||||
const config = configs[key]
|
||||
return config === undefined
|
||||
? {}
|
||||
: typeof config === 'function'
|
||||
? config(...args)
|
||||
: config
|
||||
return config === undefined ? {} : typeof config === 'function' ? config(...args) : config
|
||||
}
|
||||
|
||||
// some plugins must be used in a specific order
|
||||
const pluginsOrder = [
|
||||
'@babel/plugin-proposal-decorators',
|
||||
'@babel/plugin-proposal-class-properties',
|
||||
]
|
||||
const pluginsOrder = ['@babel/plugin-proposal-decorators', '@babel/plugin-proposal-class-properties']
|
||||
|
||||
module.exports = function (pkg, plugins, presets) {
|
||||
plugins === undefined && (plugins = {})
|
||||
presets === undefined && (presets = {})
|
||||
module.exports = function (pkg, configs = {}) {
|
||||
const plugins = {}
|
||||
const presets = {}
|
||||
|
||||
Object.keys(pkg.devDependencies || {}).forEach(name => {
|
||||
if (!(name in presets) && PLUGINS_RE.test(name)) {
|
||||
plugins[name] = getConfig(name, pkg)
|
||||
plugins[name] = { ...getConfig(name, pkg), ...configs[name] }
|
||||
} else if (!(name in presets) && PRESETS_RE.test(name)) {
|
||||
presets[name] = getConfig(name, pkg)
|
||||
presets[name] = { ...getConfig(name, pkg), ...configs[name] }
|
||||
}
|
||||
})
|
||||
|
||||
return {
|
||||
comments: !__PROD__,
|
||||
ignore: __TEST__ ? undefined : [/\.spec\.js$/],
|
||||
ignore: __PROD__ ? [/\.spec\.js$/] : undefined,
|
||||
plugins: Object.keys(plugins)
|
||||
.map(plugin => [plugin, plugins[plugin]])
|
||||
.sort(([a], [b]) => {
|
||||
@@ -74,5 +55,15 @@ module.exports = function (pkg, plugins, presets) {
|
||||
return oA !== -1 && oB !== -1 ? oA - oB : a < b ? -1 : 1
|
||||
}),
|
||||
presets: Object.keys(presets).map(preset => [preset, presets[preset]]),
|
||||
targets: (() => {
|
||||
let node = (pkg.engines || {}).node
|
||||
if (node !== undefined) {
|
||||
const trimChars = '^=>~'
|
||||
while (trimChars.includes(node[0])) {
|
||||
node = node.slice(1)
|
||||
}
|
||||
}
|
||||
return { browsers: pkg.browserslist, node }
|
||||
})(),
|
||||
}
|
||||
}
|
||||
|
||||
1
@xen-orchestra/backups-cli/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
@@ -12,6 +12,26 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backu
|
||||
> npm install --global @xen-orchestra/backups-cli
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
> xo-backups --help
|
||||
Usage:
|
||||
|
||||
xo-backups clean-vms [--merge] [--remove] xo-vm-backups/*
|
||||
|
||||
Detects and repairs issues with VM backups.
|
||||
|
||||
Options:
|
||||
-m, --merge Merge (or continue merging) VHD files that are unused
|
||||
-r, --remove Remove unused, incomplete, orphan, or corrupted files
|
||||
|
||||
|
||||
xo-backups create-symlink-index xo-vm-backups <field path>
|
||||
|
||||
xo-backups info xo-vm-backups/*
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
```
|
||||
> xo-backups --help
|
||||
Usage:
|
||||
|
||||
xo-backups clean-vms [--merge] [--remove] xo-vm-backups/*
|
||||
|
||||
Detects and repairs issues with VM backups.
|
||||
|
||||
Options:
|
||||
-m, --merge Merge (or continue merging) VHD files that are unused
|
||||
-r, --remove Remove unused, incomplete, orphan, or corrupted files
|
||||
|
||||
|
||||
xo-backups create-symlink-index xo-vm-backups <field path>
|
||||
|
||||
xo-backups info xo-vm-backups/*
|
||||
```
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
const curryRight = require('lodash/curryRight')
|
||||
|
||||
module.exports = curryRight((iterable, fn) =>
|
||||
Promise.all(
|
||||
Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn)
|
||||
)
|
||||
)
|
||||
@@ -1,340 +1,33 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
// assigned when options are parsed by the main function
|
||||
let force
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
const assert = require('assert')
|
||||
const flatten = require('lodash/flatten')
|
||||
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
|
||||
const getopts = require('getopts')
|
||||
const limitConcurrency = require('limit-concurrency-decorator').default
|
||||
const lockfile = require('proper-lockfile')
|
||||
const pipe = require('promise-toolbox/pipe')
|
||||
const { default: Vhd, mergeVhd } = require('vhd-lib')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
|
||||
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')
|
||||
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
|
||||
const { resolve } = require('path')
|
||||
|
||||
const asyncMap = require('../_asyncMap')
|
||||
const fs = require('../_fs')
|
||||
|
||||
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will be deleted
|
||||
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
let child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
console.warn('Unused parents of VHD', child)
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
console.warn(' ', parent)
|
||||
})
|
||||
force && console.warn(' merging…')
|
||||
console.warn('')
|
||||
if (force) {
|
||||
// `mergeVhd` does not work with a stream, either
|
||||
// - make it accept a stream
|
||||
// - or create synthetic VHD which is not a stream
|
||||
if (children.length !== 1) {
|
||||
console.warn('TODO: implement merging multiple children')
|
||||
children.length = 1
|
||||
child = children[0]
|
||||
}
|
||||
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
console.log('merging %s: %s/%s', child, done, total)
|
||||
}
|
||||
}, 10e3)
|
||||
|
||||
await mergeVhd(
|
||||
handler,
|
||||
parent,
|
||||
handler,
|
||||
child,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children),
|
||||
{
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
clearInterval(handle)
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
force && fs.rename(parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
console.warn('Unused VHD', child)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(child)
|
||||
}),
|
||||
])
|
||||
})
|
||||
|
||||
const listVhds = pipe([
|
||||
vmDir => vmDir + '/vdis',
|
||||
fs.readdir2,
|
||||
asyncMap(fs.readdir2),
|
||||
flatten,
|
||||
asyncMap(fs.readdir2),
|
||||
flatten,
|
||||
_ => _.filter(_ => _.endsWith('.vhd')),
|
||||
])
|
||||
|
||||
async function handleVm(vmDir) {
|
||||
const vhds = new Set()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(await listVhds(vmDir), async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
vhds.add(path)
|
||||
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
|
||||
const parent = resolve(dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
if (parent in vhdChildren) {
|
||||
const error = new Error(
|
||||
'this script does not support multiple VHD children'
|
||||
)
|
||||
error.parent = parent
|
||||
error.child1 = vhdChildren[parent]
|
||||
error.child2 = path
|
||||
throw error // should we throw?
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Error while checking VHD', path)
|
||||
console.warn(' ', error)
|
||||
if (error != null && error.code === 'ERR_ASSERTION') {
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(path))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
{
|
||||
const deletions = []
|
||||
|
||||
// return true if the VHD has been deleted or is missing
|
||||
const deleteIfOrphan = vhd => {
|
||||
const parent = vhdParents[vhd]
|
||||
if (parent === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
delete vhdParents[vhd]
|
||||
|
||||
deleteIfOrphan(parent)
|
||||
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhd)
|
||||
|
||||
console.warn('Error while checking VHD', vhd)
|
||||
console.warn(' missing parent', parent)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && deletions.push(handler.unlink(vhd))
|
||||
}
|
||||
}
|
||||
|
||||
// > A property that is deleted before it has been visited will not be
|
||||
// > visited later.
|
||||
// >
|
||||
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
|
||||
for (const child in vhdParents) {
|
||||
deleteIfOrphan(child)
|
||||
}
|
||||
|
||||
await Promise.all(deletions)
|
||||
}
|
||||
|
||||
const [jsons, xvas, xvaSums] = await fs
|
||||
.readdir2(vmDir)
|
||||
.then(entries => [
|
||||
entries.filter(_ => _.endsWith('.json')),
|
||||
new Set(entries.filter(_ => _.endsWith('.xva'))),
|
||||
entries.filter(_ => _.endsWith('.xva.checksum')),
|
||||
])
|
||||
|
||||
await asyncMap(xvas, async path => {
|
||||
// check is not good enough to delete the file, the best we can do is report
|
||||
// it
|
||||
if (!(await isValidXva(path))) {
|
||||
console.warn('Potential broken XVA', path)
|
||||
console.warn('')
|
||||
}
|
||||
})
|
||||
|
||||
const unusedVhds = new Set(vhds)
|
||||
const unusedXvas = new Set(xvas)
|
||||
|
||||
// compile the list of unused XVAs and VHDs, and remove backup metadata which
|
||||
// reference a missing XVA/VHD
|
||||
await asyncMap(jsons, async json => {
|
||||
const metadata = JSON.parse(await fs.readFile(json))
|
||||
const { mode } = metadata
|
||||
if (mode === 'full') {
|
||||
const linkedXva = resolve(vmDir, metadata.xva)
|
||||
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
console.warn(' missing file', linkedXva)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(json))
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = (() => {
|
||||
const { vhds } = metadata
|
||||
return Object.keys(vhds).map(key => resolve(vmDir, vhds[key]))
|
||||
})()
|
||||
|
||||
// FIXME: find better approach by keeping as much of the backup as
|
||||
// possible (existing disks) even if one disk is missing
|
||||
if (linkedVhds.every(_ => vhds.has(_))) {
|
||||
linkedVhds.forEach(_ => unusedVhds.delete(_))
|
||||
} else {
|
||||
console.warn('Error while checking backup', json)
|
||||
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
|
||||
console.warn(
|
||||
' %i/%i missing VHDs',
|
||||
missingVhds.length,
|
||||
linkedVhds.length
|
||||
)
|
||||
missingVhds.forEach(vhd => {
|
||||
console.warn(' ', vhd)
|
||||
})
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && (await handler.unlink(json))
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// TODO: parallelize by vm/job/vdi
|
||||
const unusedVhdsDeletion = []
|
||||
{
|
||||
// VHD chains (as list from child to ancestor) to merge indexed by last
|
||||
// ancestor
|
||||
const vhdChainsToMerge = { __proto__: null }
|
||||
|
||||
const toCheck = new Set(unusedVhds)
|
||||
|
||||
const getUsedChildChainOrDelete = vhd => {
|
||||
if (vhd in vhdChainsToMerge) {
|
||||
const chain = vhdChainsToMerge[vhd]
|
||||
delete vhdChainsToMerge[vhd]
|
||||
return chain
|
||||
}
|
||||
|
||||
if (!unusedVhds.has(vhd)) {
|
||||
return [vhd]
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
toCheck.delete(vhd)
|
||||
|
||||
const child = vhdChildren[vhd]
|
||||
if (child !== undefined) {
|
||||
const chain = getUsedChildChainOrDelete(child)
|
||||
if (chain !== undefined) {
|
||||
chain.push(vhd)
|
||||
return chain
|
||||
}
|
||||
}
|
||||
|
||||
console.warn('Unused VHD', vhd)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
force && unusedVhdsDeletion.push(handler.unlink(vhd))
|
||||
}
|
||||
|
||||
toCheck.forEach(vhd => {
|
||||
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
|
||||
})
|
||||
|
||||
Object.keys(vhdChainsToMerge).forEach(key => {
|
||||
const chain = vhdChainsToMerge[key]
|
||||
if (chain !== undefined) {
|
||||
unusedVhdsDeletion.push(mergeVhdChain(chain))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
unusedVhdsDeletion,
|
||||
asyncMap(unusedXvas, path => {
|
||||
console.warn('Unused XVA', path)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(path)
|
||||
}),
|
||||
asyncMap(xvaSums, path => {
|
||||
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
|
||||
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
|
||||
console.warn('Unused XVA checksum', path)
|
||||
force && console.warn(' deleting…')
|
||||
console.warn('')
|
||||
return force && handler.unlink(path)
|
||||
}
|
||||
}),
|
||||
])
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))
|
||||
|
||||
module.exports = async function main(args) {
|
||||
const opts = getopts(args, {
|
||||
const { _, remove, merge } = getopts(args, {
|
||||
alias: {
|
||||
force: 'f',
|
||||
remove: 'r',
|
||||
merge: 'm',
|
||||
},
|
||||
boolean: ['force'],
|
||||
boolean: ['merge', 'remove'],
|
||||
default: {
|
||||
force: false,
|
||||
merge: false,
|
||||
remove: false,
|
||||
},
|
||||
})
|
||||
|
||||
;({ force } = opts)
|
||||
await asyncMap(opts._, async vmDir => {
|
||||
await asyncMap(_, async vmDir => {
|
||||
vmDir = resolve(vmDir)
|
||||
|
||||
// TODO: implement this in `xo-server`, not easy because not compatible with
|
||||
// `@xen-orchestra/fs`.
|
||||
const release = await lockfile.lock(vmDir)
|
||||
try {
|
||||
await handleVm(vmDir)
|
||||
await adapter.cleanVm(vmDir, { remove, merge, onLog: log => console.warn(log) })
|
||||
} catch (error) {
|
||||
console.error('handleVm', vmDir, error)
|
||||
} finally {
|
||||
await release()
|
||||
console.error('adapter.cleanVm', vmDir, error)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
const filenamify = require('filenamify')
|
||||
const get = require('lodash/get')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { dirname, join, relative } = require('path')
|
||||
|
||||
const asyncMap = require('../_asyncMap')
|
||||
const { mktree, readdir2, readFile, symlink2 } = require('../_fs')
|
||||
|
||||
module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
const groupBy = require('lodash/groupBy')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { createHash } = require('crypto')
|
||||
const { dirname, resolve } = require('path')
|
||||
|
||||
const asyncMap = require('../_asyncMap')
|
||||
const { readdir2, readFile, getSize } = require('../_fs')
|
||||
|
||||
const sha512 = str => createHash('sha512').update(str).digest('hex')
|
||||
@@ -10,9 +10,7 @@ const sum = values => values.reduce((a, b) => a + b)
|
||||
|
||||
module.exports = async function info(vmDirs) {
|
||||
const jsonFiles = (
|
||||
await asyncMap(vmDirs, async vmDir =>
|
||||
(await readdir2(vmDir)).filter(_ => _.endsWith('.json'))
|
||||
)
|
||||
await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
|
||||
).flat()
|
||||
|
||||
const hashes = { __proto__: null }
|
||||
@@ -39,9 +37,7 @@ module.exports = async function info(vmDirs) {
|
||||
size:
|
||||
json.length +
|
||||
(await (metadata.mode === 'delta'
|
||||
? asyncMap(Object.values(metadata.vhds), _ =>
|
||||
getSize(resolve(jsonDir, _))
|
||||
).then(sum)
|
||||
? asyncMap(Object.values(metadata.vhds), _ => getSize(resolve(jsonDir, _))).then(sum)
|
||||
: getSize(resolve(jsonDir, metadata.xva)))),
|
||||
}
|
||||
} catch (error) {
|
||||
|
||||
@@ -5,7 +5,14 @@ require('./_composeCommands')({
|
||||
get main() {
|
||||
return require('./commands/clean-vms')
|
||||
},
|
||||
usage: '[--force] xo-vm-backups/*',
|
||||
usage: `[--merge] [--remove] xo-vm-backups/*
|
||||
|
||||
Detects and repairs issues with VM backups.
|
||||
|
||||
Options:
|
||||
-m, --merge Merge (or continue merging) VHD files that are unused
|
||||
-r, --remove Remove unused, incomplete, orphan, or corrupted files
|
||||
`,
|
||||
},
|
||||
'create-symlink-index': {
|
||||
get main() {
|
||||
|
||||
@@ -6,23 +6,17 @@
|
||||
"preferGlobal": true,
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/backups": "^0.1.1",
|
||||
"@xen-orchestra/fs": "^0.11.1",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/backups": "^0.11.0",
|
||||
"@xen-orchestra/fs": "^0.17.0",
|
||||
"filenamify": "^4.1.0",
|
||||
"getopts": "^2.2.5",
|
||||
"limit-concurrency-decorator": "^0.4.0",
|
||||
"lodash": "^4.17.15",
|
||||
"promise-toolbox": "^0.15.0",
|
||||
"proper-lockfile": "^4.1.1",
|
||||
"vhd-lib": "^0.8.0"
|
||||
"promise-toolbox": "^0.19.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=7.10.1"
|
||||
},
|
||||
"files": [
|
||||
"commands",
|
||||
"*.js"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
|
||||
"name": "@xen-orchestra/backups-cli",
|
||||
"repository": {
|
||||
@@ -33,7 +27,7 @@
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"version": "0.2.1",
|
||||
"version": "0.6.0",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
|
||||
1
@xen-orchestra/backups/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
263
@xen-orchestra/backups/Backup.js
Normal file
@@ -0,0 +1,263 @@
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const { compileTemplate } = require('@xen-orchestra/template')
|
||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
||||
|
||||
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
|
||||
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { VmBackup } = require('./_VmBackup.js')
|
||||
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const getAdaptersByRemote = adapters => {
|
||||
const adaptersByRemote = {}
|
||||
adapters.forEach(({ adapter, remoteId }) => {
|
||||
adaptersByRemote[remoteId] = adapter
|
||||
})
|
||||
return adaptersByRemote
|
||||
}
|
||||
|
||||
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
|
||||
|
||||
exports.Backup = class Backup {
|
||||
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
|
||||
this._config = config
|
||||
this._getRecord = getConnectedRecord
|
||||
this._job = job
|
||||
this._schedule = schedule
|
||||
|
||||
this._getAdapter = Disposable.factory(function* (remoteId) {
|
||||
return {
|
||||
adapter: yield getAdapter(remoteId),
|
||||
remoteId,
|
||||
}
|
||||
})
|
||||
|
||||
this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
|
||||
'{job.name}': job.name,
|
||||
'{vm.name_label}': vm => vm.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
run() {
|
||||
const type = this._job.type
|
||||
if (type === 'backup') {
|
||||
return this._runVmBackup()
|
||||
} else if (type === 'metadataBackup') {
|
||||
return this._runMetadataBackup()
|
||||
} else {
|
||||
throw new Error(`No runner for the backup type ${type}`)
|
||||
}
|
||||
}
|
||||
|
||||
async _runMetadataBackup() {
|
||||
const schedule = this._schedule
|
||||
const job = this._job
|
||||
const remoteIds = extractIdsFromSimplePattern(job.remotes)
|
||||
if (remoteIds.length === 0) {
|
||||
throw new Error('metadata backup job cannot run without remotes')
|
||||
}
|
||||
|
||||
const config = this._config
|
||||
const settings = {
|
||||
...config.defaultSettings,
|
||||
...config.metadata.defaultSettings,
|
||||
...job.settings[''],
|
||||
...job.settings[schedule.id],
|
||||
}
|
||||
|
||||
const poolIds = extractIdsFromSimplePattern(job.pools)
|
||||
const isEmptyPools = poolIds.length === 0
|
||||
const isXoMetadata = job.xoMetadata !== undefined
|
||||
if (!isXoMetadata && isEmptyPools) {
|
||||
throw new Error('no metadata mode found')
|
||||
}
|
||||
|
||||
const { retentionPoolMetadata, retentionXoMetadata } = settings
|
||||
|
||||
if (
|
||||
(retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
|
||||
(!isXoMetadata && retentionPoolMetadata === 0) ||
|
||||
(isEmptyPools && retentionXoMetadata === 0)
|
||||
) {
|
||||
throw new Error('no retentions corresponding to the metadata modes found')
|
||||
}
|
||||
|
||||
await Disposable.use(
|
||||
Disposable.all(
|
||||
poolIds.map(id =>
|
||||
this._getRecord('pool', id).catch(error => {
|
||||
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
|
||||
runTask(
|
||||
{
|
||||
name: 'get pool record',
|
||||
data: { type: 'pool', id },
|
||||
},
|
||||
() => Promise.reject(error)
|
||||
)
|
||||
})
|
||||
)
|
||||
),
|
||||
Disposable.all(
|
||||
remoteIds.map(id =>
|
||||
this._getAdapter(id).catch(error => {
|
||||
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
|
||||
runTask(
|
||||
{
|
||||
name: 'get remote adapter',
|
||||
data: { type: 'remote', id },
|
||||
},
|
||||
() => Promise.reject(error)
|
||||
)
|
||||
})
|
||||
)
|
||||
),
|
||||
async (pools, remoteAdapters) => {
|
||||
// remove adapters that failed (already handled)
|
||||
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
|
||||
if (remoteAdapters.length === 0) {
|
||||
return
|
||||
}
|
||||
remoteAdapters = getAdaptersByRemote(remoteAdapters)
|
||||
|
||||
// remove pools that failed (already handled)
|
||||
pools = pools.filter(_ => _ !== undefined)
|
||||
|
||||
const promises = []
|
||||
if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
|
||||
promises.push(
|
||||
asyncMap(pools, async pool =>
|
||||
runTask(
|
||||
{
|
||||
name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
|
||||
data: {
|
||||
id: pool.$id,
|
||||
pool,
|
||||
poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
|
||||
type: 'pool',
|
||||
},
|
||||
},
|
||||
() =>
|
||||
new PoolMetadataBackup({
|
||||
config,
|
||||
job,
|
||||
pool,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
|
||||
promises.push(
|
||||
runTask(
|
||||
{
|
||||
name: `Starting XO metadata backup. (${job.id})`,
|
||||
data: {
|
||||
type: 'xo',
|
||||
},
|
||||
},
|
||||
() =>
|
||||
new XoMetadataBackup({
|
||||
config,
|
||||
job,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
}
|
||||
await Promise.all(promises)
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
async _runVmBackup() {
|
||||
const job = this._job
|
||||
|
||||
// FIXME: proper SimpleIdPattern handling
|
||||
const getSnapshotNameLabel = this._getSnapshotNameLabel
|
||||
const schedule = this._schedule
|
||||
|
||||
const config = this._config
|
||||
const { settings } = job
|
||||
const scheduleSettings = {
|
||||
...config.defaultSettings,
|
||||
...config.vm.defaultSettings,
|
||||
...settings[''],
|
||||
...settings[schedule.id],
|
||||
}
|
||||
|
||||
await Disposable.use(
|
||||
Disposable.all(
|
||||
extractIdsFromSimplePattern(job.srs).map(id =>
|
||||
this._getRecord('SR', id).catch(error => {
|
||||
runTask(
|
||||
{
|
||||
name: 'get SR record',
|
||||
data: { type: 'SR', id },
|
||||
},
|
||||
() => Promise.reject(error)
|
||||
)
|
||||
})
|
||||
)
|
||||
),
|
||||
Disposable.all(
|
||||
extractIdsFromSimplePattern(job.remotes).map(id =>
|
||||
this._getAdapter(id).catch(error => {
|
||||
runTask(
|
||||
{
|
||||
name: 'get remote adapter',
|
||||
data: { type: 'remote', id },
|
||||
},
|
||||
() => Promise.reject(error)
|
||||
)
|
||||
})
|
||||
)
|
||||
),
|
||||
async (srs, remoteAdapters) => {
|
||||
// remove adapters that failed (already handled)
|
||||
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
|
||||
|
||||
// remove srs that failed (already handled)
|
||||
srs = srs.filter(_ => _ !== undefined)
|
||||
|
||||
if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
const vmIds = extractIdsFromSimplePattern(job.vms)
|
||||
|
||||
Task.info('vms', { vms: vmIds })
|
||||
|
||||
remoteAdapters = getAdaptersByRemote(remoteAdapters)
|
||||
|
||||
const handleVm = vmUuid =>
|
||||
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
|
||||
Disposable.use(this._getRecord('VM', vmUuid), vm =>
|
||||
new VmBackup({
|
||||
config,
|
||||
getSnapshotNameLabel,
|
||||
job,
|
||||
// remotes,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings: { ...scheduleSettings, ...settings[vmUuid] },
|
||||
srs,
|
||||
vm,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
const { concurrency } = scheduleSettings
|
||||
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
40
@xen-orchestra/backups/DurablePartition.js
Normal file
@@ -0,0 +1,40 @@
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
|
||||
exports.DurablePartition = class DurablePartition {
|
||||
// private resource API is used exceptionally to be able to separate resource creation and release
|
||||
#partitionDisposers = {}
|
||||
|
||||
flushAll() {
|
||||
const partitionDisposers = this.#partitionDisposers
|
||||
return asyncMap(Object.keys(partitionDisposers), path => {
|
||||
const disposers = partitionDisposers[path]
|
||||
delete partitionDisposers[path]
|
||||
return asyncMap(disposers, d => d(path).catch(noop => {}))
|
||||
})
|
||||
}
|
||||
|
||||
async mount(adapter, diskId, partitionId) {
|
||||
const { value: path, dispose } = await adapter.getPartition(diskId, partitionId)
|
||||
|
||||
const partitionDisposers = this.#partitionDisposers
|
||||
if (partitionDisposers[path] === undefined) {
|
||||
partitionDisposers[path] = []
|
||||
}
|
||||
partitionDisposers[path].push(dispose)
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
async unmount(path) {
|
||||
const partitionDisposers = this.#partitionDisposers
|
||||
const disposers = partitionDisposers[path]
|
||||
if (disposers === undefined) {
|
||||
throw new Error(`No partition corresponding to the path ${path} found`)
|
||||
}
|
||||
|
||||
await disposers.pop()()
|
||||
if (disposers.length === 0) {
|
||||
delete partitionDisposers[path]
|
||||
}
|
||||
}
|
||||
}
|
||||
66
@xen-orchestra/backups/ImportVmBackup.js
Normal file
@@ -0,0 +1,66 @@
|
||||
const assert = require('assert')
|
||||
|
||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
||||
const { importDeltaVm } = require('./_deltaVm.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
||||
|
||||
exports.ImportVmBackup = class ImportVmBackup {
|
||||
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses } = {} }) {
|
||||
this._adapter = adapter
|
||||
this._importDeltaVmSettings = { newMacAddresses }
|
||||
this._metadata = metadata
|
||||
this._srUuid = srUuid
|
||||
this._xapi = xapi
|
||||
}
|
||||
|
||||
async run() {
|
||||
const adapter = this._adapter
|
||||
const metadata = this._metadata
|
||||
const isFull = metadata.mode === 'full'
|
||||
|
||||
const sizeContainer = { size: 0 }
|
||||
|
||||
let backup
|
||||
if (isFull) {
|
||||
backup = await adapter.readFullVmBackup(metadata)
|
||||
watchStreamSize(backup, sizeContainer)
|
||||
} else {
|
||||
assert.strictEqual(metadata.mode, 'delta')
|
||||
|
||||
backup = await adapter.readDeltaVmBackup(metadata)
|
||||
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
|
||||
}
|
||||
|
||||
return Task.run(
|
||||
{
|
||||
name: 'transfer',
|
||||
},
|
||||
async () => {
|
||||
const xapi = this._xapi
|
||||
const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)
|
||||
|
||||
const vmRef = isFull
|
||||
? await xapi.VM_import(backup, srRef)
|
||||
: await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
|
||||
...this._importDeltaVmSettings,
|
||||
detectBase: false,
|
||||
})
|
||||
|
||||
await Promise.all([
|
||||
xapi.call('VM.add_tags', vmRef, 'restored from backup'),
|
||||
xapi.call(
|
||||
'VM.set_name_label',
|
||||
vmRef,
|
||||
`${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
|
||||
),
|
||||
])
|
||||
|
||||
return {
|
||||
size: sizeContainer.size,
|
||||
id: await xapi.getField('VM', vmRef, 'uuid'),
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
559
@xen-orchestra/backups/RemoteAdapter.js
Normal file
@@ -0,0 +1,559 @@
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const fromCallback = require('promise-toolbox/fromCallback.js')
|
||||
const fromEvent = require('promise-toolbox/fromEvent.js')
|
||||
const pDefer = require('promise-toolbox/defer.js')
|
||||
const pump = require('pump')
|
||||
const { basename, dirname, join, normalize, resolve } = require('path')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
|
||||
const { deduped } = require('@vates/disposable/deduped.js')
|
||||
const { execFile } = require('child_process')
|
||||
const { readdir, stat } = require('fs-extra')
|
||||
const { ZipFile } = require('yazl')
|
||||
|
||||
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
|
||||
const { cleanVm } = require('./_cleanVm.js')
|
||||
const { getTmpDir } = require('./_getTmpDir.js')
|
||||
const { isMetadataFile, isVhdFile } = require('./_backupType.js')
|
||||
const { isValidXva } = require('./_isValidXva.js')
|
||||
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
|
||||
const { lvs, pvs } = require('./_lvm.js')
|
||||
|
||||
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
||||
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
|
||||
|
||||
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
||||
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
|
||||
|
||||
const { warn } = createLogger('xo:backups:RemoteAdapter')
|
||||
|
||||
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
|
||||
|
||||
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
|
||||
|
||||
const RE_VHDI = /^vhdi(\d+)$/
|
||||
|
||||
async function addDirectory(files, realPath, metadataPath) {
|
||||
try {
|
||||
const subFiles = await readdir(realPath)
|
||||
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOTDIR') {
|
||||
throw error
|
||||
}
|
||||
files.push({
|
||||
realPath,
|
||||
metadataPath,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const createSafeReaddir = (handler, methodName) => (path, options) =>
|
||||
handler.list(path, options).catch(error => {
|
||||
if (error?.code !== 'ENOENT') {
|
||||
warn(`${methodName} ${path}`, { error })
|
||||
}
|
||||
return []
|
||||
})
|
||||
|
||||
const debounceResourceFactory = factory =>
|
||||
function () {
|
||||
return this._debounceResource(factory.apply(this, arguments))
|
||||
}
|
||||
|
||||
class RemoteAdapter {
|
||||
constructor(handler, { debounceResource = res => res, dirMode } = {}) {
|
||||
this._debounceResource = debounceResource
|
||||
this._dirMode = dirMode
|
||||
this._handler = handler
|
||||
}
|
||||
|
||||
get handler() {
|
||||
return this._handler
|
||||
}
|
||||
|
||||
async _deleteVhd(path) {
|
||||
const handler = this._handler
|
||||
const vhds = await asyncMapSettled(
|
||||
await handler.list(dirname(path), {
|
||||
filter: isVhdFile,
|
||||
prependDir: true,
|
||||
}),
|
||||
async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter()
|
||||
return {
|
||||
footer: vhd.footer,
|
||||
header: vhd.header,
|
||||
path,
|
||||
}
|
||||
} catch (error) {
|
||||
// Do not fail on corrupted VHDs (usually uncleaned temporary files),
|
||||
// they are probably inconsequential to the backup process and should not
|
||||
// fail it.
|
||||
warn(`BackupNg#_deleteVhd ${path}`, { error })
|
||||
}
|
||||
}
|
||||
)
|
||||
const base = basename(path)
|
||||
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
|
||||
if (child === undefined) {
|
||||
await handler.unlink(path)
|
||||
return 0
|
||||
}
|
||||
|
||||
try {
|
||||
const childPath = child.path
|
||||
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
|
||||
await handler.rename(path, childPath)
|
||||
return mergedDataSize
|
||||
} catch (error) {
|
||||
handler.unlink(path).catch(warn)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async _findPartition(devicePath, partitionId) {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
const partition = partitions.find(_ => _.id === partitionId)
|
||||
if (partition === undefined) {
|
||||
throw new Error(`partition ${partitionId} not found`)
|
||||
}
|
||||
return partition
|
||||
}
|
||||
|
||||
_getLvmLogicalVolumes = Disposable.factory(this._getLvmLogicalVolumes)
|
||||
_getLvmLogicalVolumes = deduped(this._getLvmLogicalVolumes, (devicePath, pvId, vgName) => [devicePath, pvId, vgName])
|
||||
_getLvmLogicalVolumes = debounceResourceFactory(this._getLvmLogicalVolumes)
|
||||
async *_getLvmLogicalVolumes(devicePath, pvId, vgName) {
|
||||
yield this._getLvmPhysicalVolume(devicePath, pvId && (await this._findPartition(devicePath, pvId)))
|
||||
|
||||
await fromCallback(execFile, 'vgchange', ['-ay', vgName])
|
||||
try {
|
||||
yield lvs(['lv_name', 'lv_path'], vgName)
|
||||
} finally {
|
||||
await fromCallback(execFile, 'vgchange', ['-an', vgName])
|
||||
}
|
||||
}
|
||||
|
||||
_getLvmPhysicalVolume = Disposable.factory(this._getLvmPhysicalVolume)
|
||||
_getLvmPhysicalVolume = deduped(this._getLvmPhysicalVolume, (devicePath, partition) => [devicePath, partition?.id])
|
||||
_getLvmPhysicalVolume = debounceResourceFactory(this._getLvmPhysicalVolume)
|
||||
async *_getLvmPhysicalVolume(devicePath, partition) {
|
||||
const args = []
|
||||
if (partition !== undefined) {
|
||||
args.push('-o', partition.start * 512, '--sizelimit', partition.size)
|
||||
}
|
||||
args.push('--show', '-f', devicePath)
|
||||
const path = (await fromCallback(execFile, 'losetup', args)).trim()
|
||||
try {
|
||||
await fromCallback(execFile, 'pvscan', ['--cache', path])
|
||||
yield path
|
||||
} finally {
|
||||
try {
|
||||
const vgNames = await pvs('vg_name', path)
|
||||
await fromCallback(execFile, 'vgchange', ['-an', ...vgNames])
|
||||
} finally {
|
||||
await fromCallback(execFile, 'losetup', ['-d', path])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_getPartition = Disposable.factory(this._getPartition)
|
||||
_getPartition = deduped(this._getPartition, (devicePath, partition) => [devicePath, partition?.id])
|
||||
_getPartition = debounceResourceFactory(this._getPartition)
|
||||
async *_getPartition(devicePath, partition) {
|
||||
const options = ['loop', 'ro']
|
||||
|
||||
if (partition !== undefined) {
|
||||
const { size, start } = partition
|
||||
options.push(`sizelimit=${size}`)
|
||||
if (start !== undefined) {
|
||||
options.push(`offset=${start * 512}`)
|
||||
}
|
||||
}
|
||||
|
||||
const path = yield getTmpDir()
|
||||
const mount = options => {
|
||||
return fromCallback(execFile, 'mount', [
|
||||
`--options=${options.join(',')}`,
|
||||
`--source=${devicePath}`,
|
||||
`--target=${path}`,
|
||||
])
|
||||
}
|
||||
|
||||
// `norecovery` option is used for ext3/ext4/xfs, if it fails it might be
|
||||
// another fs, try without
|
||||
try {
|
||||
await mount([...options, 'norecovery'])
|
||||
} catch (error) {
|
||||
await mount(options)
|
||||
}
|
||||
try {
|
||||
yield path
|
||||
} finally {
|
||||
await fromCallback(execFile, 'umount', ['--lazy', path])
|
||||
}
|
||||
}
|
||||
|
||||
_listLvmLogicalVolumes(devicePath, partition, results = []) {
|
||||
return Disposable.use(this._getLvmPhysicalVolume(devicePath, partition), async path => {
|
||||
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], path)
|
||||
const partitionId = partition !== undefined ? partition.id : ''
|
||||
lvs.forEach((lv, i) => {
|
||||
const name = lv.lv_name
|
||||
if (name !== '') {
|
||||
results.push({
|
||||
id: `${partitionId}/${lv.vg_name}/${name}`,
|
||||
name,
|
||||
size: lv.lv_size,
|
||||
})
|
||||
}
|
||||
})
|
||||
return results
|
||||
})
|
||||
}
|
||||
|
||||
_usePartitionFiles = Disposable.factory(this._usePartitionFiles)
|
||||
async *_usePartitionFiles(diskId, partitionId, paths) {
|
||||
const path = yield this.getPartition(diskId, partitionId)
|
||||
|
||||
const files = []
|
||||
await asyncMap(paths, file =>
|
||||
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
|
||||
)
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
fetchPartitionFiles(diskId, partitionId, paths) {
|
||||
const { promise, reject, resolve } = pDefer()
|
||||
Disposable.use(
|
||||
async function* () {
|
||||
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
|
||||
const zip = new ZipFile()
|
||||
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
|
||||
zip.end()
|
||||
const { outputStream } = zip
|
||||
resolve(outputStream)
|
||||
await fromEvent(outputStream, 'end')
|
||||
}.bind(this)
|
||||
).catch(error => {
|
||||
warn(error)
|
||||
reject(error)
|
||||
})
|
||||
return promise
|
||||
}
|
||||
|
||||
async deleteDeltaVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
let mergedDataSize = 0
|
||||
await asyncMapSettled(backups, ({ _filename, vhds }) =>
|
||||
Promise.all([
|
||||
handler.unlink(_filename),
|
||||
asyncMap(Object.values(vhds), async _ => {
|
||||
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
|
||||
}),
|
||||
])
|
||||
)
|
||||
return mergedDataSize
|
||||
}
|
||||
|
||||
async deleteMetadataBackup(backupId) {
|
||||
const uuidReg = '\\w{8}(-\\w{4}){3}-\\w{12}'
|
||||
const metadataDirReg = 'xo-(config|pool-metadata)-backups'
|
||||
const timestampReg = '\\d{8}T\\d{6}Z'
|
||||
const regexp = new RegExp(`^${metadataDirReg}/${uuidReg}(/${uuidReg})?/${timestampReg}`)
|
||||
if (!regexp.test(backupId)) {
|
||||
throw new Error(`The id (${backupId}) does not correspond to a metadata folder`)
|
||||
}
|
||||
|
||||
await this._handler.rmtree(backupId)
|
||||
}
|
||||
|
||||
async deleteOldMetadataBackups(dir, retention) {
|
||||
const handler = this.handler
|
||||
let list = await handler.list(dir)
|
||||
list.sort()
|
||||
list = list.filter(timestamp => /^\d{8}T\d{6}Z$/.test(timestamp)).slice(0, -retention)
|
||||
await asyncMapSettled(list, timestamp => handler.rmtree(`${dir}/${timestamp}`))
|
||||
}
|
||||
|
||||
async deleteFullVmBackups(backups) {
|
||||
const handler = this._handler
|
||||
await asyncMapSettled(backups, ({ _filename, xva }) =>
|
||||
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
|
||||
)
|
||||
}
|
||||
|
||||
async deleteVmBackup(filename) {
|
||||
const metadata = JSON.parse(String(await this._handler.readFile(filename)))
|
||||
metadata._filename = filename
|
||||
|
||||
if (metadata.mode === 'delta') {
|
||||
await this.deleteDeltaVmBackups([metadata])
|
||||
} else if (metadata.mode === 'full') {
|
||||
await this.deleteFullVmBackups([metadata])
|
||||
} else {
|
||||
throw new Error(`no deleter for backup mode ${metadata.mode}`)
|
||||
}
|
||||
}
|
||||
|
||||
getDisk = Disposable.factory(this.getDisk)
|
||||
getDisk = deduped(this.getDisk, diskId => [diskId])
|
||||
getDisk = debounceResourceFactory(this.getDisk)
|
||||
async *getDisk(diskId) {
|
||||
const handler = this._handler
|
||||
|
||||
const diskPath = handler._getFilePath('/' + diskId)
|
||||
const mountDir = yield getTmpDir()
|
||||
await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
|
||||
try {
|
||||
let max = 0
|
||||
let maxEntry
|
||||
const entries = await readdir(mountDir)
|
||||
entries.forEach(entry => {
|
||||
const matches = RE_VHDI.exec(entry)
|
||||
if (matches !== null) {
|
||||
const value = +matches[1]
|
||||
if (value > max) {
|
||||
max = value
|
||||
maxEntry = entry
|
||||
}
|
||||
}
|
||||
})
|
||||
if (max === 0) {
|
||||
throw new Error('no disks found')
|
||||
}
|
||||
|
||||
yield `${mountDir}/${maxEntry}`
|
||||
} finally {
|
||||
await fromCallback(execFile, 'fusermount', ['-uz', mountDir])
|
||||
}
|
||||
}
|
||||
|
||||
// partitionId values:
|
||||
//
|
||||
// - undefined: raw disk
|
||||
// - `<partitionId>`: partitioned disk
|
||||
// - `<pvId>/<vgName>/<lvName>`: LVM on a partitioned disk
|
||||
// - `/<vgName>/lvName>`: LVM on a raw disk
|
||||
getPartition = Disposable.factory(this.getPartition)
|
||||
async *getPartition(diskId, partitionId) {
|
||||
const devicePath = yield this.getDisk(diskId)
|
||||
if (partitionId === undefined) {
|
||||
return yield this._getPartition(devicePath)
|
||||
}
|
||||
|
||||
const isLvmPartition = partitionId.includes('/')
|
||||
if (isLvmPartition) {
|
||||
const [pvId, vgName, lvName] = partitionId.split('/')
|
||||
const lvs = yield this._getLvmLogicalVolumes(devicePath, pvId !== '' ? pvId : undefined, vgName)
|
||||
return yield this._getPartition(lvs.find(_ => _.lv_name === lvName).lv_path)
|
||||
}
|
||||
|
||||
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
|
||||
}
|
||||
|
||||
async listAllVmBackups() {
|
||||
const handler = this._handler
|
||||
|
||||
const backups = { __proto__: null }
|
||||
await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
|
||||
const vmBackups = await this.listVmBackups(vmUuid)
|
||||
backups[vmUuid] = vmBackups
|
||||
})
|
||||
|
||||
return backups
|
||||
}
|
||||
|
||||
listPartitionFiles(diskId, partitionId, path) {
|
||||
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
|
||||
path = resolveSubpath(rootPath, path)
|
||||
|
||||
const entriesMap = {}
|
||||
await asyncMap(await readdir(path), async name => {
|
||||
try {
|
||||
const stats = await stat(`${path}/${name}`)
|
||||
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOENT') {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
return entriesMap
|
||||
})
|
||||
}
|
||||
|
||||
listPartitions(diskId) {
|
||||
return Disposable.use(this.getDisk(diskId), async devicePath => {
|
||||
const partitions = await listPartitions(devicePath)
|
||||
|
||||
if (partitions.length === 0) {
|
||||
try {
|
||||
// handle potential raw LVM physical volume
|
||||
return await this._listLvmLogicalVolumes(devicePath, undefined, partitions)
|
||||
} catch (error) {
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
const results = []
|
||||
await asyncMapSettled(partitions, partition =>
|
||||
partition.type === LVM_PARTITION_TYPE
|
||||
? this._listLvmLogicalVolumes(devicePath, partition, results)
|
||||
: results.push(partition)
|
||||
)
|
||||
return results
|
||||
})
|
||||
}
|
||||
|
||||
async listPoolMetadataBackups() {
|
||||
const handler = this._handler
|
||||
const safeReaddir = createSafeReaddir(handler, 'listPoolMetadataBackups')
|
||||
|
||||
const backupsByPool = {}
|
||||
await asyncMap(await safeReaddir(DIR_XO_POOL_METADATA_BACKUPS, { prependDir: true }), async scheduleDir =>
|
||||
asyncMap(await safeReaddir(scheduleDir), async poolId => {
|
||||
const backups = backupsByPool[poolId] ?? (backupsByPool[poolId] = [])
|
||||
return asyncMap(await safeReaddir(`${scheduleDir}/${poolId}`, { prependDir: true }), async backupDir => {
|
||||
try {
|
||||
backups.push({
|
||||
id: backupDir,
|
||||
...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
|
||||
})
|
||||
} catch (error) {
|
||||
warn(`listPoolMetadataBackups ${backupDir}`, {
|
||||
error,
|
||||
})
|
||||
}
|
||||
})
|
||||
})
|
||||
)
|
||||
|
||||
// delete empty entries and sort backups
|
||||
Object.keys(backupsByPool).forEach(poolId => {
|
||||
const backups = backupsByPool[poolId]
|
||||
if (backups.length === 0) {
|
||||
delete backupsByPool[poolId]
|
||||
} else {
|
||||
backups.sort(compareTimestamp)
|
||||
}
|
||||
})
|
||||
|
||||
return backupsByPool
|
||||
}
|
||||
|
||||
async listVmBackups(vmUuid, predicate) {
|
||||
const handler = this._handler
|
||||
const backups = []
|
||||
|
||||
try {
|
||||
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
|
||||
filter: isMetadataFile,
|
||||
prependDir: true,
|
||||
})
|
||||
await asyncMap(files, async file => {
|
||||
try {
|
||||
const metadata = await this.readVmBackupMetadata(file)
|
||||
if (predicate === undefined || predicate(metadata)) {
|
||||
// inject an id usable by importVmBackupNg()
|
||||
metadata.id = metadata._filename
|
||||
|
||||
backups.push(metadata)
|
||||
}
|
||||
} catch (error) {
|
||||
warn(`listVmBackups ${file}`, { error })
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
let code
|
||||
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
|
||||
async listXoMetadataBackups() {
|
||||
const handler = this._handler
|
||||
const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')
|
||||
|
||||
const backups = []
|
||||
await asyncMap(await safeReaddir(DIR_XO_CONFIG_BACKUPS, { prependDir: true }), async scheduleDir =>
|
||||
asyncMap(await safeReaddir(scheduleDir, { prependDir: true }), async backupDir => {
|
||||
try {
|
||||
backups.push({
|
||||
id: backupDir,
|
||||
...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
|
||||
})
|
||||
} catch (error) {
|
||||
warn(`listXoMetadataBackups ${backupDir}`, { error })
|
||||
}
|
||||
})
|
||||
)
|
||||
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
|
||||
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
|
||||
await this._handler.outputStream(path, input, {
|
||||
checksum,
|
||||
dirMode: this._dirMode,
|
||||
async validator() {
|
||||
await input.task
|
||||
return validator.apply(this, arguments)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
async readDeltaVmBackup(metadata) {
|
||||
const handler = this._handler
|
||||
const { vbds, vdis, vhds, vifs, vm } = metadata
|
||||
const dir = dirname(metadata._filename)
|
||||
|
||||
const streams = {}
|
||||
await asyncMapSettled(Object.keys(vdis), async id => {
|
||||
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
|
||||
})
|
||||
|
||||
return {
|
||||
streams,
|
||||
vbds,
|
||||
vdis,
|
||||
version: '1.0.0',
|
||||
vifs,
|
||||
vm,
|
||||
}
|
||||
}
|
||||
|
||||
readFullVmBackup(metadata) {
|
||||
return this._handler.createReadStream(resolve('/', dirname(metadata._filename), metadata.xva))
|
||||
}
|
||||
|
||||
async readVmBackupMetadata(path) {
|
||||
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
|
||||
}
|
||||
}
|
||||
|
||||
Object.assign(RemoteAdapter.prototype, {
|
||||
cleanVm(vmDir, { lock = true } = {}) {
|
||||
if (lock) {
|
||||
return Disposable.use(this._handler.lock(vmDir), () => cleanVm.apply(this, arguments))
|
||||
} else {
|
||||
return cleanVm.apply(this, arguments)
|
||||
}
|
||||
},
|
||||
isValidXva,
|
||||
})
|
||||
|
||||
exports.RemoteAdapter = RemoteAdapter
|
||||
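A minimal usage sketch of the adapter API above, run inside an async function: the remote URL and VM UUID are placeholders, and the constructor options (debounceResource, dirMode) used by _backupWorker.js further down are omitted for brevity.

const { getHandler } = require('@xen-orchestra/fs')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter.js')

async function listAndPrune(remoteUrl, vmUuid) {
  const handler = getHandler({ url: remoteUrl })
  await handler.sync()
  try {
    const adapter = new RemoteAdapter(handler)

    // backups of a single VM, sorted by timestamp
    const backups = await adapter.listVmBackups(vmUuid)

    // delete the oldest one from its metadata file
    if (backups.length !== 0) {
      await adapter.deleteVmBackup(backups[0]._filename)
    }
  } finally {
    await handler.forget()
  }
}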
24  @xen-orchestra/backups/RestoreMetadataBackup.js  Normal file
@@ -0,0 +1,24 @@
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
|
||||
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')
|
||||
|
||||
exports.RestoreMetadataBackup = class RestoreMetadataBackup {
|
||||
constructor({ backupId, handler, xapi }) {
|
||||
this._backupId = backupId
|
||||
this._handler = handler
|
||||
this._xapi = xapi
|
||||
}
|
||||
|
||||
async run() {
|
||||
const backupId = this._backupId
|
||||
const handler = this._handler
|
||||
const xapi = this._xapi
|
||||
|
||||
if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
|
||||
return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
|
||||
task: xapi.task_create('Import pool metadata'),
|
||||
})
|
||||
} else {
|
||||
return String(await handler.readFile(`${backupId}/data.json`))
|
||||
}
|
||||
}
|
||||
}
|
||||
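A hedged example of how this class is driven, assuming an async context with a connected `xapi` client and a remote `handler`; the backup id follows the `xo-pool-metadata-backups/<scheduleId>/<poolId>/<timestamp>` layout enforced by deleteMetadataBackup() above and is illustrative.

const { RestoreMetadataBackup } = require('@xen-orchestra/backups/RestoreMetadataBackup.js')

// pool metadata: the dump is streamed back into the pool through PATH_DB_DUMP
await new RestoreMetadataBackup({
  backupId: 'xo-pool-metadata-backups/<scheduleId>/<poolId>/20210101T000000Z',
  handler,
  xapi,
}).run()

// XO config (xo-config-backups/...): run() instead resolves with the data.json content as a string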
151  @xen-orchestra/backups/Task.js  Normal file
@@ -0,0 +1,151 @@
|
||||
const CancelToken = require('promise-toolbox/CancelToken.js')
|
||||
const Zone = require('node-zone')
|
||||
|
||||
const logAfterEnd = () => {
|
||||
throw new Error('task has already ended')
|
||||
}
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
// Create a serializable object from an error.
|
||||
//
|
||||
// Otherwise some fields might be non-enumerable and missing from logs.
|
||||
const serializeError = error =>
|
||||
error instanceof Error
|
||||
? {
|
||||
...error, // Copy enumerable properties.
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
name: error.name,
|
||||
stack: error.stack,
|
||||
}
|
||||
: error
|
||||
|
||||
const $$task = Symbol('@xen-orchestra/backups/Task')
|
||||
|
||||
class Task {
|
||||
static get cancelToken() {
|
||||
const task = Zone.current.data[$$task]
|
||||
return task !== undefined ? task.#cancelToken : CancelToken.none
|
||||
}
|
||||
|
||||
static run(opts, fn) {
|
||||
return new this(opts).run(fn, true)
|
||||
}
|
||||
|
||||
static wrapFn(opts, fn) {
|
||||
// compatibility with @decorateWith
|
||||
if (typeof fn !== 'function') {
|
||||
;[fn, opts] = [opts, fn]
|
||||
}
|
||||
|
||||
return function () {
|
||||
return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
|
||||
}
|
||||
}
|
||||
|
||||
#cancelToken
|
||||
#id = Math.random().toString(36).slice(2)
|
||||
#onLog
|
||||
#zone
|
||||
|
||||
constructor({ name, data, onLog }) {
|
||||
let parentCancelToken, parentId
|
||||
if (onLog === undefined) {
|
||||
const parent = Zone.current.data[$$task]
|
||||
if (parent === undefined) {
|
||||
onLog = noop
|
||||
} else {
|
||||
onLog = log => parent.#onLog(log)
|
||||
parentCancelToken = parent.#cancelToken
|
||||
parentId = parent.#id
|
||||
}
|
||||
}
|
||||
|
||||
const zone = Zone.current.fork('@xen-orchestra/backups/Task')
|
||||
zone.data[$$task] = this
|
||||
this.#zone = zone
|
||||
|
||||
const { cancel, token } = CancelToken.source(parentCancelToken && [parentCancelToken])
|
||||
this.#cancelToken = token
|
||||
this.cancel = cancel
|
||||
|
||||
this.#onLog = onLog
|
||||
|
||||
this.#log('start', {
|
||||
data,
|
||||
message: name,
|
||||
parentId,
|
||||
})
|
||||
}
|
||||
|
||||
failure(error) {
|
||||
this.#end('failure', serializeError(error))
|
||||
}
|
||||
|
||||
info(message, data) {
|
||||
this.#log('info', { data, message })
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a function in the context of this task
|
||||
*
|
||||
* In case of error, the task will be failed.
|
||||
*
|
||||
* @typedef Result
|
||||
* @param {() => Result} fn
|
||||
* @param {boolean} last - Whether the task should succeed if there is no error
|
||||
* @returns Result
|
||||
*/
|
||||
run(fn, last = false) {
|
||||
return this.#zone.run(() => {
|
||||
try {
|
||||
const result = fn()
|
||||
let then
|
||||
if (result != null && typeof (then = result.then) === 'function') {
|
||||
then.call(result, last && (value => this.success(value)), error => this.failure(error))
|
||||
} else if (last) {
|
||||
this.success(result)
|
||||
}
|
||||
return result
|
||||
} catch (error) {
|
||||
this.failure(error)
|
||||
throw error
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
success(value) {
|
||||
this.#end('success', value)
|
||||
}
|
||||
|
||||
warning(message, data) {
|
||||
this.#log('warning', { data, message })
|
||||
}
|
||||
|
||||
wrapFn(fn, last) {
|
||||
const task = this
|
||||
return function () {
|
||||
return task.run(() => fn.apply(this, arguments), last)
|
||||
}
|
||||
}
|
||||
|
||||
#end(status, result) {
|
||||
this.#log('end', { result, status })
|
||||
this.#onLog = logAfterEnd
|
||||
}
|
||||
|
||||
#log(event, props) {
|
||||
this.#onLog({
|
||||
...props,
|
||||
event,
|
||||
taskId: this.#id,
|
||||
timestamp: Date.now(),
|
||||
})
|
||||
}
|
||||
}
|
||||
exports.Task = Task
|
||||
|
||||
for (const method of ['info', 'warning']) {
|
||||
Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
|
||||
}
|
||||
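A small self-contained sketch of the Task API defined above, inside an async context; the onLog callback and the work performed are illustrative.

const { Task } = require('@xen-orchestra/backups/Task.js')

const result = await Task.run(
  {
    name: 'example task',
    data: { vm: 'some-uuid' },
    onLog: log => console.log(log), // receives the start/info/warning/end events
  },
  async () => {
    Task.info('sub-message attached to the current task via the zone')
    return 42 // reported in the `end` event with status `success`
  }
)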
75  @xen-orchestra/backups/_PoolMetadataBackup.js  Normal file
@@ -0,0 +1,75 @@
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
|
||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
|
||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
||||
const { Task } = require('./Task.js')
|
||||
|
||||
const PATH_DB_DUMP = '/pool/xmldbdump'
|
||||
exports.PATH_DB_DUMP = PATH_DB_DUMP
|
||||
|
||||
exports.PoolMetadataBackup = class PoolMetadataBackup {
|
||||
constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
|
||||
this._config = config
|
||||
this._job = job
|
||||
this._pool = pool
|
||||
this._remoteAdapters = remoteAdapters
|
||||
this._schedule = schedule
|
||||
this._settings = settings
|
||||
}
|
||||
|
||||
_exportPoolMetadata() {
|
||||
const xapi = this._pool.$xapi
|
||||
return xapi.getResource(PATH_DB_DUMP, {
|
||||
task: xapi.task_create('Export pool metadata'),
|
||||
})
|
||||
}
|
||||
|
||||
async run() {
|
||||
const timestamp = Date.now()
|
||||
|
||||
const { _job: job, _schedule: schedule, _pool: pool } = this
|
||||
const poolDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${schedule.id}/${pool.$id}`
|
||||
const dir = `${poolDir}/${formatFilenameDate(timestamp)}`
|
||||
|
||||
const stream = await this._exportPoolMetadata()
|
||||
const fileName = `${dir}/data`
|
||||
|
||||
const metadata = JSON.stringify(
|
||||
{
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
pool,
|
||||
poolMaster: pool.$master,
|
||||
scheduleId: schedule.id,
|
||||
scheduleName: schedule.name,
|
||||
timestamp,
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
await asyncMap(
|
||||
Object.entries(this._remoteAdapters),
|
||||
([remoteId, adapter]) =>
|
||||
Task.run(
|
||||
{
|
||||
name: `Starting metadata backup for the pool (${pool.$id}) for the remote (${remoteId}). (${job.id})`,
|
||||
data: {
|
||||
id: remoteId,
|
||||
type: 'remote',
|
||||
},
|
||||
},
|
||||
async () => {
|
||||
// forkStreamUnpipe should be used in a sync way, do not wait for a promise before using it
|
||||
await adapter.outputStream(fileName, forkStreamUnpipe(stream), { checksum: false })
|
||||
await adapter.handler.outputFile(metaDataFileName, metadata, {
|
||||
dirMode: this._config.dirMode,
|
||||
})
|
||||
await adapter.deleteOldMetadataBackups(poolDir, this._settings.retentionPoolMetadata)
|
||||
}
|
||||
).catch(() => {}) // errors are handled by logs
|
||||
)
|
||||
}
|
||||
}
|
||||
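How a run of this class might be wired up, assuming `job`, `pool` (a XAPI pool record), `remoteAdapters` (remote id → RemoteAdapter) and `schedule` already exist; the concrete option values are illustrative.

const { PoolMetadataBackup } = require('@xen-orchestra/backups/_PoolMetadataBackup.js')

await new PoolMetadataBackup({
  config: { dirMode: 0o700 },
  job,
  pool,
  remoteAdapters,
  schedule,
  settings: { retentionPoolMetadata: 3 },
}).run()
// each remote ends up with xo-pool-metadata-backups/<scheduleId>/<poolId>/<timestamp>/{data,metadata.json},
// then entries beyond the retention are pruned with deleteOldMetadataBackups()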
382  @xen-orchestra/backups/_VmBackup.js  Normal file
@@ -0,0 +1,382 @@
|
||||
const assert = require('assert')
|
||||
const findLast = require('lodash/findLast.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const keyBy = require('lodash/keyBy.js')
|
||||
const mapValues = require('lodash/mapValues.js')
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { defer } = require('golike-defer')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
|
||||
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
|
||||
const { exportDeltaVm } = require('./_deltaVm.js')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
|
||||
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
|
||||
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
|
||||
const { getOldEntries } = require('./_getOldEntries.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
||||
|
||||
const { debug, warn } = createLogger('xo:backups:VmBackup')
|
||||
|
||||
const asyncEach = async (iterable, fn, thisArg = iterable) => {
|
||||
for (const item of iterable) {
|
||||
await fn.call(thisArg, item)
|
||||
}
|
||||
}
|
||||
|
||||
const forkDeltaExport = deltaExport =>
|
||||
Object.create(deltaExport, {
|
||||
streams: {
|
||||
value: mapValues(deltaExport.streams, forkStreamUnpipe),
|
||||
},
|
||||
})
|
||||
|
||||
exports.VmBackup = class VmBackup {
|
||||
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
|
||||
this.config = config
|
||||
this.job = job
|
||||
this.remoteAdapters = remoteAdapters
|
||||
this.remotes = remotes
|
||||
this.scheduleId = schedule.id
|
||||
this.timestamp = undefined
|
||||
|
||||
// VM currently backed up
|
||||
this.vm = vm
|
||||
const { tags } = this.vm
|
||||
|
||||
// VM (snapshot) that is really exported
|
||||
this.exportedVm = undefined
|
||||
|
||||
this._fullVdisRequired = undefined
|
||||
this._getSnapshotNameLabel = getSnapshotNameLabel
|
||||
this._isDelta = job.mode === 'delta'
|
||||
this._jobId = job.id
|
||||
this._jobSnapshots = undefined
|
||||
this._xapi = vm.$xapi
|
||||
|
||||
// Base VM for the export
|
||||
this._baseVm = undefined
|
||||
|
||||
// Settings for this specific run (job, schedule, VM)
|
||||
if (tags.includes('xo-memory-backup')) {
|
||||
settings.checkpointSnapshot = true
|
||||
}
|
||||
if (tags.includes('xo-offline-backup')) {
|
||||
settings.offlineSnapshot = true
|
||||
}
|
||||
this._settings = settings
|
||||
|
||||
// Create writers
|
||||
{
|
||||
const writers = new Set()
|
||||
this._writers = writers
|
||||
|
||||
const [BackupWriter, ReplicationWriter] = this._isDelta
|
||||
? [DeltaBackupWriter, DeltaReplicationWriter]
|
||||
: [FullBackupWriter, FullReplicationWriter]
|
||||
|
||||
const allSettings = job.settings
|
||||
|
||||
Object.keys(remoteAdapters).forEach(remoteId => {
|
||||
const targetSettings = {
|
||||
...settings,
|
||||
...allSettings[remoteId],
|
||||
}
|
||||
if (targetSettings.exportRetention !== 0) {
|
||||
writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
|
||||
}
|
||||
})
|
||||
srs.forEach(sr => {
|
||||
const targetSettings = {
|
||||
...settings,
|
||||
...allSettings[sr.uuid],
|
||||
}
|
||||
if (targetSettings.copyRetention !== 0) {
|
||||
writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// calls fn for each writer, removes any writer whose call fails (with a warning), and throws only if no writers are left
|
||||
async _callWriters(fn, warnMessage, parallel = true) {
|
||||
const writers = this._writers
|
||||
if (writers.size === 0) {
|
||||
return
|
||||
}
|
||||
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
|
||||
try {
|
||||
await fn(writer)
|
||||
} catch (error) {
|
||||
this.delete(writer)
|
||||
warn(warnMessage, { error, writer: writer.constructor.name })
|
||||
}
|
||||
})
|
||||
if (writers.size === 0) {
|
||||
throw new Error('all targets have failed, step: ' + warnMessage)
|
||||
}
|
||||
}
|
||||
|
||||
// ensure the VM itself does not have any backup metadata which would be
|
||||
// copied on manual snapshots and interfere with the backup jobs
|
||||
async _cleanMetadata() {
|
||||
const { vm } = this
|
||||
if ('xo:backup:job' in vm.other_config) {
|
||||
await vm.update_other_config({
|
||||
'xo:backup:datetime': null,
|
||||
'xo:backup:deltaChainLength': null,
|
||||
'xo:backup:exported': null,
|
||||
'xo:backup:job': null,
|
||||
'xo:backup:schedule': null,
|
||||
'xo:backup:vm': null,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async _snapshot() {
|
||||
const { vm } = this
|
||||
const xapi = this._xapi
|
||||
|
||||
const settings = this._settings
|
||||
|
||||
const doSnapshot =
|
||||
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
|
||||
if (doSnapshot) {
|
||||
await Task.run({ name: 'snapshot' }, async () => {
|
||||
if (!settings.bypassVdiChainsCheck) {
|
||||
await vm.$assertHealthyVdiChains()
|
||||
}
|
||||
|
||||
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
|
||||
name_label: this._getSnapshotNameLabel(vm),
|
||||
})
|
||||
this.timestamp = Date.now()
|
||||
|
||||
await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
|
||||
'xo:backup:datetime': formatDateTime(this.timestamp),
|
||||
'xo:backup:job': this._jobId,
|
||||
'xo:backup:schedule': this.scheduleId,
|
||||
'xo:backup:vm': vm.uuid,
|
||||
})
|
||||
|
||||
this.exportedVm = await xapi.getRecord('VM', snapshotRef)
|
||||
|
||||
return this.exportedVm.uuid
|
||||
})
|
||||
} else {
|
||||
this.exportedVm = vm
|
||||
this.timestamp = Date.now()
|
||||
}
|
||||
}
|
||||
|
||||
async _copyDelta() {
|
||||
const { exportedVm } = this
|
||||
const baseVm = this._baseVm
|
||||
const fullVdisRequired = this._fullVdisRequired
|
||||
|
||||
const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0
|
||||
|
||||
await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')
|
||||
|
||||
const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
|
||||
fullVdisRequired,
|
||||
})
|
||||
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
|
||||
|
||||
const timestamp = Date.now()
|
||||
|
||||
await this._callWriters(
|
||||
writer =>
|
||||
writer.transfer({
|
||||
deltaExport: forkDeltaExport(deltaExport),
|
||||
sizeContainers,
|
||||
timestamp,
|
||||
}),
|
||||
'writer.transfer()'
|
||||
)
|
||||
|
||||
this._baseVm = exportedVm
|
||||
|
||||
if (baseVm !== undefined) {
|
||||
await exportedVm.update_other_config(
|
||||
'xo:backup:deltaChainLength',
|
||||
String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
|
||||
)
|
||||
}
|
||||
|
||||
// not the case when offlineBackup is used (the VM itself is exported, not a snapshot)
|
||||
if (exportedVm.is_a_snapshot) {
|
||||
await exportedVm.update_other_config('xo:backup:exported', 'true')
|
||||
}
|
||||
|
||||
const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
|
||||
const end = Date.now()
|
||||
const duration = end - timestamp
|
||||
debug('transfer complete', {
|
||||
duration,
|
||||
speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
|
||||
size,
|
||||
})
|
||||
|
||||
await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
|
||||
}
|
||||
|
||||
async _copyFull() {
|
||||
const { compression } = this.job
|
||||
const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
|
||||
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
|
||||
useSnapshot: false,
|
||||
})
|
||||
const sizeContainer = watchStreamSize(stream)
|
||||
|
||||
const timestamp = Date.now()
|
||||
|
||||
await this._callWriters(
|
||||
writer =>
|
||||
writer.run({
|
||||
sizeContainer,
|
||||
stream: forkStreamUnpipe(stream),
|
||||
timestamp,
|
||||
}),
|
||||
'writer.run()'
|
||||
)
|
||||
|
||||
const { size } = sizeContainer
|
||||
const end = Date.now()
|
||||
const duration = end - timestamp
|
||||
debug('transfer complete', {
|
||||
duration,
|
||||
speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
|
||||
size,
|
||||
})
|
||||
}
|
||||
|
||||
async _fetchJobSnapshots() {
|
||||
const jobId = this._jobId
|
||||
const vmRef = this.vm.$ref
|
||||
const xapi = this._xapi
|
||||
|
||||
const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
|
||||
const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))
|
||||
|
||||
const snapshots = []
|
||||
snapshotsOtherConfig.forEach((other_config, i) => {
|
||||
if (other_config['xo:backup:job'] === jobId) {
|
||||
snapshots.push({ other_config, $ref: snapshotsRef[i] })
|
||||
}
|
||||
})
|
||||
snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
|
||||
this._jobSnapshots = snapshots
|
||||
}
|
||||
|
||||
async _removeUnusedSnapshots() {
|
||||
// TODO: handle all schedules (no longer existing schedules default to 0 retention)
|
||||
|
||||
const { scheduleId } = this
|
||||
const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)
|
||||
|
||||
const baseVmRef = this._baseVm?.$ref
|
||||
const xapi = this._xapi
|
||||
await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
|
||||
if ($ref !== baseVmRef) {
|
||||
return xapi.VM_destroy($ref)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async _selectBaseVm() {
|
||||
const xapi = this._xapi
|
||||
|
||||
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
|
||||
if (baseVm === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
const fullInterval = this._settings.fullInterval
|
||||
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
|
||||
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
|
||||
return
|
||||
}
|
||||
|
||||
const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')
|
||||
|
||||
// resolve full record
|
||||
baseVm = await xapi.getRecord('VM', baseVm.$ref)
|
||||
|
||||
const baseUuidToSrcVdi = new Map()
|
||||
await asyncMap(await baseVm.$getDisks(), async baseRef => {
|
||||
const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
|
||||
const srcVdi = srcVdis[snapshotOf]
|
||||
if (srcVdi !== undefined) {
|
||||
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
|
||||
}
|
||||
})
|
||||
|
||||
const presentBaseVdis = new Map(baseUuidToSrcVdi)
|
||||
await this._callWriters(
|
||||
writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
|
||||
'writer.checkBaseVdis()',
|
||||
false
|
||||
)
|
||||
|
||||
const fullVdisRequired = new Set()
|
||||
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
|
||||
if (!presentBaseVdis.has(baseUuid)) {
|
||||
fullVdisRequired.add(srcVdi.uuid)
|
||||
}
|
||||
})
|
||||
|
||||
this._baseVm = baseVm
|
||||
this._fullVdisRequired = fullVdisRequired
|
||||
}
|
||||
|
||||
run = defer(this.run)
|
||||
async run($defer) {
|
||||
const settings = this._settings
|
||||
assert(
|
||||
!settings.offlineBackup || settings.snapshotRetention === 0,
|
||||
'offlineBackup is not compatible with snapshotRetention'
|
||||
)
|
||||
|
||||
await this._callWriters(async writer => {
|
||||
await writer.beforeBackup()
|
||||
$defer(() => writer.afterBackup())
|
||||
}, 'writer.beforeBackup()')
|
||||
|
||||
await this._fetchJobSnapshots()
|
||||
|
||||
if (this._isDelta) {
|
||||
await this._selectBaseVm()
|
||||
}
|
||||
|
||||
await this._cleanMetadata()
|
||||
await this._removeUnusedSnapshots()
|
||||
|
||||
const { vm } = this
|
||||
const isRunning = vm.power_state === 'Running'
|
||||
const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
|
||||
if (startAfter) {
|
||||
await vm.$callAsync('clean_shutdown')
|
||||
}
|
||||
|
||||
try {
|
||||
await this._snapshot()
|
||||
if (startAfter === 'snapshot') {
|
||||
ignoreErrors.call(vm.$callAsync('start', false, false))
|
||||
}
|
||||
|
||||
if (this._writers.size !== 0) {
|
||||
await (this._isDelta ? this._copyDelta() : this._copyFull())
|
||||
}
|
||||
} finally {
|
||||
if (startAfter) {
|
||||
ignoreErrors.call(vm.$callAsync('start', false, false))
|
||||
}
|
||||
|
||||
await this._fetchJobSnapshots()
|
||||
await this._removeUnusedSnapshots()
|
||||
}
|
||||
}
|
||||
}
|
||||
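A usage sketch showing the parameters this class expects, assuming they are provided by the enclosing backup job (all variables illustrative); job.mode selects delta or full writers as shown in the constructor.

const { VmBackup } = require('@xen-orchestra/backups/_VmBackup.js')

await new VmBackup({
  config,
  getSnapshotNameLabel: vm => `[XO Backup ${job.name}] ${vm.name_label}`,
  job,            // job.mode: 'delta' or 'full'
  remoteAdapters, // remote id → RemoteAdapter (backup writers)
  remotes,
  schedule,
  settings,       // merged job/schedule/VM settings
  srs,            // target SRs (replication writers)
  vm,             // XAPI VM record to back up
}).run()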
62  @xen-orchestra/backups/_XoMetadataBackup.js  Normal file
@@ -0,0 +1,62 @@
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
|
||||
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')
|
||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
||||
const { Task } = require('./Task.js')
|
||||
|
||||
exports.XoMetadataBackup = class XoMetadataBackup {
|
||||
constructor({ config, job, remoteAdapters, schedule, settings }) {
|
||||
this._config = config
|
||||
this._job = job
|
||||
this._remoteAdapters = remoteAdapters
|
||||
this._schedule = schedule
|
||||
this._settings = settings
|
||||
}
|
||||
|
||||
async run() {
|
||||
const timestamp = Date.now()
|
||||
|
||||
const { _job: job, _schedule: schedule } = this
|
||||
const scheduleDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
|
||||
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
|
||||
|
||||
const data = job.xoMetadata
|
||||
const fileName = `${dir}/data.json`
|
||||
|
||||
const metadata = JSON.stringify(
|
||||
{
|
||||
jobId: job.id,
|
||||
jobName: job.name,
|
||||
scheduleId: schedule.id,
|
||||
scheduleName: schedule.name,
|
||||
timestamp,
|
||||
},
|
||||
null,
|
||||
2
|
||||
)
|
||||
const metaDataFileName = `${dir}/metadata.json`
|
||||
|
||||
await asyncMap(
|
||||
Object.entries(this._remoteAdapters),
|
||||
([remoteId, adapter]) =>
|
||||
Task.run(
|
||||
{
|
||||
name: `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
|
||||
data: {
|
||||
id: remoteId,
|
||||
type: 'remote',
|
||||
},
|
||||
},
|
||||
async () => {
|
||||
const handler = adapter.handler
|
||||
const dirMode = this._config.dirMode
|
||||
await handler.outputFile(fileName, data, { dirMode })
|
||||
await handler.outputFile(metaDataFileName, metadata, {
|
||||
dirMode,
|
||||
})
|
||||
await adapter.deleteOldMetadataBackups(scheduleDir, this._settings.retentionXoMetadata)
|
||||
}
|
||||
).catch(() => {}) // errors are handled by logs
|
||||
)
|
||||
}
|
||||
}
|
||||
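The XO-config counterpart of the pool metadata backup above; a hedged sketch of a run, with illustrative option values.

const { XoMetadataBackup } = require('@xen-orchestra/backups/_XoMetadataBackup.js')

await new XoMetadataBackup({
  config: { dirMode: 0o700 },
  job,            // job.xoMetadata holds the serialized XO configuration
  remoteAdapters,
  schedule,
  settings: { retentionXoMetadata: 5 },
}).run()
// each remote ends up with xo-config-backups/<scheduleId>/<timestamp>/{data.json,metadata.json},
// older timestamps are removed according to the retention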
4  @xen-orchestra/backups/_backupType.js  Normal file
@@ -0,0 +1,4 @@
|
||||
exports.isMetadataFile = filename => filename.endsWith('.json')
|
||||
exports.isVhdFile = filename => filename.endsWith('.vhd')
|
||||
exports.isXvaFile = filename => filename.endsWith('.xva')
|
||||
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
|
||||
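These predicates are meant to be passed as the `filter` option of handler.list(), as listVmBackups() and listVhds() do above; a tiny illustration inside an async context, with placeholder paths.

const { isMetadataFile, isVhdFile } = require('@xen-orchestra/backups/_backupType.js')

const jsons = await handler.list('xo-vm-backups/<vmUuid>', { filter: isMetadataFile })
const vhds = await handler.list('xo-vm-backups/<vmUuid>/vdis/<jobId>/<vdiUuid>', { filter: isVhdFile })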
155  @xen-orchestra/backups/_backupWorker.js  Normal file
@@ -0,0 +1,155 @@
|
||||
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
|
||||
require('@xen-orchestra/log').createLogger('xo:backups:worker')
|
||||
)
|
||||
|
||||
const Disposable = require('promise-toolbox/Disposable.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const { compose } = require('@vates/compose')
|
||||
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
|
||||
const { deduped } = require('@vates/disposable/deduped.js')
|
||||
const { getHandler } = require('@xen-orchestra/fs')
|
||||
const { parseDuration } = require('@vates/parse-duration')
|
||||
const { Xapi } = require('@xen-orchestra/xapi')
|
||||
|
||||
const { Backup } = require('./Backup.js')
|
||||
const { RemoteAdapter } = require('./RemoteAdapter.js')
|
||||
const { Task } = require('./Task.js')
|
||||
|
||||
class BackupWorker {
|
||||
#config
|
||||
#job
|
||||
#recordToXapi
|
||||
#remoteOptions
|
||||
#remotes
|
||||
#schedule
|
||||
#xapiOptions
|
||||
#xapis
|
||||
|
||||
constructor({ config, job, recordToXapi, remoteOptions, remotes, resourceCacheDelay, schedule, xapiOptions, xapis }) {
|
||||
this.#config = config
|
||||
this.#job = job
|
||||
this.#recordToXapi = recordToXapi
|
||||
this.#remoteOptions = remoteOptions
|
||||
this.#remotes = remotes
|
||||
this.#schedule = schedule
|
||||
this.#xapiOptions = xapiOptions
|
||||
this.#xapis = xapis
|
||||
|
||||
const debounceResource = createDebounceResource()
|
||||
debounceResource.defaultDelay = parseDuration(resourceCacheDelay)
|
||||
this.debounceResource = debounceResource
|
||||
}
|
||||
|
||||
run() {
|
||||
return new Backup({
|
||||
config: this.#config,
|
||||
getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
|
||||
getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
|
||||
const xapiId = this.#recordToXapi[uuid]
|
||||
if (xapiId === undefined) {
|
||||
throw new Error('no XAPI associated to ' + uuid)
|
||||
}
|
||||
|
||||
const xapi = yield this.getXapi(this.#xapis[xapiId])
|
||||
return xapi.getRecordByUuid(type, uuid)
|
||||
}).bind(this),
|
||||
job: this.#job,
|
||||
schedule: this.#schedule,
|
||||
}).run()
|
||||
}
|
||||
|
||||
getAdapter = Disposable.factory(this.getAdapter)
|
||||
getAdapter = deduped(this.getAdapter, remote => [remote.url])
|
||||
getAdapter = compose(this.getAdapter, function (resource) {
|
||||
return this.debounceResource(resource)
|
||||
})
|
||||
async *getAdapter(remote) {
|
||||
const handler = getHandler(remote, this.#remoteOptions)
|
||||
await handler.sync()
|
||||
try {
|
||||
yield new RemoteAdapter(handler, {
|
||||
debounceResource: this.debounceResource,
|
||||
dirMode: this.#config.dirMode,
|
||||
})
|
||||
} finally {
|
||||
await handler.forget()
|
||||
}
|
||||
}
|
||||
|
||||
getXapi = Disposable.factory(this.getXapi)
|
||||
getXapi = deduped(this.getXapi, ({ url }) => [url])
|
||||
getXapi = compose(this.getXapi, function (resource) {
|
||||
return this.debounceResource(resource)
|
||||
})
|
||||
async *getXapi({ credentials: { username: user, password }, ...opts }) {
|
||||
const xapi = new Xapi({
|
||||
...this.#xapiOptions,
|
||||
...opts,
|
||||
auth: {
|
||||
user,
|
||||
password,
|
||||
},
|
||||
})
|
||||
|
||||
await xapi.connect()
|
||||
try {
|
||||
await xapi.objectsFetched
|
||||
|
||||
yield xapi
|
||||
} finally {
|
||||
await xapi.disconnect()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Received message:
|
||||
//
|
||||
// Message {
|
||||
// action: 'run'
|
||||
// data: object
|
||||
// runWithLogs: boolean
|
||||
// }
|
||||
//
|
||||
// Sent message:
|
||||
//
|
||||
// Message {
|
||||
// type: 'log' | 'result'
|
||||
// data?: object
|
||||
// status?: 'success' | 'failure'
|
||||
// result?: any
|
||||
// }
|
||||
process.on('message', async message => {
|
||||
if (message.action === 'run') {
|
||||
const backupWorker = new BackupWorker(message.data)
|
||||
try {
|
||||
const result = message.runWithLogs
|
||||
? await Task.run(
|
||||
{
|
||||
name: 'backup run',
|
||||
onLog: data =>
|
||||
process.send({
|
||||
data,
|
||||
type: 'log',
|
||||
}),
|
||||
},
|
||||
() => backupWorker.run()
|
||||
)
|
||||
: await backupWorker.run()
|
||||
|
||||
process.send({
|
||||
type: 'result',
|
||||
result,
|
||||
status: 'success',
|
||||
})
|
||||
} catch (error) {
|
||||
process.send({
|
||||
type: 'result',
|
||||
result: error,
|
||||
status: 'failure',
|
||||
})
|
||||
} finally {
|
||||
await ignoreErrors.call(backupWorker.debounceResource.flushAll())
|
||||
process.disconnect()
|
||||
}
|
||||
}
|
||||
})
|
||||
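The Received/Sent message shapes documented above imply a parent process along these lines; a hedged sketch using Node's child_process.fork, where the `data` payload mirrors the BackupWorker constructor parameters and every variable is assumed to be provided by the caller.

const { fork } = require('child_process')

const worker = fork(require.resolve('@xen-orchestra/backups/_backupWorker.js'))
worker.on('message', message => {
  if (message.type === 'log') {
    console.log(message.data) // forwarded Task logs (only when runWithLogs is true)
  } else if (message.type === 'result') {
    message.status === 'success'
      ? console.log('backup succeeded', message.result)
      : console.error('backup failed', message.result)
  }
})
worker.send({
  action: 'run',
  runWithLogs: true,
  data: { config, job, recordToXapi, remoteOptions, remotes, resourceCacheDelay, schedule, xapiOptions, xapis },
})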
20  @xen-orchestra/backups/_cancelableMap.js  Normal file
@@ -0,0 +1,20 @@
|
||||
const cancelable = require('promise-toolbox/cancelable.js')
|
||||
const CancelToken = require('promise-toolbox/CancelToken.js')
|
||||
|
||||
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
|
||||
//
|
||||
// If any of the executions fails, the cancel token will be triggered and the
|
||||
// promise will reject with the first error.
|
||||
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
|
||||
const { cancel, token } = CancelToken.source([$cancelToken])
|
||||
try {
|
||||
return await Promise.all(
|
||||
Array.from(iterable, function (item) {
|
||||
return callback.call(this, token, item)
|
||||
})
|
||||
)
|
||||
} catch (error) {
|
||||
await cancel()
|
||||
throw error
|
||||
}
|
||||
})
|
||||
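A minimal sketch of how cancelableMap is consumed (same pattern as exportDeltaVm below): each callback receives a token to forward to cancelable operations, and a failure in any item cancels the others.

const { cancelableMap } = require('@xen-orchestra/backups/_cancelableMap.js')
const { CancelToken } = require('promise-toolbox')

const results = await cancelableMap(CancelToken.none, [1, 2, 3], async (cancelToken, item) => {
  // forward cancelToken to any cancelable operation; if another item throws,
  // this token is triggered and the whole map rejects with the first error
  return item * 2
})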
332  @xen-orchestra/backups/_cleanVm.js  Normal file
@@ -0,0 +1,332 @@
|
||||
const assert = require('assert')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { default: Vhd, mergeVhd } = require('vhd-lib')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants.js')
|
||||
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
|
||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will be deleted
|
||||
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
let child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
onLog(`the parent ${parent} of the child ${child} is unused`)
|
||||
})
|
||||
|
||||
if (merge) {
|
||||
// `mergeVhd` does not work with a stream, so we must either:
|
||||
// - make it accept a stream
|
||||
// - or create a synthetic VHD which is not a stream
|
||||
if (children.length !== 1) {
|
||||
// TODO: implement merging multiple children
|
||||
children.length = 1
|
||||
child = children[0]
|
||||
}
|
||||
|
||||
onLog(`merging ${child} into ${parent}`)
|
||||
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
onLog(`merging ${child}: ${done}/${total}`)
|
||||
}
|
||||
}, 10e3)
|
||||
|
||||
await mergeVhd(
|
||||
handler,
|
||||
parent,
|
||||
handler,
|
||||
child,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children),
|
||||
{
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
clearInterval(handle)
|
||||
|
||||
await Promise.all([
|
||||
handler.rename(parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
onLog(`the VHD ${child} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused VHD ${child}`)
|
||||
return handler.unlink(child)
|
||||
}
|
||||
}),
|
||||
])
|
||||
}
|
||||
})
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
const INTERRUPTED_VHDS_REG = /^(?:(.+)\/)?\.(.+)\.merge.json$/
|
||||
const listVhds = async (handler, vmDir) => {
|
||||
const vhds = []
|
||||
const interruptedVhds = new Set()
|
||||
|
||||
await asyncMap(
|
||||
await handler.list(`${vmDir}/vdis`, {
|
||||
ignoreMissing: true,
|
||||
prependDir: true,
|
||||
}),
|
||||
async jobDir =>
|
||||
asyncMap(
|
||||
await handler.list(jobDir, {
|
||||
prependDir: true,
|
||||
}),
|
||||
async vdiDir => {
|
||||
const list = await handler.list(vdiDir, {
|
||||
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
|
||||
prependDir: true,
|
||||
})
|
||||
|
||||
list.forEach(file => {
|
||||
const res = INTERRUPTED_VHDS_REG.exec(file)
|
||||
if (res === null) {
|
||||
vhds.push(file)
|
||||
} else {
|
||||
const [, dir, file] = res
|
||||
interruptedVhds.add(`${dir}/${file}`)
|
||||
}
|
||||
})
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
return { vhds, interruptedVhds }
|
||||
}
|
||||
|
||||
exports.cleanVm = async function cleanVm(vmDir, { remove, merge, onLog = noop }) {
|
||||
const handler = this._handler
|
||||
|
||||
const vhds = new Set()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
const vhdsList = await listVhds(handler, vmDir)
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(vhdsList.vhds, async path => {
|
||||
try {
|
||||
const vhd = new Vhd(handler, path)
|
||||
await vhd.readHeaderAndFooter(!vhdsList.interruptedVhds.has(path))
|
||||
vhds.add(path)
|
||||
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
|
||||
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
if (parent in vhdChildren) {
|
||||
const error = new Error('this script does not support multiple VHD children')
|
||||
error.parent = parent
|
||||
error.child1 = vhdChildren[parent]
|
||||
error.child2 = path
|
||||
throw error // should we throw?
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
} catch (error) {
|
||||
onLog(`error while checking the VHD with path ${path}`, { error })
|
||||
if (error?.code === 'ERR_ASSERTION' && remove) {
|
||||
onLog(`deleting broken ${path}`)
|
||||
await handler.unlink(path)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
{
|
||||
const deletions = []
|
||||
|
||||
// return true if the VHD has been deleted or is missing
|
||||
const deleteIfOrphan = vhd => {
|
||||
const parent = vhdParents[vhd]
|
||||
if (parent === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
delete vhdParents[vhd]
|
||||
|
||||
deleteIfOrphan(parent)
|
||||
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhd)
|
||||
|
||||
onLog(`the parent ${parent} of the VHD ${vhd} is missing`)
|
||||
if (remove) {
|
||||
onLog(`deleting orphan VHD ${vhd}`)
|
||||
deletions.push(handler.unlink(vhd))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// > A property that is deleted before it has been visited will not be
|
||||
// > visited later.
|
||||
// >
|
||||
// > -- https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/for...in#Deleted_added_or_modified_properties
|
||||
for (const child in vhdParents) {
|
||||
deleteIfOrphan(child)
|
||||
}
|
||||
|
||||
await Promise.all(deletions)
|
||||
}
|
||||
|
||||
const jsons = []
|
||||
const xvas = new Set()
|
||||
const xvaSums = []
|
||||
const entries = await handler.list(vmDir, {
|
||||
prependDir: true,
|
||||
})
|
||||
entries.forEach(path => {
|
||||
if (isMetadataFile(path)) {
|
||||
jsons.push(path)
|
||||
} else if (isXvaFile(path)) {
|
||||
xvas.add(path)
|
||||
} else if (isXvaSumFile(path)) {
|
||||
xvaSums.push(path)
|
||||
}
|
||||
})
|
||||
|
||||
await asyncMap(xvas, async path => {
|
||||
// the check is not reliable enough to justify deleting the file, the best we can do is report
|
||||
// it
|
||||
if (!(await this.isValidXva(path))) {
|
||||
onLog(`the XVA with path ${path} is potentially broken`)
|
||||
}
|
||||
})
|
||||
|
||||
const unusedVhds = new Set(vhds)
|
||||
const unusedXvas = new Set(xvas)
|
||||
|
||||
// compile the list of unused XVAs and VHDs, and remove backup metadata which
|
||||
// references a missing XVA/VHD
|
||||
await asyncMap(jsons, async json => {
|
||||
const metadata = JSON.parse(await handler.readFile(json))
|
||||
const { mode } = metadata
|
||||
if (mode === 'full') {
|
||||
const linkedXva = resolve('/', vmDir, metadata.xva)
|
||||
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
} else {
|
||||
onLog(`the XVA linked to the metadata ${json} is missing`)
|
||||
if (remove) {
|
||||
onLog(`deleting incomplete backup ${json}`)
|
||||
await handler.unlink(json)
|
||||
}
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = (() => {
|
||||
const { vhds } = metadata
|
||||
return Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
||||
})()
|
||||
|
||||
// FIXME: find a better approach by keeping as much of the backup as
|
||||
// possible (existing disks) even if one disk is missing
|
||||
if (linkedVhds.every(_ => vhds.has(_))) {
|
||||
linkedVhds.forEach(_ => unusedVhds.delete(_))
|
||||
} else {
|
||||
onLog(`Some VHDs linked to the metadata ${json} are missing`)
|
||||
if (remove) {
|
||||
onLog(`deleting incomplete backup ${json}`)
|
||||
await handler.unlink(json)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// TODO: parallelize by vm/job/vdi
|
||||
const unusedVhdsDeletion = []
|
||||
{
|
||||
// VHD chains (as list from child to ancestor) to merge indexed by last
|
||||
// ancestor
|
||||
const vhdChainsToMerge = { __proto__: null }
|
||||
|
||||
const toCheck = new Set(unusedVhds)
|
||||
|
||||
const getUsedChildChainOrDelete = vhd => {
|
||||
if (vhd in vhdChainsToMerge) {
|
||||
const chain = vhdChainsToMerge[vhd]
|
||||
delete vhdChainsToMerge[vhd]
|
||||
return chain
|
||||
}
|
||||
|
||||
if (!unusedVhds.has(vhd)) {
|
||||
return [vhd]
|
||||
}
|
||||
|
||||
// no longer needs to be checked
|
||||
toCheck.delete(vhd)
|
||||
|
||||
const child = vhdChildren[vhd]
|
||||
if (child !== undefined) {
|
||||
const chain = getUsedChildChainOrDelete(child)
|
||||
if (chain !== undefined) {
|
||||
chain.push(vhd)
|
||||
return chain
|
||||
}
|
||||
}
|
||||
|
||||
onLog(`the VHD ${vhd} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused VHD ${vhd}`)
|
||||
unusedVhdsDeletion.push(handler.unlink(vhd))
|
||||
}
|
||||
}
|
||||
|
||||
toCheck.forEach(vhd => {
|
||||
vhdChainsToMerge[vhd] = getUsedChildChainOrDelete(vhd)
|
||||
})
|
||||
|
||||
// merge interrupted VHDs
|
||||
if (merge) {
|
||||
vhdsList.interruptedVhds.forEach(parent => {
|
||||
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
|
||||
})
|
||||
}
|
||||
|
||||
Object.keys(vhdChainsToMerge).forEach(key => {
|
||||
const chain = vhdChainsToMerge[key]
|
||||
if (chain !== undefined) {
|
||||
unusedVhdsDeletion.push(mergeVhdChain(chain, { handler, onLog, remove, merge }))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
await Promise.all([
|
||||
...unusedVhdsDeletion,
|
||||
asyncMap(unusedXvas, path => {
|
||||
onLog(`the XVA ${path} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused XVA ${path}`)
|
||||
return handler.unlink(path)
|
||||
}
|
||||
}),
|
||||
asyncMap(xvaSums, path => {
|
||||
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
|
||||
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
|
||||
onLog(`the XVA checksum ${path} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused XVA checksum ${path}`)
|
||||
return handler.unlink(path)
|
||||
}
|
||||
}
|
||||
}),
|
||||
])
|
||||
}
|
||||
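cleanVm is exposed on RemoteAdapter.prototype (see the Object.assign earlier in this diff), so a maintenance pass over one VM directory might look like this; the path is a placeholder and `adapter` is an existing RemoteAdapter.

// dry run: only report what would be merged or deleted
await adapter.cleanVm('xo-vm-backups/<vmUuid>', { onLog: message => console.log(message) })

// actually delete unused/orphan files and merge interrupted VHD chains
await adapter.cleanVm('xo-vm-backups/<vmUuid>', { remove: true, merge: true, onLog: console.log })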
349  @xen-orchestra/backups/_deltaVm.js  Normal file
@@ -0,0 +1,349 @@
|
||||
const compareVersions = require('compare-versions')
|
||||
const find = require('lodash/find.js')
|
||||
const groupBy = require('lodash/groupBy.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
|
||||
const omit = require('lodash/omit.js')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { CancelToken } = require('promise-toolbox')
|
||||
const { createVhdStreamWithLength } = require('vhd-lib')
|
||||
const { defer } = require('golike-defer')
|
||||
|
||||
const { cancelableMap } = require('./_cancelableMap.js')
|
||||
|
||||
const TAG_BASE_DELTA = 'xo:base_delta'
|
||||
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
|
||||
|
||||
const TAG_COPY_SRC = 'xo:copy_of'
|
||||
exports.TAG_COPY_SRC = TAG_COPY_SRC
|
||||
|
||||
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
|
||||
|
||||
exports.exportDeltaVm = async function exportDeltaVm(
|
||||
vm,
|
||||
baseVm,
|
||||
{
|
||||
cancelToken = CancelToken.none,
|
||||
|
||||
// Sets of UUIDs of VDIs that must be exported as full.
|
||||
fullVdisRequired = new Set(),
|
||||
|
||||
disableBaseTags = false,
|
||||
} = {}
|
||||
) {
|
||||
// refs of VM's VDIs → base's VDIs.
|
||||
const baseVdis = {}
|
||||
baseVm &&
|
||||
baseVm.$VBDs.forEach(vbd => {
|
||||
let vdi, snapshotOf
|
||||
if ((vdi = vbd.$VDI) && (snapshotOf = vdi.$snapshot_of) && !fullVdisRequired.has(snapshotOf.uuid)) {
|
||||
baseVdis[vdi.snapshot_of] = vdi
|
||||
}
|
||||
})
|
||||
|
||||
const streams = {}
|
||||
const vdis = {}
|
||||
const vbds = {}
|
||||
await cancelableMap(cancelToken, vm.$VBDs, async (cancelToken, vbd) => {
|
||||
let vdi
|
||||
if (vbd.type !== 'Disk' || !(vdi = vbd.$VDI)) {
|
||||
// Ignore this VBD.
|
||||
return
|
||||
}
|
||||
|
||||
// If the VDI name starts with `[NOBAK]`, do not export it.
|
||||
if (vdi.name_label.startsWith('[NOBAK]')) {
|
||||
// FIXME: find a way to not create the VDI snapshot in the
|
||||
// first place.
|
||||
//
|
||||
// The snapshot must not exist otherwise it could break the
|
||||
// next export.
|
||||
ignoreErrors.call(vdi.$destroy())
|
||||
return
|
||||
}
|
||||
|
||||
vbds[vbd.$ref] = vbd
|
||||
|
||||
const vdiRef = vdi.$ref
|
||||
if (vdiRef in vdis) {
|
||||
// This VDI has already been managed.
|
||||
return
|
||||
}
|
||||
|
||||
// Look for a snapshot of this vdi in the base VM.
|
||||
const baseVdi = baseVdis[vdi.snapshot_of]
|
||||
|
||||
vdis[vdiRef] = {
|
||||
...vdi,
|
||||
other_config: {
|
||||
...vdi.other_config,
|
||||
[TAG_BASE_DELTA]: baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
|
||||
},
|
||||
$snapshot_of$uuid: vdi.$snapshot_of?.uuid,
|
||||
$SR$uuid: vdi.$SR.uuid,
|
||||
}
|
||||
|
||||
streams[`${vdiRef}.vhd`] = await vdi.$exportContent({
|
||||
baseRef: baseVdi?.$ref,
|
||||
cancelToken,
|
||||
format: 'vhd',
|
||||
})
|
||||
})
|
||||
|
||||
const suspendVdi = vm.$suspend_VDI
|
||||
if (suspendVdi !== undefined) {
|
||||
const vdiRef = suspendVdi.$ref
|
||||
vdis[vdiRef] = {
|
||||
...suspendVdi,
|
||||
$SR$uuid: suspendVdi.$SR.uuid,
|
||||
}
|
||||
streams[`${vdiRef}.vhd`] = await suspendVdi.$exportContent({
|
||||
cancelToken,
|
||||
format: 'vhd',
|
||||
})
|
||||
}
|
||||
|
||||
const vifs = {}
|
||||
vm.$VIFs.forEach(vif => {
|
||||
const network = vif.$network
|
||||
vifs[vif.$ref] = {
|
||||
...vif,
|
||||
$network$uuid: network.uuid,
|
||||
$network$name_label: network.name_label,
|
||||
$network$VLAN: network.$PIFs[0]?.VLAN,
|
||||
}
|
||||
})
|
||||
|
||||
return Object.defineProperty(
|
||||
{
|
||||
version: '1.1.0',
|
||||
vbds,
|
||||
vdis,
|
||||
vifs,
|
||||
vm: {
|
||||
...vm,
|
||||
other_config:
|
||||
baseVm && !disableBaseTags
|
||||
? {
|
||||
...vm.other_config,
|
||||
[TAG_BASE_DELTA]: baseVm.uuid,
|
||||
}
|
||||
: omit(vm.other_config, TAG_BASE_DELTA),
|
||||
},
|
||||
},
|
||||
'streams',
|
||||
{
|
||||
configurable: true,
|
||||
value: streams,
|
||||
writable: true,
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
$defer,
|
||||
deltaVm,
|
||||
sr,
|
||||
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
|
||||
) {
|
||||
const { version } = deltaVm
|
||||
if (compareVersions(version, '1.0.0') < 0) {
|
||||
throw new Error(`Unsupported delta backup version: ${version}`)
|
||||
}
|
||||
|
||||
const vmRecord = deltaVm.vm
|
||||
const xapi = sr.$xapi
|
||||
|
||||
let baseVm
|
||||
if (detectBase) {
|
||||
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
|
||||
if (remoteBaseVmUuid) {
|
||||
baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)
|
||||
|
||||
if (!baseVm) {
|
||||
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const baseVdis = {}
|
||||
baseVm &&
|
||||
baseVm.$VBDs.forEach(vbd => {
|
||||
const vdi = vbd.$VDI
|
||||
if (vdi !== undefined) {
|
||||
baseVdis[vbd.VDI] = vbd.$VDI
|
||||
}
|
||||
})
|
||||
const vdiRecords = deltaVm.vdis
|
||||
|
||||
// 0. Create suspend_VDI
|
||||
let suspendVdi
|
||||
if (vmRecord.power_state === 'Suspended') {
|
||||
const vdi = vdiRecords[vmRecord.suspend_VDI]
|
||||
suspendVdi = await xapi.getRecord(
|
||||
'VDI',
|
||||
await xapi.VDI_create({
|
||||
...vdi,
|
||||
other_config: {
|
||||
...vdi.other_config,
|
||||
[TAG_BASE_DELTA]: undefined,
|
||||
[TAG_COPY_SRC]: vdi.uuid,
|
||||
},
|
||||
sr: mapVdisSrs[vdi.uuid] ?? sr.$ref,
|
||||
})
|
||||
)
|
||||
$defer.onFailure(() => suspendVdi.$destroy())
|
||||
}
|
||||
|
||||
// 1. Create the VM.
|
||||
const vmRef = await xapi.VM_create(
|
||||
{
|
||||
...vmRecord,
|
||||
affinity: undefined,
|
||||
blocked_operations: {
|
||||
...vmRecord.blocked_operations,
|
||||
start: 'Importing…',
|
||||
},
|
||||
ha_always_run: false,
|
||||
is_a_template: false,
|
||||
name_label: '[Importing…] ' + vmRecord.name_label,
|
||||
other_config: {
|
||||
...vmRecord.other_config,
|
||||
[TAG_COPY_SRC]: vmRecord.uuid,
|
||||
},
|
||||
},
|
||||
{
|
||||
bios_strings: vmRecord.bios_strings,
|
||||
generateMacSeed: newMacAddresses,
|
||||
suspend_VDI: suspendVdi?.$ref,
|
||||
}
|
||||
)
|
||||
$defer.onFailure.call(xapi, 'VM_destroy', vmRef)
|
||||
|
||||
// 2. Delete all VBDs which may have been created by the import.
|
||||
await asyncMap(await xapi.getField('VM', vmRef, 'VBDs'), ref => ignoreErrors.call(xapi.call('VBD.destroy', ref)))
|
||||
|
||||
// 3. Create VDIs & VBDs.
|
||||
const vbdRecords = deltaVm.vbds
|
||||
const vbds = groupBy(vbdRecords, 'VDI')
|
||||
const newVdis = {}
|
||||
await asyncMap(Object.keys(vdiRecords), async vdiRef => {
|
||||
const vdi = vdiRecords[vdiRef]
|
||||
let newVdi
|
||||
|
||||
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
|
||||
if (remoteBaseVdiUuid) {
|
||||
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
|
||||
if (!baseVdi) {
|
||||
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
|
||||
}
|
||||
|
||||
newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
|
||||
$defer.onFailure(() => newVdi.$destroy())
|
||||
|
||||
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
|
||||
} else if (vdiRef === vmRecord.suspend_VDI) {
|
||||
// the suspend VDI has already been created
|
||||
newVdi = suspendVdi
|
||||
} else {
|
||||
newVdi = await xapi.getRecord(
|
||||
'VDI',
|
||||
await xapi.VDI_create({
|
||||
...vdi,
|
||||
other_config: {
|
||||
...vdi.other_config,
|
||||
[TAG_BASE_DELTA]: undefined,
|
||||
[TAG_COPY_SRC]: vdi.uuid,
|
||||
},
|
||||
SR: mapVdisSrs[vdi.uuid] ?? sr.$ref,
|
||||
})
|
||||
)
|
||||
$defer.onFailure(() => newVdi.$destroy())
|
||||
}
|
||||
|
||||
const vdiVbds = vbds[vdiRef]
|
||||
if (vdiVbds !== undefined) {
|
||||
await asyncMap(Object.values(vdiVbds), vbd =>
|
||||
xapi.VBD_create({
|
||||
...vbd,
|
||||
VDI: newVdi.$ref,
|
||||
VM: vmRef,
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
newVdis[vdiRef] = newVdi
|
||||
})
|
||||
|
||||
const networksByNameLabelByVlan = {}
|
||||
let defaultNetwork
|
||||
Object.values(xapi.objects.all).forEach(object => {
|
||||
if (object.$type === 'network') {
|
||||
const pif = object.$PIFs[0]
|
||||
if (pif === undefined) {
|
||||
// ignore network
|
||||
return
|
||||
}
|
||||
const vlan = pif.VLAN
|
||||
const networksByNameLabel = networksByNameLabelByVlan[vlan] || (networksByNameLabelByVlan[vlan] = {})
|
||||
defaultNetwork = networksByNameLabel[object.name_label] = object
|
||||
}
|
||||
})
|
||||
|
||||
const { streams } = deltaVm
|
||||
|
||||
await Promise.all([
|
||||
// Import VDI contents.
|
||||
cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
|
||||
for (let stream of ensureArray(streams[`${id}.vhd`])) {
|
||||
if (typeof stream === 'function') {
|
||||
stream = await stream()
|
||||
}
|
||||
if (stream.length === undefined) {
|
||||
stream = await createVhdStreamWithLength(stream)
|
||||
}
|
||||
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
|
||||
}
|
||||
}),
|
||||
|
||||
// Wait for VDI export tasks (if any) termination.
|
||||
Promise.all(Object.values(streams).map(stream => stream.task)),
|
||||
|
||||
// Create VIFs.
|
||||
asyncMap(Object.values(deltaVm.vifs), vif => {
|
||||
let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)
|
||||
|
||||
if (network === undefined) {
|
||||
const { $network$VLAN: vlan = -1 } = vif
|
||||
const networksByNameLabel = networksByNameLabelByVlan[vlan]
|
||||
if (networksByNameLabel !== undefined) {
|
||||
network = networksByNameLabel[vif.$network$name_label]
|
||||
if (network === undefined) {
|
||||
network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
|
||||
}
|
||||
} else {
|
||||
network = defaultNetwork
|
||||
}
|
||||
}
|
||||
|
||||
if (network) {
|
||||
return xapi.VIF_create(
|
||||
{
|
||||
...vif,
|
||||
network: network.$ref,
|
||||
VM: vmRef,
|
||||
},
|
||||
{
|
||||
MAC: newMacAddresses ? undefined : vif.MAC,
|
||||
}
|
||||
)
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
await Promise.all([
|
||||
deltaVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
|
||||
xapi.setField('VM', vmRef, 'name_label', deltaVm.vm.name_label),
|
||||
])
|
||||
|
||||
return vmRef
|
||||
})
|
||||
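Taken together, exportDeltaVm and importDeltaVm support a replication-style flow roughly like this, assuming `vm`, `baseVm` and `sr` are XAPI records from the same client library.

const { exportDeltaVm, importDeltaVm } = require('@xen-orchestra/backups/_deltaVm.js')

// export VDIs as deltas against baseVm, except the ones listed in fullVdisRequired
const deltaVm = await exportDeltaVm(vm, baseVm, { fullVdisRequired: new Set() })

// recreate the VM (VDIs, VBDs, VIFs) on the target SR
const vmRef = await importDeltaVm(deltaVm, sr, { newMacAddresses: true })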
@@ -1,4 +1,4 @@
function extractIdsFromSimplePattern(pattern) {
exports.extractIdsFromSimplePattern = function extractIdsFromSimplePattern(pattern) {
  if (pattern === undefined) {
    return []
  }
@@ -27,4 +27,3 @@ function extractIdsFromSimplePattern(pattern) {

  throw new Error('invalid pattern')
}
exports.extractIdsFromSimplePattern = extractIdsFromSimplePattern
@xen-orchestra/backups/_forkStreamUnpipe.js (Normal file, 28 lines)
@@ -0,0 +1,28 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
  const { forks = 0 } = stream
  stream.forks = forks + 1

  const proxy = new PassThrough()
  stream.pipe(proxy)
  eos(stream, error => {
    if (error !== undefined) {
      proxy.destroy(error)
    }
  })
  eos(proxy, _ => {
    stream.forks--
    stream.unpipe(proxy)

    if (stream.forks === 0) {
      stream.destroy(new Error('no more consumers for this stream'))
    }
  })
  return proxy
}
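A hedged usage sketch for forkStreamUnpipe: each call returns an independent PassThrough fed by the same source, and the source is destroyed once its last fork has ended (file names are hypothetical):

const { createReadStream, createWriteStream } = require('fs')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')

const source = createReadStream('./export.vhd') // hypothetical input file

// both consumers receive the full data; an error in one fork only unpipes that fork
forkStreamUnpipe(source).pipe(createWriteStream('./copy-1.vhd'))
forkStreamUnpipe(source).pipe(createWriteStream('./copy-2.vhd'))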
@xen-orchestra/backups/_getOldEntries.js (Normal file, 4 lines)
@@ -0,0 +1,4 @@
// returns all entries except the last `retention` ones
exports.getOldEntries = function getOldEntries(retention, entries) {
  return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
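For example, with a retention of 2 only the two most recent entries are kept and everything older is reported as deletable; a retention of 0 marks every entry as old:

const { getOldEntries } = require('./_getOldEntries.js')

const entries = ['20210101', '20210102', '20210103', '20210104'] // oldest first

getOldEntries(2, entries) // ['20210101', '20210102']
getOldEntries(0, entries) // all four entries
getOldEntries(2, undefined) // []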
@xen-orchestra/backups/_getTmpDir.js (Normal file, 20 lines)
@@ -0,0 +1,20 @@
const Disposable = require('promise-toolbox/Disposable.js')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')

const MAX_ATTEMPTS = 3

exports.getTmpDir = async function getTmpDir() {
  for (let i = 0; true; ++i) {
    const path = join(tmpdir(), Math.random().toString(36).slice(2))
    try {
      await mkdir(path)
      return new Disposable(() => rmdir(path), path)
    } catch (error) {
      if (i === MAX_ATTEMPTS) {
        throw error
      }
    }
  }
}
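getTmpDir resolves to a promise-toolbox Disposable wrapping the directory path, so the directory is removed once its consumer is done. A hedged usage sketch, assuming Disposable.use from promise-toolbox:

const Disposable = require('promise-toolbox/Disposable.js')
const { getTmpDir } = require('./_getTmpDir.js')

async function main() {
  await Disposable.use(getTmpDir(), async tmpDir => {
    // the directory exists for the duration of this callback
    console.log('working in', tmpDir)
  })
  // the rmdir disposer has run at this point
}

main().catch(console.error)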
@xen-orchestra/backups/_getVmBackupDir.js (Normal file, 6 lines)
@@ -0,0 +1,6 @@
const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

exports.getVmBackupDir = function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
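For reference, the helper simply prefixes a VM UUID with the xo-vm-backups directory:

const { getVmBackupDir } = require('./_getVmBackupDir.js')

getVmBackupDir('01234567-89ab-cdef-0123-456789abcdef')
// 'xo-vm-backups/01234567-89ab-cdef-0123-456789abcdef'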
@@ -1,13 +1,10 @@
const assert = require('assert')
const fs = require('fs-extra')

const isGzipFile = async fd => {
const isGzipFile = async (handler, fd) => {
  // https://tools.ietf.org/html/rfc1952.html#page-5
  const magicNumber = Buffer.allocUnsafe(2)
  assert.strictEqual(
    (await fs.read(fd, magicNumber, 0, magicNumber.length, 0)).bytesRead,
    magicNumber.length
  )

  assert.strictEqual((await handler.read(fd, magicNumber, 0)).bytesRead, magicNumber.length)
  return magicNumber[0] === 31 && magicNumber[1] === 139
}

@@ -24,35 +21,33 @@ const isGzipFile = async fd => {
// /^Ref:\d+/\d+\.checksum$/ and then validating the tar structure from it
//
// https://github.com/npm/node-tar/issues/234#issuecomment-538190295
const isValidTar = async (size, fd) => {
const isValidTar = async (handler, size, fd) => {
  if (size <= 1024 || size % 512 !== 0) {
    return false
  }

  const buf = Buffer.allocUnsafe(1024)
  assert.strictEqual(
    (await fs.read(fd, buf, 0, buf.length, size - buf.length)).bytesRead,
    buf.length
  )
  assert.strictEqual((await handler.read(fd, buf, size - buf.length)).bytesRead, buf.length)
  return buf.every(_ => _ === 0)
}

// TODO: find a heuristic for compressed files
const isValidXva = async path => {
async function isValidXva(path) {
  const handler = this._handler
  try {
    const fd = await fs.open(path, 'r')
    const fd = await handler.openFile(path, 'r')
    try {
      const { size } = await fs.fstat(fd)
      const size = await handler.getSize(fd)
      if (size < 20) {
        // neither a valid gzip nor tar
        return false
      }

      return (await isGzipFile(fd))
      return (await isGzipFile(handler, fd))
        ? true // gzip files cannot be validated at this time
        : await isValidTar(size, fd)
        : await isValidTar(handler, size, fd)
    } finally {
      fs.close(fd).catch(noop)
      handler.closeFile(fd).catch(noop)
    }
  } catch (error) {
    // never throw, log and report as valid to avoid side effects
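The gzip detection above relies on the two-byte magic number from RFC 1952 (0x1f 0x8b, i.e. 31 and 139). A hedged standalone sketch of the same check against a local file, using plain fs-extra rather than a remote handler:

const fs = require('fs-extra')

async function isGzip(path) {
  const magicNumber = Buffer.allocUnsafe(2)
  const fd = await fs.open(path, 'r')
  try {
    const { bytesRead } = await fs.read(fd, magicNumber, 0, magicNumber.length, 0)
    return bytesRead === magicNumber.length && magicNumber[0] === 31 && magicNumber[1] === 139
  } finally {
    await fs.close(fd)
  }
}

// isGzip('./backup.xva').then(console.log) // hypothetical file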
@xen-orchestra/backups/_listPartitions.js (Normal file, 52 lines)
@@ -0,0 +1,52 @@
const fromCallback = require('promise-toolbox/fromCallback.js')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

const { debug } = createLogger('xo:backups:listPartitions')

const IGNORED_PARTITION_TYPES = {
  // https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
  0x05: true,
  0x0f: true,
  0x15: true,
  0x5e: true,
  0x5f: true,
  0x85: true,
  0x91: true,
  0x9b: true,
  0xc5: true,
  0xcf: true,
  0xd5: true,

  0x82: true, // swap
}

const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE

const parsePartxLine = createParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
  valueTransform: (value, key) => (key === 'start' || key === 'size' || key === 'type' ? +value : value),
})

// returns an empty array in case of a non-partitioned disk
exports.listPartitions = async function listPartitions(devicePath) {
  const parts = await fromCallback(execFile, 'partx', [
    '--bytes',
    '--output=NR,START,SIZE,NAME,UUID,TYPE',
    '--pairs',
    devicePath,
  ]).catch(error => {
    // partx returns 1 since v2.33 when failing to read partitions.
    //
    // Prior versions are correctly handled by the nominal case.
    debug('listPartitions', { error })
    return ''
  })

  return parts
    .split(/\r?\n/)
    .map(parsePartxLine)
    .filter(({ type }) => type != null && !(type in IGNORED_PARTITION_TYPES))
}
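listPartitions shells out to partx and parses its --pairs output with parse-pairs, renaming UUID to id and coercing the numeric fields. A rough illustration of a single line (the exact partx output format can vary between versions):

const { createParser } = require('parse-pairs')

const parsePartxLine = createParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
  valueTransform: (value, key) => (key === 'start' || key === 'size' || key === 'type' ? +value : value),
})

// hypothetical partx --pairs line for an ext4 data partition
const line = 'NR="1" START="2048" SIZE="524288000" NAME="" UUID="abcd-01" TYPE="0x83"'

console.log(parsePartxLine(line))
// { nr: '1', start: 2048, size: 524288000, name: '', id: 'abcd-01', type: 131 }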
@xen-orchestra/backups/_lvm.js (Normal file, 29 lines)
@@ -0,0 +1,29 @@
const fromCallback = require('promise-toolbox/fromCallback.js')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

// ===================================================================

const parse = createParser({
  keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => async (fields, ...args) => {
  const info = await fromCallback(execFile, command, [
    '--noheading',
    '--nosuffix',
    '--nameprefixes',
    '--unbuffered',
    '--units',
    'b',
    '-o',
    String(fields),
    ...args,
  ])
  return info
    .trim()
    .split(/\r?\n/)
    .map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}

exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')
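Both exports accept either a single field name, which yields an array of values, or an array of field names, which yields an array of objects whose keys are the LVM field names lowercased with the LVM2_ prefix stripped. A hedged usage sketch against a hypothetical volume group:

const { lvs, pvs } = require('./_lvm.js')

async function main() {
  // single field: array of values, one per logical volume
  const names = await lvs('lv_name', 'vg_data') // 'vg_data' is hypothetical

  // array of fields: array of objects, e.g. { lv_name: '...', lv_size: '...' }
  const volumes = await lvs(['lv_name', 'lv_size'], 'vg_data')

  const devices = await pvs('pv_name')
  console.log(names, volumes, devices)
}

main().catch(console.error)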
@xen-orchestra/backups/_watchStreamSize.js (Normal file, 7 lines)
@@ -0,0 +1,7 @@
exports.watchStreamSize = function watchStreamSize(stream, container = { size: 0 }) {
  stream.on('data', data => {
    container.size += data.length
  })
  stream.pause()
  return container
}
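The returned container is shared with the caller and updated as data flows, which lets the transferred size be read once the stream has been consumed. A minimal sketch (input file name is hypothetical):

const { createReadStream, createWriteStream } = require('fs')
const { once } = require('events')
const { watchStreamSize } = require('./_watchStreamSize.js')

async function main() {
  const stream = createReadStream('./export.xva') // hypothetical input
  const sizeContainer = watchStreamSize(stream)

  stream.pipe(createWriteStream('/dev/null'))
  await once(stream, 'end')

  console.log(`transferred ${sizeContainer.size} bytes`)
}

main().catch(console.error)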
@xen-orchestra/backups/formatVmBackups.js (Normal file, 34 lines)
@@ -0,0 +1,34 @@
const mapValues = require('lodash/mapValues.js')
const { dirname } = require('path')

function formatVmBackup(backup) {
  return {
    disks:
      backup.vhds === undefined
        ? []
        : Object.keys(backup.vhds).map(vdiId => {
            const vdi = backup.vdis[vdiId]
            return {
              id: `${dirname(backup._filename)}/${backup.vhds[vdiId]}`,
              name: vdi.name_label,
              uuid: vdi.uuid,
            }
          }),

    id: backup.id,
    jobId: backup.jobId,
    mode: backup.mode,
    scheduleId: backup.scheduleId,
    size: backup.size,
    timestamp: backup.timestamp,
    vm: {
      name_description: backup.vm.name_description,
      name_label: backup.vm.name_label,
    },
  }
}

// format all backups as returned by RemoteAdapter#listAllVmBackups()
exports.formatVmBackups = function formatVmBackups(backupsByVM) {
  return mapValues(backupsByVM, backups => backups.map(formatVmBackup))
}
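A hedged sketch of the transformation: the input is keyed by VM UUID, each record carrying the raw metadata written by the backup job, and every record is reshaped into the summary form above (all values hypothetical):

const { formatVmBackups } = require('./formatVmBackups.js')

const backupsByVM = {
  'vm-uuid-1': [
    {
      _filename: '/xo-vm-backups/vm-uuid-1/20210101T000000Z.json',
      id: 'backup-1',
      jobId: 'job-1',
      mode: 'delta',
      scheduleId: 'schedule-1',
      size: 1024,
      timestamp: 1609459200000,
      vdis: { 'vdi-1': { name_label: 'disk 0', uuid: 'vdi-uuid-1' } },
      vhds: { 'vdi-1': 'vdis/job-1/vdi-uuid-1/20210101T000000Z.vhd' },
      vm: { name_description: '', name_label: 'my VM' },
    },
  ],
}

// each backup now exposes a disks array with one entry per VHD
console.log(JSON.stringify(formatVmBackups(backupsByVM), null, 2))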
@@ -1,7 +0,0 @@
// returns all entries but the last retention-th
exports.getOldEntries = (retention, entries) =>
  entries === undefined
    ? []
    : retention > 0
    ? entries.slice(0, -retention)
    : entries
@@ -8,16 +8,37 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.1.1",
  "version": "0.11.0",
  "engines": {
    "node": ">=8.10"
    "node": ">=14.6"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "dependencies": {
    "@vates/compose": "^2.0.0",
    "@vates/disposable": "^0.1.1",
    "@vates/parse-duration": "^0.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/fs": "^0.17.0",
    "@xen-orchestra/log": "^0.2.0",
    "@xen-orchestra/template": "^0.1.0",
    "compare-versions": "^3.6.0",
    "d3-time-format": "^3.0.0",
    "fs-extra": "^9.0.0"
    "end-of-stream": "^1.4.4",
    "fs-extra": "^9.0.0",
    "golike-defer": "^0.5.1",
    "limit-concurrency-decorator": "^0.5.0",
    "lodash": "^4.17.20",
    "node-zone": "^0.4.0",
    "parse-pairs": "^1.1.0",
    "pump": "^3.0.0",
    "promise-toolbox": "^0.19.2",
    "vhd-lib": "^1.0.0",
    "yazl": "^2.5.1"
  },
  "peerDependencies": {
    "@xen-orchestra/xapi": "^0.6.2"
  },
  "license": "AGPL-3.0-or-later",
  "author": {
@xen-orchestra/backups/parseMetadataBackupId.js (Normal file, 23 lines)
@@ -0,0 +1,23 @@
const { DIR_XO_CONFIG_BACKUPS, DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')

exports.parseMetadataBackupId = function parseMetadataBackupId(backupId) {
  const [dir, ...rest] = backupId.split('/')
  if (dir === DIR_XO_CONFIG_BACKUPS) {
    const [scheduleId, timestamp] = rest
    return {
      type: 'xoConfig',
      scheduleId,
      timestamp,
    }
  } else if (dir === DIR_XO_POOL_METADATA_BACKUPS) {
    const [scheduleId, poolUuid, timestamp] = rest
    return {
      type: 'pool',
      poolUuid,
      scheduleId,
      timestamp,
    }
  }

  throw new Error(`not supported backup dir (${dir})`)
}
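Backup IDs are slash-separated paths whose first segment selects the backup kind. Assuming the RemoteAdapter constants resolve to 'xo-config-backups' and 'xo-pool-metadata-backups', the parsing would look like this:

// parseMetadataBackupId('xo-config-backups/schedule-1/20210101T000000Z')
// { type: 'xoConfig', scheduleId: 'schedule-1', timestamp: '20210101T000000Z' }

// parseMetadataBackupId('xo-pool-metadata-backups/schedule-1/pool-uuid/20210101T000000Z')
// { type: 'pool', poolUuid: 'pool-uuid', scheduleId: 'schedule-1', timestamp: '20210101T000000Z' }

// any other first segment throws: not supported backup dir (...)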
@xen-orchestra/backups/runBackupWorker.js (Normal file, 38 lines)
@@ -0,0 +1,38 @@
const path = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { fork } = require('child_process')

const { warn } = createLogger('xo:backups:backupWorker')

const PATH = path.resolve(__dirname, '_backupWorker.js')

exports.runBackupWorker = function runBackupWorker(params, onLog) {
  return new Promise((resolve, reject) => {
    const worker = fork(PATH)

    worker.on('exit', code => reject(new Error(`worker exited with code ${code}`)))
    worker.on('error', reject)

    worker.on('message', message => {
      try {
        if (message.type === 'result') {
          if (message.status === 'success') {
            resolve(message.result)
          } else {
            reject(message.result)
          }
        } else if (message.type === 'log') {
          onLog(message.data)
        }
      } catch (error) {
        warn(error)
      }
    })

    worker.send({
      action: 'run',
      data: params,
      runWithLogs: onLog !== undefined,
    })
  })
}
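The parent process drives the worker over the child-process IPC channel: it sends a single 'run' message, forwards 'log' messages to onLog, and settles on the first 'result' message. A hedged usage sketch (the params shape is only illustrative):

const { runBackupWorker } = require('./runBackupWorker.js')

async function main() {
  const result = await runBackupWorker(
    { job: { id: 'job-1' }, schedule: { id: 'schedule-1' } }, // hypothetical params
    log => console.log('worker log:', log)
  )
  console.log('worker result:', result)
}

main().catch(console.error)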
@@ -1,11 +0,0 @@
exports.watchStreamSize = stream => {
  const container = { size: 0 }
  const isPaused = stream.isPaused()
  stream.on('data', data => {
    container.size += data.length
  })
  if (isPaused) {
    stream.pause()
  }
  return container
}
@xen-orchestra/backups/writers/DeltaBackupWriter.js (Normal file, 227 lines)
@@ -0,0 +1,227 @@
const assert = require('assert')
const map = require('lodash/map.js')
const mapValues = require('lodash/mapValues.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, default: Vhd } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { dirname } = require('path')

const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { Task } = require('../Task.js')

const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')

const { warn } = createLogger('xo:backups:DeltaBackupWriter')

exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
  async checkBaseVdis(baseUuidToSrcVdi) {
    const { handler } = this._adapter
    const backup = this._backup

    const backupDir = getVmBackupDir(backup.vm.uuid)
    const vdisDir = `${backupDir}/vdis/${backup.job.id}`

    await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
      let found = false
      try {
        const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
          filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
          prependDir: true,
        })
        await asyncMap(vhds, async path => {
          try {
            await checkVhdChain(handler, path)

            const vhd = new Vhd(handler, path)
            await vhd.readHeaderAndFooter()
            found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
          } catch (error) {
            warn('checkBaseVdis', { error })
            await ignoreErrors.call(handler.unlink(path))
          }
        })
      } catch (error) {
        warn('checkBaseVdis', { error })
      }
      if (!found) {
        baseUuidToSrcVdi.delete(baseUuid)
      }
    })
  }

  async beforeBackup() {
    await super.beforeBackup()
    return this._cleanVm({ merge: true })
  }

  prepare({ isFull }) {
    // create the task related to this export and ensure all methods are called in this context
    const task = new Task({
      name: 'export',
      data: {
        id: this._remoteId,
        isFull,
        type: 'remote',
      },
    })
    this.transfer = task.wrapFn(this.transfer)
    this.cleanup = task.wrapFn(this.cleanup, true)

    return task.run(() => this._prepare())
  }

  async _prepare() {
    const adapter = this._adapter
    const settings = this._settings
    const { scheduleId, vm } = this._backup

    const oldEntries = getOldEntries(
      settings.exportRetention - 1,
      await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
    )
    this._oldEntries = oldEntries

    // FIXME: implement optimized multiple VHDs merging with synthetic
    // delta
    //
    // For the time being, limit the number of deleted backups per run
    // because merging can take a very long time and can lead to an
    // interrupted backup with a broken VHD chain.
    //
    // The old backups will eventually be merged in future runs of the
    // job.
    const { maxMergedDeltasPerRun } = this._settings
    if (oldEntries.length > maxMergedDeltasPerRun) {
      oldEntries.length = maxMergedDeltasPerRun
    }

    if (settings.deleteFirst) {
      await this._deleteOldEntries()
    }
  }

  async cleanup() {
    if (!this._settings.deleteFirst) {
      await this._deleteOldEntries()
    }
  }

  async _deleteOldEntries() {
    return Task.run({ name: 'merge' }, async () => {
      const adapter = this._adapter
      const oldEntries = this._oldEntries

      let size = 0
      // delete sequentially from newest to oldest to avoid unnecessary merges
      for (let i = oldEntries.length; i-- > 0; ) {
        size += await adapter.deleteDeltaVmBackups([oldEntries[i]])
      }
      return {
        size,
      }
    })
  }

  async transfer({ timestamp, deltaExport, sizeContainers }) {
    const adapter = this._adapter
    const backup = this._backup

    const { job, scheduleId, vm } = backup

    const jobId = job.id
    const handler = adapter.handler
    const backupDir = getVmBackupDir(vm.uuid)

    // TODO: clean VM backup directory

    const basename = formatFilenameDate(timestamp)
    const vhds = mapValues(
      deltaExport.vdis,
      vdi =>
        `vdis/${jobId}/${
          vdi.type === 'suspend'
            ? // doesn't make sense to group by parent for memory because we
              // don't do delta for it
              vdi.uuid
            : vdi.$snapshot_of$uuid
        }/${basename}.vhd`
    )

    const metadataFilename = `${backupDir}/${basename}.json`
    const metadataContent = {
      jobId,
      mode: job.mode,
      scheduleId,
      timestamp,
      vbds: deltaExport.vbds,
      vdis: deltaExport.vdis,
      version: '2.0.0',
      vifs: deltaExport.vifs,
      vhds,
      vm,
      vmSnapshot: this._backup.exportedVm,
    }

    const { size } = await Task.run({ name: 'transfer' }, async () => {
      await Promise.all(
        map(deltaExport.vdis, async (vdi, id) => {
          const path = `${backupDir}/${vhds[id]}`

          const isDelta = vdi.other_config['xo:base_delta'] !== undefined
          let parentPath
          if (isDelta) {
            const vdiDir = dirname(path)
            parentPath = (
              await handler.list(vdiDir, {
                filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
                prependDir: true,
              })
            )
              .sort()
              .pop()

            assert.notStrictEqual(parentPath, undefined, `missing parent of ${id}`)

            parentPath = parentPath.slice(1) // remove leading slash

            // TODO remove when this has been done before the export
            await checkVhd(handler, parentPath)
          }

          await adapter.outputStream(path, deltaExport.streams[`${id}.vhd`], {
            // no checksum for VHDs, because they will be invalidated by
            // merges and chainings
            checksum: false,
            validator: tmpPath => checkVhd(handler, tmpPath),
          })

          if (isDelta) {
            await chainVhd(handler, parentPath, handler, path)
          }

          // set the correct UUID in the VHD
          const vhd = new Vhd(handler, path)
          await vhd.readHeaderAndFooter()
          vhd.footer.uuid = packUuid(vdi.uuid)
          await vhd.readBlockAllocationTable() // required by writeFooter()
          await vhd.writeFooter()
        })
      )
      return {
        size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
      }
    })
    metadataContent.size = size
    await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
      dirMode: backup.config.dirMode,
    })

    // TODO: run cleanup?
  }
}
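The vhds mapping in transfer() determines where each exported disk lands on the remote: delta disks are grouped under the UUID of the VDI they were snapshotted from, while suspend (memory) disks use their own UUID since they are never deltas. A hedged standalone sketch of that mapping with hypothetical identifiers:

const mapValues = require('lodash/mapValues.js')

// hypothetical deltaExport.vdis entries, keyed by VDI ref
const vdis = {
  'ref:vdi0': { type: 'user', uuid: 'vdi-uuid-0', $snapshot_of$uuid: 'base-vdi-uuid-0' },
  'ref:mem0': { type: 'suspend', uuid: 'mem-uuid-0' },
}

const jobId = 'job-1'
const basename = '20210101T000000Z'

const vhds = mapValues(vdis, vdi =>
  `vdis/${jobId}/${vdi.type === 'suspend' ? vdi.uuid : vdi.$snapshot_of$uuid}/${basename}.vhd`
)

console.log(vhds)
// {
//   'ref:vdi0': 'vdis/job-1/base-vdi-uuid-0/20210101T000000Z.vhd',
//   'ref:mem0': 'vdis/job-1/mem-uuid-0/20210101T000000Z.vhd',
// }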
@xen-orchestra/backups/writers/DeltaReplicationWriter.js (Normal file, 124 lines)
@@ -0,0 +1,124 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { importDeltaVm, TAG_COPY_SRC } = require('../_deltaVm.js')
const { Task } = require('../Task.js')

const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { MixinReplicationWriter } = require('./_MixinReplicationWriter.js')
const { listReplicatedVms } = require('./_listReplicatedVms.js')

exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinReplicationWriter(AbstractDeltaWriter) {
  async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
    const sr = this._sr
    const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
      vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
    )
    if (replicatedVm === undefined) {
      return baseUuidToSrcVdi.clear()
    }

    const xapi = replicatedVm.$xapi
    const replicatedVdis = new Set(
      await asyncMap(await replicatedVm.$getDisks(), async vdiRef => {
        const otherConfig = await xapi.getField('VDI', vdiRef, 'other_config')
        return otherConfig[TAG_COPY_SRC]
      })
    )

    for (const uuid of baseUuidToSrcVdi.keys()) {
      if (!replicatedVdis.has(uuid)) {
        baseUuidToSrcVdi.delete(uuid)
      }
    }
  }

  prepare({ isFull }) {
    // create the task related to this export and ensure all methods are called in this context
    const task = new Task({
      name: 'export',
      data: {
        id: this._sr.uuid,
        isFull,
        type: 'SR',
      },
    })
    this.transfer = task.wrapFn(this.transfer)
    this.cleanup = task.wrapFn(this.cleanup, true)

    return task.run(() => this._prepare())
  }

  async _prepare() {
    const settings = this._settings
    const { uuid: srUuid, $xapi: xapi } = this._sr
    const { scheduleId, vm } = this._backup

    // delete previous interrupted copies
    ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => vm.$destroy))

    this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))

    if (settings.deleteFirst) {
      await this._deleteOldEntries()
    }
  }

  async cleanup() {
    if (!this._settings.deleteFirst) {
      await this._deleteOldEntries()
    }
  }

  async _deleteOldEntries() {
    return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
  }

  async transfer({ timestamp, deltaExport, sizeContainers }) {
    const sr = this._sr
    const { job, scheduleId, vm } = this._backup

    const { uuid: srUuid, $xapi: xapi } = sr

    let targetVmRef
    await Task.run({ name: 'transfer' }, async () => {
      targetVmRef = await importDeltaVm(
        {
          __proto__: deltaExport,
          vm: {
            ...deltaExport.vm,
            tags: [...deltaExport.vm.tags, 'Continuous Replication'],
          },
        },
        sr
      )
      return {
        size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
      }
    })

    const targetVm = await xapi.getRecord('VM', targetVmRef)

    await Promise.all([
      targetVm.ha_restart_priority !== '' &&
        Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
      targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
      targetVm.update_blocked_operations(
        'start',
        'Start operation for this vm is blocked, clone it if you want to use it.'
      ),
      targetVm.update_other_config({
        'xo:backup:sr': srUuid,

        // these entries need to be added in case of offline backup
        'xo:backup:datetime': formatDateTime(timestamp),
        'xo:backup:job': job.id,
        'xo:backup:schedule': scheduleId,
        'xo:backup:vm': vm.uuid,
      }),
    ])
  }
}
Some files were not shown because too many files have changed in this diff.