Compare commits
646 Commits
lite/neste
...
xo5/resour
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
12786511bb | ||
|
|
edcbf22d3f | ||
|
|
4fa4638e00 | ||
|
|
e23ff07792 | ||
|
|
26eb727ae3 | ||
|
|
b750d277aa | ||
|
|
bd2b6dbe2a | ||
|
|
2cd87e1b2c | ||
|
|
6eed3196bb | ||
|
|
662c2bd8cb | ||
|
|
ba9d4d4bb5 | ||
|
|
18dea2f2fe | ||
|
|
70c51227bf | ||
|
|
e162fd835b | ||
|
|
bcdcfbf20b | ||
|
|
a6e93c895c | ||
|
|
5c4f907358 | ||
|
|
e19dbc06fe | ||
|
|
287378f9c6 | ||
|
|
83a94eefd6 | ||
|
|
92fc19e2e3 | ||
|
|
521d31ac84 | ||
|
|
2b3ccb4b0e | ||
|
|
2498a4f47c | ||
|
|
dd61feeaf3 | ||
|
|
7851f8c196 | ||
|
|
404a764821 | ||
|
|
59cc418973 | ||
|
|
bc00551cb3 | ||
|
|
4d24248ea2 | ||
|
|
5c731fd56e | ||
|
|
79abb97b1f | ||
|
|
3314ba6e08 | ||
|
|
0fe8f8cac3 | ||
|
|
aaffe8c872 | ||
|
|
cf0e820632 | ||
|
|
82fdfbd18b | ||
|
|
116f2ec47a | ||
|
|
5d4723ec06 | ||
|
|
092475d57f | ||
|
|
e7a97fd175 | ||
|
|
2262ce8814 | ||
|
|
7ec64476a4 | ||
|
|
bbb43808dd | ||
|
|
02d7f0f20b | ||
|
|
e226db708a | ||
|
|
b7af74e27e | ||
|
|
70f2e50274 | ||
|
|
619b3f3c78 | ||
|
|
ac182374d6 | ||
|
|
8461d68d40 | ||
|
|
838f9a42d1 | ||
|
|
a9dbc01b84 | ||
|
|
5daf269ab3 | ||
|
|
383677f4dd | ||
|
|
28ad2af9b0 | ||
|
|
53b25508af | ||
|
|
54836ec576 | ||
|
|
c9897166e0 | ||
|
|
1b35fddf3d | ||
|
|
fc2ca1f4d4 | ||
|
|
ed7046c1ab | ||
|
|
215579ff4d | ||
|
|
4c79a78a05 | ||
|
|
7864c05ee1 | ||
|
|
1026d18e4b | ||
|
|
d62acd3fe7 | ||
|
|
cc1b4bc06c | ||
|
|
d45f843308 | ||
|
|
3c7fa05c43 | ||
|
|
05df055552 | ||
|
|
755b206901 | ||
|
|
3360399b2a | ||
|
|
091bc04ace | ||
|
|
3ab2a8354b | ||
|
|
2d047c4fef | ||
|
|
0f1dcda7db | ||
|
|
44760668a3 | ||
|
|
a4023dbc54 | ||
|
|
3cad7e2467 | ||
|
|
ad5f37436a | ||
|
|
8c05eab720 | ||
|
|
4db605f14a | ||
|
|
06d411543a | ||
|
|
6084db22a9 | ||
|
|
2e6f7d35ef | ||
|
|
0f1f45953c | ||
|
|
89a4de5b21 | ||
|
|
32dd16114e | ||
|
|
f5a3cc0cdb | ||
|
|
5cabf9916a | ||
|
|
8e65ef7dbc | ||
|
|
0c0251082d | ||
|
|
c250cd9b89 | ||
|
|
d6abdb246b | ||
|
|
5769da3ebc | ||
|
|
4f383635ef | ||
|
|
8a7abc2e54 | ||
|
|
af1650bd14 | ||
|
|
c6fdef33c4 | ||
|
|
5f73f09f59 | ||
|
|
1b0fc62e2e | ||
|
|
aa2dc9206d | ||
|
|
2b1562da81 | ||
|
|
25e270edb4 | ||
|
|
51c11c15a8 | ||
|
|
47922dee56 | ||
|
|
3dda4dbaad | ||
|
|
901f7b3fe2 | ||
|
|
774d66512e | ||
|
|
ec1669a32e | ||
|
|
bbcd4184b0 | ||
|
|
85ec26194b | ||
|
|
f0242380ca | ||
|
|
a624330818 | ||
|
|
3892efcca2 | ||
|
|
c1c122d92c | ||
|
|
b7a66e9f73 | ||
|
|
d92d2efc78 | ||
|
|
c2cb51a470 | ||
|
|
5242affdc1 | ||
|
|
71f3be288b | ||
|
|
58769815b0 | ||
|
|
c81c23c0d0 | ||
|
|
f06f89b5b4 | ||
|
|
fa748ed9de | ||
|
|
cd753acff7 | ||
|
|
8ff861e2be | ||
|
|
95ccb2e0ae | ||
|
|
b0e5846ad1 | ||
|
|
19fd456ccf | ||
|
|
7946a7db68 | ||
|
|
6127e30574 | ||
|
|
4aad9d8e32 | ||
|
|
78d15ddf96 | ||
|
|
302f7fb85e | ||
|
|
ea19b0851f | ||
|
|
b0c37df8d7 | ||
|
|
beba6f7e8d | ||
|
|
9388b5500c | ||
|
|
bae8ad25e9 | ||
|
|
c96b29fe96 | ||
|
|
9888013aff | ||
|
|
0bbb0c289d | ||
|
|
80097ea777 | ||
|
|
be452a5d63 | ||
|
|
bcc0452646 | ||
|
|
9d9691c5a3 | ||
|
|
e56edc70d5 | ||
|
|
d7f4d0f5e0 | ||
|
|
8c24dd1732 | ||
|
|
575a423edf | ||
|
|
e311860bb5 | ||
|
|
e6289ebc16 | ||
|
|
013e20aa0f | ||
|
|
45a0a83fa4 | ||
|
|
ae518399fa | ||
|
|
d949112921 | ||
|
|
bb19afc45c | ||
|
|
7780cb176a | ||
|
|
74ff64dfb4 | ||
|
|
9be3c40ead | ||
|
|
0f00c7e393 | ||
|
|
95492f6f89 | ||
|
|
046fa7282b | ||
|
|
6cd99c39f4 | ||
|
|
48c3a65cc6 | ||
|
|
8b0b2d7c31 | ||
|
|
d8280087a4 | ||
|
|
c14261a0bc | ||
|
|
3d6defca37 | ||
|
|
d062a5175a | ||
|
|
f218874c4b | ||
|
|
b1e879ca2f | ||
|
|
c5010c2caa | ||
|
|
2c40b99d8b | ||
|
|
0d127f2b92 | ||
|
|
0464886e80 | ||
|
|
d655a3e222 | ||
|
|
579f0b91d5 | ||
|
|
72b1878254 | ||
|
|
74dd4c8db7 | ||
|
|
ef4ecce572 | ||
|
|
1becccffbc | ||
|
|
b95b1622b1 | ||
|
|
36d6e3779d | ||
|
|
b0e000328d | ||
|
|
cc080ec681 | ||
|
|
0d4cf48410 | ||
|
|
2ec164c560 | ||
|
|
d38dce9302 | ||
|
|
4c775f6b86 | ||
|
|
dd6c858737 | ||
|
|
194db8d0dd | ||
|
|
5b00cf3ecd | ||
|
|
afa957b305 | ||
|
|
ea1921625e | ||
|
|
65a154a1b3 | ||
|
|
2249c90be9 | ||
|
|
1b55d5e2b4 | ||
|
|
9f5be8029a | ||
|
|
16458b953f | ||
|
|
d63a567a1c | ||
|
|
d36e10e73f | ||
|
|
5d80a58754 | ||
|
|
77b14a316f | ||
|
|
213eb6a56a | ||
|
|
2c298ef47a | ||
|
|
b7b7af8cff | ||
|
|
5cf5d14449 | ||
|
|
e0bf9ee9d5 | ||
|
|
54808967f6 | ||
|
|
c63d38dc0f | ||
|
|
41ed5625be | ||
|
|
e66bcf2a5c | ||
|
|
c40e71ed49 | ||
|
|
439c721472 | ||
|
|
99429edf23 | ||
|
|
cec8237a47 | ||
|
|
e13d55bfa9 | ||
|
|
141c141516 | ||
|
|
7a47d23191 | ||
|
|
7a8bf671fb | ||
|
|
7f83a3e55e | ||
|
|
7f8ab07692 | ||
|
|
2634008a6a | ||
|
|
4c652a457f | ||
|
|
89dc40a1c5 | ||
|
|
04a7982801 | ||
|
|
df9b59f980 | ||
|
|
fe215a53af | ||
|
|
0559c843c4 | ||
|
|
79967e0eec | ||
|
|
847ad63c09 | ||
|
|
fc1357db93 | ||
|
|
b644cbe28d | ||
|
|
7ddfb2a684 | ||
|
|
5a0cfd86c7 | ||
|
|
70e3ba17af | ||
|
|
4784bbfb99 | ||
|
|
ceddddd7f2 | ||
|
|
32afd5c463 | ||
|
|
ac391f6a0f | ||
|
|
a0b50b47ef | ||
|
|
e3618416bf | ||
|
|
37fd6d13db | ||
|
|
eb56666f98 | ||
|
|
b7daee81c0 | ||
|
|
bee0eb9091 | ||
|
|
59a9a63971 | ||
|
|
a2e8b999da | ||
|
|
489ad51b4d | ||
|
|
7db2516a38 | ||
|
|
1141ef524f | ||
|
|
f449258ed3 | ||
|
|
bb3b83c690 | ||
|
|
2b973275c0 | ||
|
|
037e1c1dfa | ||
|
|
f0da94081b | ||
|
|
cd44a6e28c | ||
|
|
70b09839c7 | ||
|
|
12140143d2 | ||
|
|
e68236c9f2 | ||
|
|
8a1a0d76f7 | ||
|
|
4a5bc5dccc | ||
|
|
0ccdfbd6f4 | ||
|
|
75af7668b5 | ||
|
|
0b454fa670 | ||
|
|
2dcb5cb7cd | ||
|
|
a5aeeceb7f | ||
|
|
b2f2c3cbc4 | ||
|
|
0f7ac004ad | ||
|
|
7faa82a9c8 | ||
|
|
4b3f60b280 | ||
|
|
b29d5ba95c | ||
|
|
408fc5af84 | ||
|
|
2748aea4e9 | ||
|
|
a5acc7d267 | ||
|
|
87a9fbe237 | ||
|
|
9d0b7242f0 | ||
|
|
20ec44c3b3 | ||
|
|
6f68456bae | ||
|
|
b856c1a6b4 | ||
|
|
61e1f83a9f | ||
|
|
5820e19731 | ||
|
|
cdb51f8fe3 | ||
|
|
57940e0a52 | ||
|
|
6cc95efe51 | ||
|
|
b0ff2342ab | ||
|
|
0f67692be4 | ||
|
|
865461bfb9 | ||
|
|
e108cb0990 | ||
|
|
c4535c6bae | ||
|
|
ad8eaaa771 | ||
|
|
9419cade3d | ||
|
|
272e6422bd | ||
|
|
547908a8f9 | ||
|
|
8abfaa0bd5 | ||
|
|
a9fbcf3962 | ||
|
|
887b49ebbf | ||
|
|
858ecbc217 | ||
|
|
ffd523679d | ||
|
|
bd9db437f1 | ||
|
|
0365bacfbb | ||
|
|
f3e0227c55 | ||
|
|
4504141cbf | ||
|
|
ecbbf878d0 | ||
|
|
c1faaa3107 | ||
|
|
59f04b4a6b | ||
|
|
781b070e74 | ||
|
|
1911386aba | ||
|
|
5b0339315f | ||
|
|
5fe53dfa99 | ||
|
|
06068cdcc6 | ||
|
|
c88cc2b020 | ||
|
|
03de8ad481 | ||
|
|
08ba7e7253 | ||
|
|
9ca3f3df26 | ||
|
|
511908bb7d | ||
|
|
4351aad312 | ||
|
|
af7aa29c91 | ||
|
|
315d626055 | ||
|
|
7af0899800 | ||
|
|
46ec2dfd56 | ||
|
|
b2348474c3 | ||
|
|
836300755a | ||
|
|
55c8c8a6e9 | ||
|
|
38e32cd24c | ||
|
|
5ceacfaf5a | ||
|
|
1ee6b106b9 | ||
|
|
eaef4f22d2 | ||
|
|
96025df12f | ||
|
|
a8aac295eb | ||
|
|
83141989f0 | ||
|
|
9dea52281d | ||
|
|
2164c72034 | ||
|
|
0d0c38f3b5 | ||
|
|
e5be21a590 | ||
|
|
bc1a8be862 | ||
|
|
3df4dbaae7 | ||
|
|
8f2cfebda6 | ||
|
|
0d00c1c45f | ||
|
|
9886e06d6a | ||
|
|
478dbdfe41 | ||
|
|
2bfdb60dda | ||
|
|
cabd04470d | ||
|
|
f6819b23f9 | ||
|
|
c9dbcf1384 | ||
|
|
457fec0bc8 | ||
|
|
db99a22244 | ||
|
|
89d8adc6c6 | ||
|
|
a3ea70c61c | ||
|
|
ae0f3b4fe0 | ||
|
|
2552ef37d2 | ||
|
|
9803e8c6cb | ||
|
|
3410cbc3b9 | ||
|
|
93fce0d4bf | ||
|
|
dbdc5f3e3b | ||
|
|
581b42fa9d | ||
|
|
e07e2d3ccd | ||
|
|
ad928ec23d | ||
|
|
1d7559ded2 | ||
|
|
9099b58557 | ||
|
|
9e70397240 | ||
|
|
5f69b0e9a0 | ||
|
|
2a9bff1607 | ||
|
|
9e621d7de8 | ||
|
|
3e5c73528d | ||
|
|
397b5cd56d | ||
|
|
55cb6042e8 | ||
|
|
339d920b78 | ||
|
|
f14f716f3d | ||
|
|
fb83d1fc98 | ||
|
|
62208e7847 | ||
|
|
df91772f5c | ||
|
|
cf8a9d40be | ||
|
|
93d1c6c3fc | ||
|
|
f1fa811e5c | ||
|
|
5a9812c492 | ||
|
|
b53d613a64 | ||
|
|
225a67ae3b | ||
|
|
c7eb7db463 | ||
|
|
edfa729672 | ||
|
|
77d9798319 | ||
|
|
680f1e2f07 | ||
|
|
7c009b0fc0 | ||
|
|
eb7de4f2dd | ||
|
|
2378399981 | ||
|
|
37b2113763 | ||
|
|
5048485a85 | ||
|
|
9e667533e9 | ||
|
|
1fac7922b4 | ||
|
|
1a0e5eb6fc | ||
|
|
321e322492 | ||
|
|
8834af65f7 | ||
|
|
1a1dd0531d | ||
|
|
8752487280 | ||
|
|
4b12a6d31d | ||
|
|
2924f82754 | ||
|
|
9b236a6191 | ||
|
|
a3b8553cec | ||
|
|
00a1778a6d | ||
|
|
3b6bc629bc | ||
|
|
04dfd9a02c | ||
|
|
fb52868074 | ||
|
|
77d53d2abf | ||
|
|
6afb87def1 | ||
|
|
8bfe293414 | ||
|
|
2e634a9d1c | ||
|
|
bea771ca90 | ||
|
|
99e3622f31 | ||
|
|
a16522241e | ||
|
|
b86cb12649 | ||
|
|
2af74008b2 | ||
|
|
2e689592f1 | ||
|
|
3f8436b58b | ||
|
|
e3dd59d684 | ||
|
|
549d9b70a9 | ||
|
|
3bf6aae103 | ||
|
|
afb110c473 | ||
|
|
8727c3cf96 | ||
|
|
b13302ddeb | ||
|
|
e89ed06314 | ||
|
|
e3f57998f7 | ||
|
|
8cdb5ee31b | ||
|
|
5b734db656 | ||
|
|
e853f9d04f | ||
|
|
2a5e09719e | ||
|
|
3c0477e0da | ||
|
|
060d1c5297 | ||
|
|
55dd7bfb9c | ||
|
|
b00cf13029 | ||
|
|
73755e4ccf | ||
|
|
a1bd96da6a | ||
|
|
0e934c1413 | ||
|
|
eb69234a8e | ||
|
|
7659d9c0be | ||
|
|
2ba81d55f8 | ||
|
|
2e1abad255 | ||
|
|
c7d5b4b063 | ||
|
|
cc5f4b0996 | ||
|
|
55f627ed83 | ||
|
|
988179a3f0 | ||
|
|
ce617e0732 | ||
|
|
f0f429a473 | ||
|
|
bb6e158301 | ||
|
|
7ff304a042 | ||
|
|
7df1994d7f | ||
|
|
a3a2fda157 | ||
|
|
d8530f9518 | ||
|
|
d3062ac35c | ||
|
|
b11f11f4db | ||
|
|
79d48f3b56 | ||
|
|
869f7ffab0 | ||
|
|
6665d6a8e6 | ||
|
|
8eb0bdbda7 | ||
|
|
710689db0b | ||
|
|
801eea7e75 | ||
|
|
7885e1e6e7 | ||
|
|
d384c746ca | ||
|
|
a30d962b1d | ||
|
|
b6e078716b | ||
|
|
34b69c7ee8 | ||
|
|
70bf8d9620 | ||
|
|
c8bfda9cf5 | ||
|
|
1eb4c20844 | ||
|
|
e5c5f19219 | ||
|
|
db92f0e365 | ||
|
|
570de7c0fe | ||
|
|
90e0f26845 | ||
|
|
c714bc3518 | ||
|
|
48e0acda32 | ||
|
|
013cdbcd96 | ||
|
|
fdd886f213 | ||
|
|
de70ef3064 | ||
|
|
9142a95f79 | ||
|
|
1c6aebf997 | ||
|
|
7b9ec4b7a7 | ||
|
|
decb87f0c9 | ||
|
|
e17470f56c | ||
|
|
99ddbcdc67 | ||
|
|
6953e2fe7b | ||
|
|
beb1063ba1 | ||
|
|
7773edd590 | ||
|
|
0104649b84 | ||
|
|
1c9d1049e0 | ||
|
|
d992a4cb87 | ||
|
|
52114ad4b0 | ||
|
|
bcc62cfcaf | ||
|
|
60434b136a | ||
|
|
13f3c8851d | ||
|
|
f386f94dc2 | ||
|
|
fda1fd1a04 | ||
|
|
0b17bdd9bc | ||
|
|
2c5706a89b | ||
|
|
5448452b71 | ||
|
|
22e7c126e6 | ||
|
|
750fefe957 | ||
|
|
025e671989 | ||
|
|
df0ed5e794 | ||
|
|
da45ace7c1 | ||
|
|
2a623b8ae7 | ||
|
|
f034ec45f3 | ||
|
|
970bc0ac5d | ||
|
|
3abbc8d57e | ||
|
|
06570d78a0 | ||
|
|
6a0df7aec2 | ||
|
|
30aeb95f3a | ||
|
|
36d6d53a26 | ||
|
|
895773b6c6 | ||
|
|
8ebc0dba4f | ||
|
|
006f12f17f | ||
|
|
b22239804a | ||
|
|
afd174ca21 | ||
|
|
27c6c1b896 | ||
|
|
311b420b74 | ||
|
|
e403298140 | ||
|
|
9c7fd94a9b | ||
|
|
8cdae83150 | ||
|
|
5b1cc7415e | ||
|
|
f5d3bc1f2d | ||
|
|
ba81d0e08a | ||
|
|
3b3f927e4b | ||
|
|
5e8539865f | ||
|
|
3a3fa2882c | ||
|
|
3baa37846e | ||
|
|
999fba2030 | ||
|
|
785a5857ef | ||
|
|
067f4ac882 | ||
|
|
8a26e08102 | ||
|
|
42aa202f7a | ||
|
|
403d2c8e7b | ||
|
|
ad46bde302 | ||
|
|
1b6ec2c545 | ||
|
|
56388557cb | ||
|
|
1ddbe87d0f | ||
|
|
3081810450 | ||
|
|
155be7fd95 | ||
|
|
ef960e94d3 | ||
|
|
bfd99a48fe | ||
|
|
a13fda5fe9 | ||
|
|
66bee59774 | ||
|
|
685400bbf8 | ||
|
|
5bef8fc411 | ||
|
|
aa7ff1449a | ||
|
|
3dca7f2a71 | ||
|
|
3dc2f649f6 | ||
|
|
9eb537c2f9 | ||
|
|
dfd5f6882f | ||
|
|
7214016338 | ||
|
|
606e3c4ce5 | ||
|
|
fb04d3d25d | ||
|
|
db8c042131 | ||
|
|
fd9005fba8 | ||
|
|
2d25413b8d | ||
|
|
035679800a | ||
|
|
abd0a3035a | ||
|
|
d307730c68 | ||
|
|
1b44de4958 | ||
|
|
ec78a1ce8b | ||
|
|
19c82ab30d | ||
|
|
9986f3fb18 | ||
|
|
d24e9c093d | ||
|
|
70c8b24fac | ||
|
|
9c9c11104b | ||
|
|
cba90b27f4 | ||
|
|
46cbced570 | ||
|
|
52cf2d1514 | ||
|
|
e51351be8d | ||
|
|
2a42e0ff94 | ||
|
|
3a824a2bfc | ||
|
|
fc1c809a18 | ||
|
|
221cd40199 | ||
|
|
aca19d9a81 | ||
|
|
0601bbe18d | ||
|
|
2d52aee952 | ||
|
|
99605bf185 | ||
|
|
91b19d9bc4 | ||
|
|
562401ebe4 | ||
|
|
6fd2f2610d | ||
|
|
6ae19b0640 | ||
|
|
6b936d8a8c | ||
|
|
8f2cfaae00 | ||
|
|
5c215e1a8a | ||
|
|
e3cb98124f | ||
|
|
90c3319880 | ||
|
|
348db876d2 | ||
|
|
408fd7ec03 | ||
|
|
1fd84836b1 | ||
|
|
522204795f | ||
|
|
e29c422ac9 | ||
|
|
152cf09b7e | ||
|
|
ff728099dc | ||
|
|
706d94221d | ||
|
|
340e9af7f4 | ||
|
|
40e536ba61 | ||
|
|
fd4c56c8c2 | ||
|
|
20d04ba956 | ||
|
|
3b1bcc67ae | ||
|
|
1add3fbf9d | ||
|
|
97f0759de0 | ||
|
|
005ab47d9b | ||
|
|
14a0caa4c6 | ||
|
|
1c23bd5ff7 | ||
|
|
49c161b17a | ||
|
|
18dce3fce6 | ||
|
|
d6fc86b6bc | ||
|
|
61d960d4b1 | ||
|
|
02d3465832 | ||
|
|
4bbadc9515 | ||
|
|
78586291ca | ||
|
|
945dec94bf | ||
|
|
003140d96b | ||
|
|
363d7cf0d0 | ||
|
|
f0c94496bf | ||
|
|
de217eabd9 | ||
|
|
7c80d0c1e1 | ||
|
|
9fb749b1db | ||
|
|
ad9c59669a | ||
|
|
76a038e403 | ||
|
|
0e12072922 | ||
|
|
158a8e14a2 | ||
|
|
0c97910349 | ||
|
|
8347ac6ed8 | ||
|
|
996abd6e7e | ||
|
|
de8abd5b63 | ||
|
|
3de928c488 | ||
|
|
a2a514e483 | ||
|
|
ff432e04b0 | ||
|
|
4502590bb0 | ||
|
|
6d440a5af5 | ||
|
|
0840b4c359 | ||
|
|
696ee7dbe5 | ||
|
|
5e23e356ce | ||
|
|
c705051a89 | ||
|
|
ce2b918a29 | ||
|
|
df740b1e8e | ||
|
|
c3e0308ad0 | ||
|
|
1005e295b2 | ||
|
|
b3cf58b8c0 | ||
|
|
2652c87917 | ||
|
|
9e0b5575a4 | ||
|
|
56c089dc01 | ||
|
|
3b94da1790 | ||
|
|
ec39a8e9fe | ||
|
|
6339f971ca |
59
.eslintrc.js
59
.eslintrc.js
@@ -15,9 +15,10 @@ module.exports = {
|
|||||||
|
|
||||||
overrides: [
|
overrides: [
|
||||||
{
|
{
|
||||||
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
|
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js', '**/scripts/**.{,c,m}js'],
|
||||||
rules: {
|
rules: {
|
||||||
'n/no-process-exit': 'off',
|
'n/no-process-exit': 'off',
|
||||||
|
'n/shebang': 'off',
|
||||||
'no-console': 'off',
|
'no-console': 'off',
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -46,6 +47,57 @@ module.exports = {
|
|||||||
],
|
],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
files: ['@xen-orchestra/{web-core,lite,web}/**/*.{vue,ts}'],
|
||||||
|
parserOptions: {
|
||||||
|
sourceType: 'module',
|
||||||
|
},
|
||||||
|
plugins: ['import'],
|
||||||
|
extends: [
|
||||||
|
'plugin:import/recommended',
|
||||||
|
'plugin:import/typescript',
|
||||||
|
'plugin:vue/vue3-recommended',
|
||||||
|
'@vue/eslint-config-typescript/recommended',
|
||||||
|
'@vue/eslint-config-prettier',
|
||||||
|
],
|
||||||
|
settings: {
|
||||||
|
'import/resolver': {
|
||||||
|
typescript: true,
|
||||||
|
'eslint-import-resolver-custom-alias': {
|
||||||
|
alias: {
|
||||||
|
'@': './src',
|
||||||
|
},
|
||||||
|
extensions: ['.ts'],
|
||||||
|
packages: ['@xen-orchestra/lite'],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
rules: {
|
||||||
|
'no-void': 'off',
|
||||||
|
'n/no-missing-import': 'off', // using 'import' plugin instead to support TS aliases
|
||||||
|
'@typescript-eslint/no-explicit-any': 'off',
|
||||||
|
'vue/require-default-prop': 'off', // https://github.com/vuejs/eslint-plugin-vue/issues/2051
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
files: ['@xen-orchestra/{web-core,lite,web}/src/pages/**/*.vue'],
|
||||||
|
parserOptions: {
|
||||||
|
sourceType: 'module',
|
||||||
|
},
|
||||||
|
rules: {
|
||||||
|
'vue/multi-word-component-names': 'off',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
files: ['@xen-orchestra/{web-core,lite,web}/typed-router.d.ts'],
|
||||||
|
parserOptions: {
|
||||||
|
sourceType: 'module',
|
||||||
|
},
|
||||||
|
rules: {
|
||||||
|
'eslint-comments/disable-enable-pair': 'off',
|
||||||
|
'eslint-comments/no-unlimited-disable': 'off',
|
||||||
|
},
|
||||||
|
},
|
||||||
],
|
],
|
||||||
|
|
||||||
parserOptions: {
|
parserOptions: {
|
||||||
@@ -68,6 +120,11 @@ module.exports = {
|
|||||||
|
|
||||||
'no-console': ['error', { allow: ['warn', 'error'] }],
|
'no-console': ['error', { allow: ['warn', 'error'] }],
|
||||||
|
|
||||||
|
// this rule can prevent race condition bugs like parallel `a += await foo()`
|
||||||
|
//
|
||||||
|
// as it has a lots of false positive, it is only enabled as a warning for now
|
||||||
|
'require-atomic-updates': 'warn',
|
||||||
|
|
||||||
strict: 'error',
|
strict: 'error',
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|||||||
48
.github/ISSUE_TEMPLATE/bug_report.md
vendored
48
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,48 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Create a report to help us improve
|
|
||||||
title: ''
|
|
||||||
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
|
|
||||||
assignees: ''
|
|
||||||
---
|
|
||||||
|
|
||||||
1. ⚠️ **If you don't follow this template, the issue will be closed**.
|
|
||||||
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
|
|
||||||
|
|
||||||
Are you using XOA or XO from the sources?
|
|
||||||
|
|
||||||
If XOA:
|
|
||||||
|
|
||||||
- which release channel? (`stable` vs `latest`)
|
|
||||||
- please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
|
|
||||||
|
|
||||||
If XO from the sources:
|
|
||||||
|
|
||||||
- Provide **your commit number**. If it's older than a week, we won't investigate
|
|
||||||
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
|
|
||||||
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
|
|
||||||
|
|
||||||
**Describe the bug**
|
|
||||||
A clear and concise description of what the bug is.
|
|
||||||
|
|
||||||
**To Reproduce**
|
|
||||||
Steps to reproduce the behavior:
|
|
||||||
|
|
||||||
1. Go to '...'
|
|
||||||
2. Click on '....'
|
|
||||||
3. Scroll down to '....'
|
|
||||||
4. See error
|
|
||||||
|
|
||||||
**Expected behavior**
|
|
||||||
A clear and concise description of what you expected to happen.
|
|
||||||
|
|
||||||
**Screenshots**
|
|
||||||
If applicable, add screenshots to help explain your problem.
|
|
||||||
|
|
||||||
**Environment (please provide the following information):**
|
|
||||||
|
|
||||||
- Node: [e.g. 16.12.1]
|
|
||||||
- hypervisor: [e.g. XCP-ng 8.2.0]
|
|
||||||
|
|
||||||
**Additional context**
|
|
||||||
Add any other context about the problem here.
|
|
||||||
119
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
119
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
name: Bug Report
|
||||||
|
description: Create a report to help us improve
|
||||||
|
labels: ['type: bug :bug:', 'status: triaging :triangular_flag_on_post:']
|
||||||
|
body:
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
1. ⚠️ **If you don't follow this template, the issue will be closed**.
|
||||||
|
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: '## Are you using XOA or XO from the sources?'
|
||||||
|
- type: dropdown
|
||||||
|
id: xo-origin
|
||||||
|
attributes:
|
||||||
|
label: Are you using XOA or XO from the sources?
|
||||||
|
options:
|
||||||
|
- XOA
|
||||||
|
- XO from the sources
|
||||||
|
- both
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: '### If XOA:'
|
||||||
|
- type: dropdown
|
||||||
|
id: xoa-channel
|
||||||
|
attributes:
|
||||||
|
label: Which release channel?
|
||||||
|
description: please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
|
||||||
|
options:
|
||||||
|
- stable
|
||||||
|
- latest
|
||||||
|
- both
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: '### If XO from the sources:'
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: |
|
||||||
|
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
|
||||||
|
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
|
||||||
|
- type: input
|
||||||
|
id: xo-sources-commit-number
|
||||||
|
attributes:
|
||||||
|
label: Provide your commit number
|
||||||
|
description: If it's older than a week, we won't investigate
|
||||||
|
placeholder: e.g. 579f0
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: '## Bug description:'
|
||||||
|
- type: textarea
|
||||||
|
id: bug-description
|
||||||
|
attributes:
|
||||||
|
label: Describe the bug
|
||||||
|
description: A clear and concise description of what the bug is
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: error-message
|
||||||
|
attributes:
|
||||||
|
label: Error message
|
||||||
|
render: Text
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
id: steps
|
||||||
|
attributes:
|
||||||
|
label: To reproduce
|
||||||
|
description: 'Steps to reproduce the behavior:'
|
||||||
|
value: |
|
||||||
|
1. Go to '...'
|
||||||
|
2. Click on '...'
|
||||||
|
3. Scroll down to '...'
|
||||||
|
4. See error
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
id: expected-behavior
|
||||||
|
attributes:
|
||||||
|
label: Expected behavior
|
||||||
|
description: A clear and concise description of what you expected to happen
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: textarea
|
||||||
|
id: screenshots
|
||||||
|
attributes:
|
||||||
|
label: Screenshots
|
||||||
|
description: If applicable, add screenshots to help explain your problem
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
|
- type: markdown
|
||||||
|
attributes:
|
||||||
|
value: '## Environment (please provide the following information):'
|
||||||
|
- type: input
|
||||||
|
id: node-version
|
||||||
|
attributes:
|
||||||
|
label: Node
|
||||||
|
placeholder: e.g. 16.12.1
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: input
|
||||||
|
id: hypervisor-version
|
||||||
|
attributes:
|
||||||
|
label: Hypervisor
|
||||||
|
placeholder: e.g. XCP-ng 8.2.0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
id: additional-context
|
||||||
|
attributes:
|
||||||
|
label: Additional context
|
||||||
|
description: Add any other context about the problem here
|
||||||
|
validations:
|
||||||
|
required: false
|
||||||
4
.github/workflows/ci.yml
vendored
4
.github/workflows/ci.yml
vendored
@@ -24,8 +24,12 @@ jobs:
|
|||||||
cache: 'yarn'
|
cache: 'yarn'
|
||||||
- name: Install project dependencies
|
- name: Install project dependencies
|
||||||
run: yarn
|
run: yarn
|
||||||
|
- name: Ensure yarn.lock is up-to-date
|
||||||
|
run: git diff --exit-code yarn.lock
|
||||||
- name: Build the project
|
- name: Build the project
|
||||||
run: yarn build
|
run: yarn build
|
||||||
|
- name: Unit tests
|
||||||
|
run: yarn test-unit
|
||||||
- name: Lint tests
|
- name: Lint tests
|
||||||
run: yarn test-lint
|
run: yarn test-lint
|
||||||
- name: Integration tests
|
- name: Integration tests
|
||||||
|
|||||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -30,8 +30,12 @@ pnpm-debug.log.*
|
|||||||
yarn-error.log
|
yarn-error.log
|
||||||
yarn-error.log.*
|
yarn-error.log.*
|
||||||
.env
|
.env
|
||||||
|
*.tsbuildinfo
|
||||||
|
|
||||||
# code coverage
|
# code coverage
|
||||||
.nyc_output/
|
.nyc_output/
|
||||||
coverage/
|
coverage/
|
||||||
.turbo/
|
.turbo/
|
||||||
|
|
||||||
|
# https://node-tap.org/dot-tap-folder/
|
||||||
|
.tap/
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
|
'use strict'
|
||||||
|
|
||||||
module.exports = {
|
module.exports = {
|
||||||
arrowParens: 'avoid',
|
arrowParens: 'avoid',
|
||||||
jsxSingleQuote: true,
|
jsxSingleQuote: true,
|
||||||
semi: false,
|
semi: false,
|
||||||
singleQuote: true,
|
singleQuote: true,
|
||||||
|
trailingComma: 'es5',
|
||||||
|
|
||||||
// 2020-11-24: Requested by nraynaud and approved by the rest of the team
|
// 2020-11-24: Requested by nraynaud and approved by the rest of the team
|
||||||
//
|
//
|
||||||
|
|||||||
@@ -33,8 +33,7 @@
|
|||||||
"test": "node--test"
|
"test": "node--test"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"sinon": "^15.0.1",
|
"sinon": "^17.0.1",
|
||||||
"tap": "^16.3.0",
|
|
||||||
"test": "^3.2.1"
|
"test": "^3.2.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -62,6 +62,42 @@ decorateClass(Foo, {
|
|||||||
})
|
})
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `decorateObject(object, map)`
|
||||||
|
|
||||||
|
Decorates an object the same way `decorateClass()` decorates a class:
|
||||||
|
|
||||||
|
```js
|
||||||
|
import { decorateObject } from '@vates/decorate-with'
|
||||||
|
|
||||||
|
const object = {
|
||||||
|
get bar() {
|
||||||
|
// body
|
||||||
|
},
|
||||||
|
|
||||||
|
set bar(value) {
|
||||||
|
// body
|
||||||
|
},
|
||||||
|
|
||||||
|
baz() {
|
||||||
|
// body
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
decorateObject(object, {
|
||||||
|
// getter and/or setter
|
||||||
|
bar: {
|
||||||
|
// without arguments
|
||||||
|
get: lodash.memoize,
|
||||||
|
|
||||||
|
// with arguments
|
||||||
|
set: [lodash.debounce, 150],
|
||||||
|
},
|
||||||
|
|
||||||
|
// method (with or without arguments)
|
||||||
|
baz: lodash.curry,
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
### `perInstance(fn, ...args)`
|
### `perInstance(fn, ...args)`
|
||||||
|
|
||||||
Helper to decorate the method by instance instead of for the whole class.
|
Helper to decorate the method by instance instead of for the whole class.
|
||||||
|
|||||||
@@ -80,6 +80,42 @@ decorateClass(Foo, {
|
|||||||
})
|
})
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### `decorateObject(object, map)`
|
||||||
|
|
||||||
|
Decorates an object the same way `decorateClass()` decorates a class:
|
||||||
|
|
||||||
|
```js
|
||||||
|
import { decorateObject } from '@vates/decorate-with'
|
||||||
|
|
||||||
|
const object = {
|
||||||
|
get bar() {
|
||||||
|
// body
|
||||||
|
},
|
||||||
|
|
||||||
|
set bar(value) {
|
||||||
|
// body
|
||||||
|
},
|
||||||
|
|
||||||
|
baz() {
|
||||||
|
// body
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
decorateObject(object, {
|
||||||
|
// getter and/or setter
|
||||||
|
bar: {
|
||||||
|
// without arguments
|
||||||
|
get: lodash.memoize,
|
||||||
|
|
||||||
|
// with arguments
|
||||||
|
set: [lodash.debounce, 150],
|
||||||
|
},
|
||||||
|
|
||||||
|
// method (with or without arguments)
|
||||||
|
baz: lodash.curry,
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
### `perInstance(fn, ...args)`
|
### `perInstance(fn, ...args)`
|
||||||
|
|
||||||
Helper to decorate the method by instance instead of for the whole class.
|
Helper to decorate the method by instance instead of for the whole class.
|
||||||
|
|||||||
@@ -14,10 +14,13 @@ function applyDecorator(decorator, value) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
|
exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
|
||||||
const { prototype } = klass
|
return decorateObject(klass.prototype, map)
|
||||||
|
}
|
||||||
|
|
||||||
|
function decorateObject(object, map) {
|
||||||
for (const name of Object.keys(map)) {
|
for (const name of Object.keys(map)) {
|
||||||
const decorator = map[name]
|
const decorator = map[name]
|
||||||
const descriptor = getOwnPropertyDescriptor(prototype, name)
|
const descriptor = getOwnPropertyDescriptor(object, name)
|
||||||
if (typeof decorator === 'function' || Array.isArray(decorator)) {
|
if (typeof decorator === 'function' || Array.isArray(decorator)) {
|
||||||
descriptor.value = applyDecorator(decorator, descriptor.value)
|
descriptor.value = applyDecorator(decorator, descriptor.value)
|
||||||
} else {
|
} else {
|
||||||
@@ -30,10 +33,11 @@ exports.decorateClass = exports.decorateMethodsWith = function decorateClass(kla
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
defineProperty(prototype, name, descriptor)
|
defineProperty(object, name, descriptor)
|
||||||
}
|
}
|
||||||
return klass
|
return object
|
||||||
}
|
}
|
||||||
|
exports.decorateObject = decorateObject
|
||||||
|
|
||||||
exports.perInstance = function perInstance(fn, decorator, ...args) {
|
exports.perInstance = function perInstance(fn, decorator, ...args) {
|
||||||
const map = new WeakMap()
|
const map = new WeakMap()
|
||||||
|
|||||||
@@ -13,12 +13,15 @@ describe('decorateWith', () => {
|
|||||||
const expectedFn = Function.prototype
|
const expectedFn = Function.prototype
|
||||||
const newFn = () => {}
|
const newFn = () => {}
|
||||||
|
|
||||||
const decorator = decorateWith(function wrapper(fn, ...args) {
|
const decorator = decorateWith(
|
||||||
assert.deepStrictEqual(fn, expectedFn)
|
function wrapper(fn, ...args) {
|
||||||
assert.deepStrictEqual(args, expectedArgs)
|
assert.deepStrictEqual(fn, expectedFn)
|
||||||
|
assert.deepStrictEqual(args, expectedArgs)
|
||||||
|
|
||||||
return newFn
|
return newFn
|
||||||
}, ...expectedArgs)
|
},
|
||||||
|
...expectedArgs
|
||||||
|
)
|
||||||
|
|
||||||
const descriptor = {
|
const descriptor = {
|
||||||
configurable: true,
|
configurable: true,
|
||||||
|
|||||||
@@ -20,7 +20,7 @@
|
|||||||
"url": "https://vates.fr"
|
"url": "https://vates.fr"
|
||||||
},
|
},
|
||||||
"license": "ISC",
|
"license": "ISC",
|
||||||
"version": "2.0.0",
|
"version": "2.1.0",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=8.10"
|
"node": ">=8.10"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -14,7 +14,7 @@
|
|||||||
"url": "https://vates.fr"
|
"url": "https://vates.fr"
|
||||||
},
|
},
|
||||||
"license": "ISC",
|
"license": "ISC",
|
||||||
"version": "0.1.4",
|
"version": "0.1.5",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=8.10"
|
"node": ">=8.10"
|
||||||
},
|
},
|
||||||
@@ -23,13 +23,13 @@
|
|||||||
"test": "node--test"
|
"test": "node--test"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@vates/multi-key-map": "^0.1.0",
|
"@vates/multi-key-map": "^0.2.0",
|
||||||
"@xen-orchestra/async-map": "^0.1.2",
|
"@xen-orchestra/async-map": "^0.1.2",
|
||||||
"@xen-orchestra/log": "^0.6.0",
|
"@xen-orchestra/log": "^0.6.0",
|
||||||
"ensure-array": "^1.0.0"
|
"ensure-array": "^1.0.0"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"sinon": "^15.0.1",
|
"sinon": "^17.0.1",
|
||||||
"test": "^3.2.1"
|
"test": "^3.2.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,6 +20,9 @@ function assertListeners(t, event, listeners) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
t.beforeEach(function (t) {
|
t.beforeEach(function (t) {
|
||||||
|
// work around https://github.com/tapjs/tapjs/issues/998
|
||||||
|
t.context = {}
|
||||||
|
|
||||||
t.context.ee = new EventEmitter()
|
t.context.ee = new EventEmitter()
|
||||||
t.context.em = new EventListenersManager(t.context.ee)
|
t.context.em = new EventListenersManager(t.context.ee)
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -38,9 +38,9 @@
|
|||||||
"version": "1.0.1",
|
"version": "1.0.1",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public",
|
"postversion": "npm publish --access public",
|
||||||
"test": "tap --branches=72"
|
"test": "tap --allow-incomplete-coverage"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"tap": "^16.2.0"
|
"tap": "^18.7.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
28
@vates/fuse-vhd/.USAGE.md
Normal file
28
@vates/fuse-vhd/.USAGE.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
Mount a vhd generated by xen-orchestra to filesystem
|
||||||
|
|
||||||
|
### Library
|
||||||
|
|
||||||
|
```js
|
||||||
|
import { mount } from 'fuse-vhd'
|
||||||
|
|
||||||
|
// return a disposable, see promise-toolbox/Disposable
|
||||||
|
// unmount automatically when disposable is disposed
|
||||||
|
// in case of differencing VHD, it mounts the full chain
|
||||||
|
await mount(handler, diskId, mountPoint)
|
||||||
|
```
|
||||||
|
|
||||||
|
### cli
|
||||||
|
|
||||||
|
From the install folder :
|
||||||
|
|
||||||
|
```
|
||||||
|
cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
|
||||||
|
```
|
||||||
|
|
||||||
|
After installing the package
|
||||||
|
|
||||||
|
```
|
||||||
|
xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
|
||||||
|
```
|
||||||
|
|
||||||
|
remoteUrl can be found by using cli in `@xen-orchestra/fs` , for example a local remote will have a url like `file:///path/to/remote/root`
|
||||||
59
@vates/fuse-vhd/README.md
Normal file
59
@vates/fuse-vhd/README.md
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||||
|
|
||||||
|
# @vates/fuse-vhd
|
||||||
|
|
||||||
|
[](https://npmjs.org/package/@vates/fuse-vhd)  [](https://bundlephobia.com/result?p=@vates/fuse-vhd) [](https://npmjs.org/package/@vates/fuse-vhd)
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
Installation of the [npm package](https://npmjs.org/package/@vates/fuse-vhd):
|
||||||
|
|
||||||
|
```sh
|
||||||
|
npm install --save @vates/fuse-vhd
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
Mount a vhd generated by xen-orchestra to filesystem
|
||||||
|
|
||||||
|
### Library
|
||||||
|
|
||||||
|
```js
|
||||||
|
import { mount } from 'fuse-vhd'
|
||||||
|
|
||||||
|
// return a disposable, see promise-toolbox/Disposable
|
||||||
|
// unmount automatically when disposable is disposed
|
||||||
|
// in case of differencing VHD, it mounts the full chain
|
||||||
|
await mount(handler, diskId, mountPoint)
|
||||||
|
```
|
||||||
|
|
||||||
|
### cli
|
||||||
|
|
||||||
|
From the install folder :
|
||||||
|
|
||||||
|
```
|
||||||
|
cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
|
||||||
|
```
|
||||||
|
|
||||||
|
After installing the package
|
||||||
|
|
||||||
|
```
|
||||||
|
xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
|
||||||
|
```
|
||||||
|
|
||||||
|
remoteUrl can be found by using cli in `@xen-orchestra/fs` , for example a local remote will have a url like `file:///path/to/remote/root`
|
||||||
|
|
||||||
|
## Contributions
|
||||||
|
|
||||||
|
Contributions are _very_ welcomed, either on the documentation or on
|
||||||
|
the code.
|
||||||
|
|
||||||
|
You may:
|
||||||
|
|
||||||
|
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||||
|
you've encountered;
|
||||||
|
- fork and create a pull request.
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||||
26
@vates/fuse-vhd/cli.mjs
Executable file
26
@vates/fuse-vhd/cli.mjs
Executable file
@@ -0,0 +1,26 @@
|
|||||||
|
#!/usr/bin/env node
|
||||||
|
|
||||||
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import { getSyncedHandler } from '@xen-orchestra/fs'
|
||||||
|
|
||||||
|
import { mount } from './index.mjs'
|
||||||
|
|
||||||
|
async function* main([remoteUrl, vhdPathInRemote, mountPoint]) {
|
||||||
|
if (mountPoint === undefined) {
|
||||||
|
throw new TypeError('missing arg: cli <remoteUrl> <vhdPathInRemote> <mountPoint>')
|
||||||
|
}
|
||||||
|
const handler = yield getSyncedHandler({ url: remoteUrl })
|
||||||
|
const mounted = await mount(handler, vhdPathInRemote, mountPoint)
|
||||||
|
|
||||||
|
let disposePromise
|
||||||
|
process.on('SIGINT', async () => {
|
||||||
|
// ensure single dispose
|
||||||
|
if (!disposePromise) {
|
||||||
|
disposePromise = mounted.dispose()
|
||||||
|
}
|
||||||
|
await disposePromise
|
||||||
|
process.exit()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
Disposable.wrap(main)(process.argv.slice(2))
|
||||||
@@ -1,9 +1,7 @@
|
|||||||
'use strict'
|
import LRU from 'lru-cache'
|
||||||
|
import Fuse from 'fuse-native'
|
||||||
const LRU = require('lru-cache')
|
import { VhdSynthetic } from 'vhd-lib'
|
||||||
const Fuse = require('fuse-native')
|
import { Disposable, fromCallback } from 'promise-toolbox'
|
||||||
const { VhdSynthetic } = require('vhd-lib')
|
|
||||||
const { Disposable, fromCallback } = require('promise-toolbox')
|
|
||||||
|
|
||||||
// build a s stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
|
// build a s stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
|
||||||
const stat = st => ({
|
const stat = st => ({
|
||||||
@@ -16,7 +14,7 @@ const stat = st => ({
|
|||||||
gid: st.gid !== undefined ? st.gid : process.getgid(),
|
gid: st.gid !== undefined ? st.gid : process.getgid(),
|
||||||
})
|
})
|
||||||
|
|
||||||
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
|
export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
|
||||||
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
|
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
|
||||||
|
|
||||||
const cache = new LRU({
|
const cache = new LRU({
|
||||||
@@ -60,7 +58,7 @@ exports.mount = Disposable.factory(async function* mount(handler, diskPath, moun
|
|||||||
},
|
},
|
||||||
})
|
})
|
||||||
return new Disposable(
|
return new Disposable(
|
||||||
() => fromCallback(() => fuse.unmount()),
|
() => fromCallback(cb => fuse.unmount(cb)),
|
||||||
fromCallback(() => fuse.mount())
|
fromCallback(cb => fuse.mount(cb))
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "@vates/fuse-vhd",
|
"name": "@vates/fuse-vhd",
|
||||||
"version": "1.0.0",
|
"version": "2.1.0",
|
||||||
"license": "ISC",
|
"license": "ISC",
|
||||||
"private": false,
|
"private": false,
|
||||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
|
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
|
||||||
@@ -15,13 +15,18 @@
|
|||||||
"url": "https://vates.fr"
|
"url": "https://vates.fr"
|
||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=10.0"
|
"node": ">=14"
|
||||||
},
|
},
|
||||||
|
"main": "./index.mjs",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
"@xen-orchestra/fs": "^4.1.4",
|
||||||
"fuse-native": "^2.2.6",
|
"fuse-native": "^2.2.6",
|
||||||
"lru-cache": "^7.14.0",
|
"lru-cache": "^7.14.0",
|
||||||
"promise-toolbox": "^0.21.0",
|
"promise-toolbox": "^0.21.0",
|
||||||
"vhd-lib": "^4.5.0"
|
"vhd-lib": "^4.9.0"
|
||||||
|
},
|
||||||
|
"bin": {
|
||||||
|
"xo-fuse-vhd": "./cli.mjs"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public"
|
"postversion": "npm publish --access public"
|
||||||
|
|||||||
@@ -17,4 +17,14 @@ map.get(['foo', 'bar']) // 2
|
|||||||
map.get(['bar', 'foo']) // 3
|
map.get(['bar', 'foo']) // 3
|
||||||
map.get([OBJ]) // 4
|
map.get([OBJ]) // 4
|
||||||
map.get([{}]) // undefined
|
map.get([{}]) // undefined
|
||||||
|
|
||||||
|
map.delete([])
|
||||||
|
|
||||||
|
for (const [key, value] of map.entries() {
|
||||||
|
console.log(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const value of map.values()) {
|
||||||
|
console.log(value)
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -35,6 +35,16 @@ map.get(['foo', 'bar']) // 2
|
|||||||
map.get(['bar', 'foo']) // 3
|
map.get(['bar', 'foo']) // 3
|
||||||
map.get([OBJ]) // 4
|
map.get([OBJ]) // 4
|
||||||
map.get([{}]) // undefined
|
map.get([{}]) // undefined
|
||||||
|
|
||||||
|
map.delete([])
|
||||||
|
|
||||||
|
for (const [key, value] of map.entries() {
|
||||||
|
console.log(key, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const value of map.values()) {
|
||||||
|
console.log(value)
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Contributions
|
## Contributions
|
||||||
|
|||||||
@@ -36,14 +36,31 @@ function del(node, i, keys) {
|
|||||||
return node
|
return node
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function* entries(node, key) {
|
||||||
|
if (node !== undefined) {
|
||||||
|
if (node instanceof Node) {
|
||||||
|
const { value } = node
|
||||||
|
if (value !== undefined) {
|
||||||
|
yield [key, node.value]
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const [childKey, child] of node.children.entries()) {
|
||||||
|
yield* entries(child, key.concat(childKey))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
yield [key, node]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function get(node, i, keys) {
|
function get(node, i, keys) {
|
||||||
return i === keys.length
|
return i === keys.length
|
||||||
? node instanceof Node
|
? node instanceof Node
|
||||||
? node.value
|
? node.value
|
||||||
: node
|
: node
|
||||||
: node instanceof Node
|
: node instanceof Node
|
||||||
? get(node.children.get(keys[i]), i + 1, keys)
|
? get(node.children.get(keys[i]), i + 1, keys)
|
||||||
: undefined
|
: undefined
|
||||||
}
|
}
|
||||||
|
|
||||||
function set(node, i, keys, value) {
|
function set(node, i, keys, value) {
|
||||||
@@ -69,6 +86,22 @@ function set(node, i, keys, value) {
|
|||||||
return node
|
return node
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function* values(node) {
|
||||||
|
if (node !== undefined) {
|
||||||
|
if (node instanceof Node) {
|
||||||
|
const { value } = node
|
||||||
|
if (value !== undefined) {
|
||||||
|
yield node.value
|
||||||
|
}
|
||||||
|
for (const child of node.children.values()) {
|
||||||
|
yield* values(child)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
yield node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
exports.MultiKeyMap = class MultiKeyMap {
|
exports.MultiKeyMap = class MultiKeyMap {
|
||||||
constructor() {
|
constructor() {
|
||||||
// each node is either a value or a Node if it contains children
|
// each node is either a value or a Node if it contains children
|
||||||
@@ -79,6 +112,10 @@ exports.MultiKeyMap = class MultiKeyMap {
|
|||||||
this._root = del(this._root, 0, keys)
|
this._root = del(this._root, 0, keys)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
entries() {
|
||||||
|
return entries(this._root, [])
|
||||||
|
}
|
||||||
|
|
||||||
get(keys) {
|
get(keys) {
|
||||||
return get(this._root, 0, keys)
|
return get(this._root, 0, keys)
|
||||||
}
|
}
|
||||||
@@ -86,4 +123,8 @@ exports.MultiKeyMap = class MultiKeyMap {
|
|||||||
set(keys, value) {
|
set(keys, value) {
|
||||||
this._root = set(this._root, 0, keys, value)
|
this._root = set(this._root, 0, keys, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
values() {
|
||||||
|
return values(this._root)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,7 +19,7 @@ describe('MultiKeyMap', () => {
|
|||||||
// reverse composite key
|
// reverse composite key
|
||||||
['bar', 'foo'],
|
['bar', 'foo'],
|
||||||
]
|
]
|
||||||
const values = keys.map(() => ({}))
|
const values = keys.map(() => Math.random())
|
||||||
|
|
||||||
// set all values first to make sure they are all stored and not only the
|
// set all values first to make sure they are all stored and not only the
|
||||||
// last one
|
// last one
|
||||||
@@ -27,6 +27,12 @@ describe('MultiKeyMap', () => {
|
|||||||
map.set(key, values[i])
|
map.set(key, values[i])
|
||||||
})
|
})
|
||||||
|
|
||||||
|
assert.deepEqual(
|
||||||
|
Array.from(map.entries()),
|
||||||
|
keys.map((key, i) => [key, values[i]])
|
||||||
|
)
|
||||||
|
assert.deepEqual(Array.from(map.values()), values)
|
||||||
|
|
||||||
keys.forEach((key, i) => {
|
keys.forEach((key, i) => {
|
||||||
// copy the key to make sure the array itself is not the key
|
// copy the key to make sure the array itself is not the key
|
||||||
assert.strictEqual(map.get(key.slice()), values[i])
|
assert.strictEqual(map.get(key.slice()), values[i])
|
||||||
|
|||||||
@@ -18,7 +18,7 @@
|
|||||||
"url": "https://vates.fr"
|
"url": "https://vates.fr"
|
||||||
},
|
},
|
||||||
"license": "ISC",
|
"license": "ISC",
|
||||||
"version": "0.1.0",
|
"version": "0.2.0",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=8.10"
|
"node": ">=8.10"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -1,42 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
|
|
||||||
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
|
|
||||||
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
|
|
||||||
exports.NBD_OPT_EXPORT_NAME = 1
|
|
||||||
exports.NBD_OPT_ABORT = 2
|
|
||||||
exports.NBD_OPT_LIST = 3
|
|
||||||
exports.NBD_OPT_STARTTLS = 5
|
|
||||||
exports.NBD_OPT_INFO = 6
|
|
||||||
exports.NBD_OPT_GO = 7
|
|
||||||
|
|
||||||
exports.NBD_FLAG_HAS_FLAGS = 1 << 0
|
|
||||||
exports.NBD_FLAG_READ_ONLY = 1 << 1
|
|
||||||
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
|
|
||||||
exports.NBD_FLAG_SEND_FUA = 1 << 3
|
|
||||||
exports.NBD_FLAG_ROTATIONAL = 1 << 4
|
|
||||||
exports.NBD_FLAG_SEND_TRIM = 1 << 5
|
|
||||||
|
|
||||||
exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
|
|
||||||
|
|
||||||
exports.NBD_CMD_FLAG_FUA = 1 << 0
|
|
||||||
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
|
|
||||||
exports.NBD_CMD_FLAG_DF = 1 << 2
|
|
||||||
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
|
|
||||||
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
|
|
||||||
|
|
||||||
exports.NBD_CMD_READ = 0
|
|
||||||
exports.NBD_CMD_WRITE = 1
|
|
||||||
exports.NBD_CMD_DISC = 2
|
|
||||||
exports.NBD_CMD_FLUSH = 3
|
|
||||||
exports.NBD_CMD_TRIM = 4
|
|
||||||
exports.NBD_CMD_CACHE = 5
|
|
||||||
exports.NBD_CMD_WRITE_ZEROES = 6
|
|
||||||
exports.NBD_CMD_BLOCK_STATUS = 7
|
|
||||||
exports.NBD_CMD_RESIZE = 8
|
|
||||||
|
|
||||||
exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
|
|
||||||
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
|
|
||||||
exports.NBD_REPLY_ACK = 1
|
|
||||||
|
|
||||||
exports.NBD_DEFAULT_PORT = 10809
|
|
||||||
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
|
|
||||||
41
@vates/nbd-client/constants.mjs
Normal file
41
@vates/nbd-client/constants.mjs
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
|
||||||
|
export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
|
||||||
|
export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
|
||||||
|
export const NBD_OPT_EXPORT_NAME = 1
|
||||||
|
export const NBD_OPT_ABORT = 2
|
||||||
|
export const NBD_OPT_LIST = 3
|
||||||
|
export const NBD_OPT_STARTTLS = 5
|
||||||
|
export const NBD_OPT_INFO = 6
|
||||||
|
export const NBD_OPT_GO = 7
|
||||||
|
|
||||||
|
export const NBD_FLAG_HAS_FLAGS = 1 << 0
|
||||||
|
export const NBD_FLAG_READ_ONLY = 1 << 1
|
||||||
|
export const NBD_FLAG_SEND_FLUSH = 1 << 2
|
||||||
|
export const NBD_FLAG_SEND_FUA = 1 << 3
|
||||||
|
export const NBD_FLAG_ROTATIONAL = 1 << 4
|
||||||
|
export const NBD_FLAG_SEND_TRIM = 1 << 5
|
||||||
|
|
||||||
|
export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
|
||||||
|
|
||||||
|
export const NBD_CMD_FLAG_FUA = 1 << 0
|
||||||
|
export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
|
||||||
|
export const NBD_CMD_FLAG_DF = 1 << 2
|
||||||
|
export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
|
||||||
|
export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4
|
||||||
|
|
||||||
|
export const NBD_CMD_READ = 0
|
||||||
|
export const NBD_CMD_WRITE = 1
|
||||||
|
export const NBD_CMD_DISC = 2
|
||||||
|
export const NBD_CMD_FLUSH = 3
|
||||||
|
export const NBD_CMD_TRIM = 4
|
||||||
|
export const NBD_CMD_CACHE = 5
|
||||||
|
export const NBD_CMD_WRITE_ZEROES = 6
|
||||||
|
export const NBD_CMD_BLOCK_STATUS = 7
|
||||||
|
export const NBD_CMD_RESIZE = 8
|
||||||
|
|
||||||
|
export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
|
||||||
|
export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
|
||||||
|
export const NBD_REPLY_ACK = 1
|
||||||
|
|
||||||
|
export const NBD_DEFAULT_PORT = 10809
|
||||||
|
export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
|
||||||
@@ -1,8 +1,10 @@
|
|||||||
'use strict'
|
import assert from 'node:assert'
|
||||||
const assert = require('node:assert')
|
import { Socket } from 'node:net'
|
||||||
const { Socket } = require('node:net')
|
import { connect } from 'node:tls'
|
||||||
const { connect } = require('node:tls')
|
import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
|
||||||
const {
|
import { readChunkStrict } from '@vates/read-chunk'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import {
|
||||||
INIT_PASSWD,
|
INIT_PASSWD,
|
||||||
NBD_CMD_READ,
|
NBD_CMD_READ,
|
||||||
NBD_DEFAULT_BLOCK_SIZE,
|
NBD_DEFAULT_BLOCK_SIZE,
|
||||||
@@ -17,16 +19,12 @@ const {
|
|||||||
NBD_REQUEST_MAGIC,
|
NBD_REQUEST_MAGIC,
|
||||||
OPTS_MAGIC,
|
OPTS_MAGIC,
|
||||||
NBD_CMD_DISC,
|
NBD_CMD_DISC,
|
||||||
} = require('./constants.js')
|
} from './constants.mjs'
|
||||||
const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
|
|
||||||
const { readChunkStrict } = require('@vates/read-chunk')
|
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
|
||||||
|
|
||||||
const { warn } = createLogger('vates:nbd-client')
|
const { warn } = createLogger('vates:nbd-client')
|
||||||
|
|
||||||
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
|
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
|
||||||
|
|
||||||
module.exports = class NbdClient {
|
export default class NbdClient {
|
||||||
#serverAddress
|
#serverAddress
|
||||||
#serverCert
|
#serverCert
|
||||||
#serverPort
|
#serverPort
|
||||||
@@ -40,6 +38,7 @@ module.exports = class NbdClient {
|
|||||||
#readBlockRetries
|
#readBlockRetries
|
||||||
#reconnectRetry
|
#reconnectRetry
|
||||||
#connectTimeout
|
#connectTimeout
|
||||||
|
#messageTimeout
|
||||||
|
|
||||||
// AFAIK, there is no guaranty the server answers in the same order as the queries
|
// AFAIK, there is no guaranty the server answers in the same order as the queries
|
||||||
// so we handle a backlog of command waiting for response and handle concurrency manually
|
// so we handle a backlog of command waiting for response and handle concurrency manually
|
||||||
@@ -52,7 +51,14 @@ module.exports = class NbdClient {
|
|||||||
#reconnectingPromise
|
#reconnectingPromise
|
||||||
constructor(
|
constructor(
|
||||||
{ address, port = NBD_DEFAULT_PORT, exportname, cert },
|
{ address, port = NBD_DEFAULT_PORT, exportname, cert },
|
||||||
{ connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
|
{
|
||||||
|
connectTimeout = 6e4,
|
||||||
|
messageTimeout = 6e4,
|
||||||
|
waitBeforeReconnect = 1e3,
|
||||||
|
readAhead = 10,
|
||||||
|
readBlockRetries = 5,
|
||||||
|
reconnectRetry = 5,
|
||||||
|
} = {}
|
||||||
) {
|
) {
|
||||||
this.#serverAddress = address
|
this.#serverAddress = address
|
||||||
this.#serverPort = port
|
this.#serverPort = port
|
||||||
@@ -63,6 +69,7 @@ module.exports = class NbdClient {
|
|||||||
this.#readBlockRetries = readBlockRetries
|
this.#readBlockRetries = readBlockRetries
|
||||||
this.#reconnectRetry = reconnectRetry
|
this.#reconnectRetry = reconnectRetry
|
||||||
this.#connectTimeout = connectTimeout
|
this.#connectTimeout = connectTimeout
|
||||||
|
this.#messageTimeout = messageTimeout
|
||||||
}
|
}
|
||||||
|
|
||||||
get exportSize() {
|
get exportSize() {
|
||||||
@@ -115,13 +122,27 @@ module.exports = class NbdClient {
|
|||||||
if (!this.#connected) {
|
if (!this.#connected) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
this.#connected = false
|
||||||
|
const socket = this.#serverSocket
|
||||||
|
|
||||||
|
const queryId = this.#nextCommandQueryId
|
||||||
|
this.#nextCommandQueryId++
|
||||||
|
|
||||||
const buffer = Buffer.alloc(28)
|
const buffer = Buffer.alloc(28)
|
||||||
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
|
buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
|
||||||
buffer.writeInt16BE(0, 4) // no command flags for a disconnect
|
buffer.writeInt16BE(0, 4) // no command flags for a disconnect
|
||||||
buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
|
buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
|
||||||
await this.#write(buffer)
|
buffer.writeBigUInt64BE(queryId, 8)
|
||||||
await this.#serverSocket.destroy()
|
buffer.writeBigUInt64BE(0n, 16)
|
||||||
|
buffer.writeInt32BE(0, 24)
|
||||||
|
const promise = pFromCallback(cb => {
|
||||||
|
socket.end(buffer, 'utf8', cb)
|
||||||
|
})
|
||||||
|
try {
|
||||||
|
await pTimeout.call(promise, this.#messageTimeout)
|
||||||
|
} catch (error) {
|
||||||
|
socket.destroy()
|
||||||
|
}
|
||||||
this.#serverSocket = undefined
|
this.#serverSocket = undefined
|
||||||
this.#connected = false
|
this.#connected = false
|
||||||
}
|
}
|
||||||
@@ -195,11 +216,13 @@ module.exports = class NbdClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#read(length) {
|
#read(length) {
|
||||||
return readChunkStrict(this.#serverSocket, length)
|
const promise = readChunkStrict(this.#serverSocket, length)
|
||||||
|
return pTimeout.call(promise, this.#messageTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
#write(buffer) {
|
#write(buffer) {
|
||||||
return fromCallback.call(this.#serverSocket, 'write', buffer)
|
const promise = fromCallback.call(this.#serverSocket, 'write', buffer)
|
||||||
|
return pTimeout.call(promise, this.#messageTimeout)
|
||||||
}
|
}
|
||||||
|
|
||||||
async #readInt32() {
|
async #readInt32() {
|
||||||
@@ -232,19 +255,20 @@ module.exports = class NbdClient {
|
|||||||
}
|
}
|
||||||
try {
|
try {
|
||||||
this.#waitingForResponse = true
|
this.#waitingForResponse = true
|
||||||
const magic = await this.#readInt32()
|
const buffer = await this.#read(16)
|
||||||
|
const magic = buffer.readInt32BE(0)
|
||||||
|
|
||||||
if (magic !== NBD_REPLY_MAGIC) {
|
if (magic !== NBD_REPLY_MAGIC) {
|
||||||
throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
|
throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
|
||||||
}
|
}
|
||||||
|
|
||||||
const error = await this.#readInt32()
|
const error = buffer.readInt32BE(4)
|
||||||
if (error !== 0) {
|
if (error !== 0) {
|
||||||
// @todo use error code from constants.mjs
|
// @todo use error code from constants.mjs
|
||||||
throw new Error(`GOT ERROR CODE : ${error}`)
|
throw new Error(`GOT ERROR CODE : ${error}`)
|
||||||
}
|
}
|
||||||
|
|
||||||
const blockQueryId = await this.#readInt64()
|
const blockQueryId = buffer.readBigUInt64BE(8)
|
||||||
const query = this.#commandQueryBacklog.get(blockQueryId)
|
const query = this.#commandQueryBacklog.get(blockQueryId)
|
||||||
if (!query) {
|
if (!query) {
|
||||||
throw new Error(` no query associated with id ${blockQueryId}`)
|
throw new Error(` no query associated with id ${blockQueryId}`)
|
||||||
@@ -265,7 +289,7 @@ module.exports = class NbdClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
|
async #readBlock(index, size) {
|
||||||
// we don't want to add anything in backlog while reconnecting
|
// we don't want to add anything in backlog while reconnecting
|
||||||
if (this.#reconnectingPromise) {
|
if (this.#reconnectingPromise) {
|
||||||
await this.#reconnectingPromise
|
await this.#reconnectingPromise
|
||||||
@@ -281,7 +305,13 @@ module.exports = class NbdClient {
|
|||||||
buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
|
buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
|
||||||
buffer.writeBigUInt64BE(queryId, 8)
|
buffer.writeBigUInt64BE(queryId, 8)
|
||||||
// byte offset in the raw disk
|
// byte offset in the raw disk
|
||||||
buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
|
const offset = BigInt(index) * BigInt(size)
|
||||||
|
const remaining = this.#exportSize - offset
|
||||||
|
if (remaining < BigInt(size)) {
|
||||||
|
size = Number(remaining)
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer.writeBigUInt64BE(offset, 16)
|
||||||
buffer.writeInt32BE(size, 24)
|
buffer.writeInt32BE(size, 24)
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
@@ -307,45 +337,13 @@ module.exports = class NbdClient {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async *readBlocks(indexGenerator) {
|
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
|
||||||
// default : read all blocks
|
return pRetry(() => this.#readBlock(index, size), {
|
||||||
if (indexGenerator === undefined) {
|
tries: this.#readBlockRetries,
|
||||||
const exportSize = this.#exportSize
|
onRetry: async err => {
|
||||||
const chunkSize = 2 * 1024 * 1024
|
warn('will retry reading block ', index, err)
|
||||||
indexGenerator = function* () {
|
await this.reconnect()
|
||||||
const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
|
},
|
||||||
for (let index = 0; BigInt(index) < nbBlocks; index++) {
|
})
|
||||||
yield { index, size: chunkSize }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const readAhead = []
|
|
||||||
const readAheadMaxLength = this.#readAhead
|
|
||||||
const makeReadBlockPromise = (index, size) => {
|
|
||||||
const promise = pRetry(() => this.readBlock(index, size), {
|
|
||||||
tries: this.#readBlockRetries,
|
|
||||||
onRetry: async err => {
|
|
||||||
warn('will retry reading block ', index, err)
|
|
||||||
await this.reconnect()
|
|
||||||
},
|
|
||||||
})
|
|
||||||
// error is handled during unshift
|
|
||||||
promise.catch(() => {})
|
|
||||||
return promise
|
|
||||||
}
|
|
||||||
|
|
||||||
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
|
|
||||||
for (const { index, size } of indexGenerator()) {
|
|
||||||
// stack readAheadMaxLength promises before starting to handle the results
|
|
||||||
if (readAhead.length === readAheadMaxLength) {
|
|
||||||
// any error will stop reading blocks
|
|
||||||
yield readAhead.shift()
|
|
||||||
}
|
|
||||||
|
|
||||||
readAhead.push(makeReadBlockPromise(index, size))
|
|
||||||
}
|
|
||||||
while (readAhead.length > 0) {
|
|
||||||
yield readAhead.shift()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
85
@vates/nbd-client/multi.mjs
Normal file
85
@vates/nbd-client/multi.mjs
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
import { asyncEach } from '@vates/async-each'
|
||||||
|
import { NBD_DEFAULT_BLOCK_SIZE } from './constants.mjs'
|
||||||
|
import NbdClient from './index.mjs'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
|
||||||
|
const { warn } = createLogger('vates:nbd-client:multi')
|
||||||
|
export default class MultiNbdClient {
|
||||||
|
#clients = []
|
||||||
|
#readAhead
|
||||||
|
|
||||||
|
get exportSize() {
|
||||||
|
return this.#clients[0].exportSize
|
||||||
|
}
|
||||||
|
|
||||||
|
constructor(settings, { nbdConcurrency = 8, readAhead = 16, ...options } = {}) {
|
||||||
|
this.#readAhead = readAhead
|
||||||
|
if (!Array.isArray(settings)) {
|
||||||
|
settings = [settings]
|
||||||
|
}
|
||||||
|
for (let i = 0; i < nbdConcurrency; i++) {
|
||||||
|
this.#clients.push(
|
||||||
|
new NbdClient(settings[i % settings.length], { ...options, readAhead: Math.ceil(readAhead / nbdConcurrency) })
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async connect() {
|
||||||
|
const connectedClients = []
|
||||||
|
for (const clientId in this.#clients) {
|
||||||
|
const client = this.#clients[clientId]
|
||||||
|
try {
|
||||||
|
await client.connect()
|
||||||
|
connectedClients.push(client)
|
||||||
|
} catch (err) {
|
||||||
|
client.disconnect().catch(() => {})
|
||||||
|
warn(`can't connect to one nbd client`, { err })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (connectedClients.length === 0) {
|
||||||
|
throw new Error(`Fail to connect to any Nbd client`)
|
||||||
|
}
|
||||||
|
if (connectedClients.length < this.#clients.length) {
|
||||||
|
warn(
|
||||||
|
`incomplete connection by multi Nbd, only ${connectedClients.length} over ${this.#clients.length} expected clients`
|
||||||
|
)
|
||||||
|
this.#clients = connectedClients
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async disconnect() {
|
||||||
|
await asyncEach(this.#clients, client => client.disconnect(), {
|
||||||
|
stopOnError: false,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
|
||||||
|
const clientId = index % this.#clients.length
|
||||||
|
return this.#clients[clientId].readBlock(index, size)
|
||||||
|
}
|
||||||
|
|
||||||
|
async *readBlocks(indexGenerator) {
|
||||||
|
// default : read all blocks
|
||||||
|
const readAhead = []
|
||||||
|
const makeReadBlockPromise = (index, size) => {
|
||||||
|
const promise = this.readBlock(index, size)
|
||||||
|
// error is handled during unshift
|
||||||
|
promise.catch(() => {})
|
||||||
|
return promise
|
||||||
|
}
|
||||||
|
|
||||||
|
// read all blocks, but try to keep readAheadMaxLength promise waiting ahead
|
||||||
|
for (const { index, size } of indexGenerator()) {
|
||||||
|
// stack readAheadMaxLength promises before starting to handle the results
|
||||||
|
if (readAhead.length === this.#readAhead) {
|
||||||
|
// any error will stop reading blocks
|
||||||
|
yield readAhead.shift()
|
||||||
|
}
|
||||||
|
|
||||||
|
readAhead.push(makeReadBlockPromise(index, size))
|
||||||
|
}
|
||||||
|
while (readAhead.length > 0) {
|
||||||
|
yield readAhead.shift()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -13,24 +13,25 @@
|
|||||||
"url": "https://vates.fr"
|
"url": "https://vates.fr"
|
||||||
},
|
},
|
||||||
"license": "ISC",
|
"license": "ISC",
|
||||||
"version": "1.2.1",
|
"version": "3.0.0",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=14.0"
|
"node": ">=14.0"
|
||||||
},
|
},
|
||||||
|
"main": "./index.mjs",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@vates/async-each": "^1.0.0",
|
"@vates/async-each": "^1.0.0",
|
||||||
"@vates/read-chunk": "^1.1.1",
|
"@vates/read-chunk": "^1.2.0",
|
||||||
"@xen-orchestra/async-map": "^0.1.2",
|
"@xen-orchestra/async-map": "^0.1.2",
|
||||||
"@xen-orchestra/log": "^0.6.0",
|
"@xen-orchestra/log": "^0.6.0",
|
||||||
"promise-toolbox": "^0.21.0",
|
"promise-toolbox": "^0.21.0",
|
||||||
"xen-api": "^1.3.3"
|
"xen-api": "^2.0.1"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"tap": "^16.3.0",
|
"tap": "^18.7.0",
|
||||||
"tmp": "^0.2.1"
|
"tmp": "^0.2.1"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public",
|
"postversion": "npm publish --access public",
|
||||||
"test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.js"
|
"test-integration": "tap --allow-incomplete-coverage"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
'use strict'
|
import { spawn, exec } from 'node:child_process'
|
||||||
const NbdClient = require('../index.js')
|
import fs from 'node:fs/promises'
|
||||||
const { spawn, exec } = require('node:child_process')
|
import { test } from 'tap'
|
||||||
const fs = require('node:fs/promises')
|
import tmp from 'tmp'
|
||||||
const { test } = require('tap')
|
import { pFromCallback } from 'promise-toolbox'
|
||||||
const tmp = require('tmp')
|
import { Socket } from 'node:net'
|
||||||
const { pFromCallback } = require('promise-toolbox')
|
import { NBD_DEFAULT_PORT } from '../constants.mjs'
|
||||||
const { Socket } = require('node:net')
|
import assert from 'node:assert'
|
||||||
const { NBD_DEFAULT_PORT } = require('../constants.js')
|
import MultiNbdClient from '../multi.mjs'
|
||||||
const assert = require('node:assert')
|
|
||||||
|
|
||||||
const FILE_SIZE = 10 * 1024 * 1024
|
const CHUNK_SIZE = 1024 * 1024 // non default size
|
||||||
|
const FILE_SIZE = 1024 * 1024 * 9.5 // non aligned file size
|
||||||
|
|
||||||
async function createTempFile(size) {
|
async function createTempFile(size) {
|
||||||
const tmpPath = await pFromCallback(cb => tmp.file(cb))
|
const tmpPath = await pFromCallback(cb => tmp.file(cb))
|
||||||
@@ -82,7 +82,7 @@ test('it works with unsecured network', async tap => {
|
|||||||
const path = await createTempFile(FILE_SIZE)
|
const path = await createTempFile(FILE_SIZE)
|
||||||
|
|
||||||
let nbdServer = await spawnNbdKit(path)
|
let nbdServer = await spawnNbdKit(path)
|
||||||
const client = new NbdClient(
|
const client = new MultiNbdClient(
|
||||||
{
|
{
|
||||||
address: '127.0.0.1',
|
address: '127.0.0.1',
|
||||||
exportname: 'MY_SECRET_EXPORT',
|
exportname: 'MY_SECRET_EXPORT',
|
||||||
@@ -110,13 +110,13 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
|||||||
`,
|
`,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
nbdConcurrency: 1,
|
||||||
readAhead: 2,
|
readAhead: 2,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
await client.connect()
|
await client.connect()
|
||||||
tap.equal(client.exportSize, BigInt(FILE_SIZE))
|
tap.equal(client.exportSize, BigInt(FILE_SIZE))
|
||||||
const CHUNK_SIZE = 1024 * 1024 // non default size
|
|
||||||
const indexes = []
|
const indexes = []
|
||||||
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
|
for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
|
||||||
indexes.push(i)
|
indexes.push(i)
|
||||||
@@ -128,9 +128,9 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
|||||||
})
|
})
|
||||||
let i = 0
|
let i = 0
|
||||||
for await (const block of nbdIterator) {
|
for await (const block of nbdIterator) {
|
||||||
let blockOk = true
|
let blockOk = block.length === Math.min(CHUNK_SIZE, FILE_SIZE - CHUNK_SIZE * i)
|
||||||
let firstFail
|
let firstFail
|
||||||
for (let j = 0; j < CHUNK_SIZE; j += 4) {
|
for (let j = 0; j < block.length; j += 4) {
|
||||||
const wanted = i * CHUNK_SIZE + j
|
const wanted = i * CHUNK_SIZE + j
|
||||||
const found = block.readUInt32BE(j)
|
const found = block.readUInt32BE(j)
|
||||||
blockOk = blockOk && found === wanted
|
blockOk = blockOk && found === wanted
|
||||||
@@ -138,7 +138,7 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
|||||||
firstFail = j
|
firstFail = j
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tap.ok(blockOk, `check block ${i} content`)
|
tap.ok(blockOk, `check block ${i} content ${block.length}`)
|
||||||
i++
|
i++
|
||||||
|
|
||||||
// flaky server is flaky
|
// flaky server is flaky
|
||||||
@@ -148,17 +148,6 @@ CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
|
|||||||
nbdServer = await spawnNbdKit(path)
|
nbdServer = await spawnNbdKit(path)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// we can reuse the conneciton to read other blocks
|
|
||||||
// default iterator
|
|
||||||
const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
|
|
||||||
let nb = 0
|
|
||||||
for await (const block of nbdIteratorWithDefaultBlockIterator) {
|
|
||||||
nb++
|
|
||||||
tap.equal(block.length, 2 * 1024 * 1024)
|
|
||||||
}
|
|
||||||
|
|
||||||
tap.equal(nb, 5)
|
|
||||||
assert.rejects(() => client.readBlock(100, CHUNK_SIZE))
|
assert.rejects(() => client.readBlock(100, CHUNK_SIZE))
|
||||||
|
|
||||||
await client.disconnect()
|
await client.disconnect()
|
||||||
@@ -1,4 +1,3 @@
|
|||||||
'use strict'
|
|
||||||
/*
|
/*
|
||||||
|
|
||||||
node-vsphere-soap
|
node-vsphere-soap
|
||||||
@@ -12,17 +11,18 @@
|
|||||||
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
const EventEmitter = require('events').EventEmitter
|
import { EventEmitter } from 'events'
|
||||||
const axios = require('axios')
|
import axios from 'axios'
|
||||||
const https = require('node:https')
|
import https from 'node:https'
|
||||||
const util = require('util')
|
import util from 'util'
|
||||||
const soap = require('soap')
|
import soap from 'soap'
|
||||||
const Cookie = require('soap-cookie') // required for session persistence
|
import Cookie from 'soap-cookie' // required for session persistence
|
||||||
|
|
||||||
// Client class
|
// Client class
|
||||||
// inherits from EventEmitter
|
// inherits from EventEmitter
|
||||||
// possible events: connect, error, ready
|
// possible events: connect, error, ready
|
||||||
|
|
||||||
function Client(vCenterHostname, username, password, sslVerify) {
|
export function Client(vCenterHostname, username, password, sslVerify) {
|
||||||
this.status = 'disconnected'
|
this.status = 'disconnected'
|
||||||
this.reconnectCount = 0
|
this.reconnectCount = 0
|
||||||
|
|
||||||
@@ -228,4 +228,3 @@ function _soapErrorHandler(self, emitter, command, args, err) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// end
|
// end
|
||||||
exports.Client = Client
|
|
||||||
@@ -1,8 +1,8 @@
|
|||||||
{
|
{
|
||||||
"name": "@vates/node-vsphere-soap",
|
"name": "@vates/node-vsphere-soap",
|
||||||
"version": "1.0.0",
|
"version": "2.0.0",
|
||||||
"description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
|
"description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
|
||||||
"main": "lib/client.js",
|
"main": "lib/client.mjs",
|
||||||
"author": "reedog117",
|
"author": "reedog117",
|
||||||
"repository": {
|
"repository": {
|
||||||
"directory": "@vates/node-vsphere-soap",
|
"directory": "@vates/node-vsphere-soap",
|
||||||
@@ -30,7 +30,7 @@
|
|||||||
"private": false,
|
"private": false,
|
||||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
|
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=8.10"
|
"node": ">=14"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public"
|
"postversion": "npm publish --access public"
|
||||||
|
|||||||
@@ -1,15 +1,11 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
// place your own credentials here for a vCenter or ESXi server
|
// place your own credentials here for a vCenter or ESXi server
|
||||||
// this information will be used for connecting to a vCenter instance
|
// this information will be used for connecting to a vCenter instance
|
||||||
// for module testing
|
// for module testing
|
||||||
// name the file config-test.js
|
// name the file config-test.js
|
||||||
|
|
||||||
const vCenterTestCreds = {
|
export const vCenterTestCreds = {
|
||||||
vCenterIP: 'vcsa',
|
vCenterIP: 'vcsa',
|
||||||
vCenterUser: 'vcuser',
|
vCenterUser: 'vcuser',
|
||||||
vCenterPassword: 'vcpw',
|
vCenterPassword: 'vcpw',
|
||||||
vCenter: true,
|
vCenter: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.vCenterTestCreds = vCenterTestCreds
|
|
||||||
@@ -1,18 +1,16 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
vsphere-soap.test.js
|
vsphere-soap.test.js
|
||||||
|
|
||||||
tests for the vCenterConnectionInstance class
|
tests for the vCenterConnectionInstance class
|
||||||
*/
|
*/
|
||||||
|
|
||||||
const assert = require('assert')
|
import assert from 'assert'
|
||||||
const { describe, it } = require('test')
|
import { describe, it } from 'test'
|
||||||
|
|
||||||
const vc = require('../lib/client')
|
import * as vc from '../lib/client.mjs'
|
||||||
|
|
||||||
// eslint-disable-next-line n/no-missing-require
|
// eslint-disable-next-line n/no-missing-import
|
||||||
const TestCreds = require('../config-test.js').vCenterTestCreds
|
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'
|
||||||
|
|
||||||
const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)
|
const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)
|
||||||
|
|
||||||
@@ -1,5 +1,5 @@
|
|||||||
import { strict as assert } from 'node:assert'
|
import { strict as assert } from 'node:assert'
|
||||||
import { describe, it } from 'tap/mocha'
|
import test from 'test'
|
||||||
|
|
||||||
import {
|
import {
|
||||||
generateHotp,
|
generateHotp,
|
||||||
@@ -11,6 +11,8 @@ import {
|
|||||||
verifyTotp,
|
verifyTotp,
|
||||||
} from './index.mjs'
|
} from './index.mjs'
|
||||||
|
|
||||||
|
const { describe, it } = test
|
||||||
|
|
||||||
describe('generateSecret', function () {
|
describe('generateSecret', function () {
|
||||||
it('generates a string of 32 chars', async function () {
|
it('generates a string of 32 chars', async function () {
|
||||||
const secret = generateSecret()
|
const secret = generateSecret()
|
||||||
@@ -31,9 +31,9 @@
|
|||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public",
|
"postversion": "npm publish --access public",
|
||||||
"test": "tap"
|
"test": "node--test"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"tap": "^16.3.0"
|
"test": "^3.3.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
'use strict'
|
'use strict'
|
||||||
|
|
||||||
const assert = require('assert/strict')
|
const assert = require('assert/strict')
|
||||||
const { describe, it } = require('tap').mocha
|
const { describe, it } = require('test')
|
||||||
|
|
||||||
const { every, not, some } = require('./')
|
const { every, not, some } = require('./')
|
||||||
|
|
||||||
@@ -32,9 +32,9 @@
|
|||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public",
|
"postversion": "npm publish --access public",
|
||||||
"test": "tap"
|
"test": "node--test"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"tap": "^16.0.1"
|
"test": "^3.3.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
'use strict'
|
'use strict'
|
||||||
|
|
||||||
const assert = require('assert')
|
const assert = require('assert')
|
||||||
|
const isUtf8 = require('isutf8')
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Read a chunk of data from a stream.
|
* Read a chunk of data from a stream.
|
||||||
@@ -21,41 +22,41 @@ const readChunk = (stream, size) =>
|
|||||||
stream.errored != null
|
stream.errored != null
|
||||||
? Promise.reject(stream.errored)
|
? Promise.reject(stream.errored)
|
||||||
: stream.closed || stream.readableEnded
|
: stream.closed || stream.readableEnded
|
||||||
? Promise.resolve(null)
|
? Promise.resolve(null)
|
||||||
: new Promise((resolve, reject) => {
|
: new Promise((resolve, reject) => {
|
||||||
if (size !== undefined) {
|
if (size !== undefined) {
|
||||||
assert(size > 0)
|
assert(size > 0)
|
||||||
|
|
||||||
// per Node documentation:
|
// per Node documentation:
|
||||||
// > The size argument must be less than or equal to 1 GiB.
|
// > The size argument must be less than or equal to 1 GiB.
|
||||||
assert(size < 1073741824)
|
assert(size < 1073741824)
|
||||||
}
|
}
|
||||||
|
|
||||||
function onEnd() {
|
function onEnd() {
|
||||||
resolve(null)
|
resolve(null)
|
||||||
removeListeners()
|
|
||||||
}
|
|
||||||
function onError(error) {
|
|
||||||
reject(error)
|
|
||||||
removeListeners()
|
|
||||||
}
|
|
||||||
function onReadable() {
|
|
||||||
const data = stream.read(size)
|
|
||||||
if (data !== null) {
|
|
||||||
resolve(data)
|
|
||||||
removeListeners()
|
removeListeners()
|
||||||
}
|
}
|
||||||
}
|
function onError(error) {
|
||||||
function removeListeners() {
|
reject(error)
|
||||||
stream.removeListener('end', onEnd)
|
removeListeners()
|
||||||
stream.removeListener('error', onError)
|
}
|
||||||
stream.removeListener('readable', onReadable)
|
function onReadable() {
|
||||||
}
|
const data = stream.read(size)
|
||||||
stream.on('end', onEnd)
|
if (data !== null) {
|
||||||
stream.on('error', onError)
|
resolve(data)
|
||||||
stream.on('readable', onReadable)
|
removeListeners()
|
||||||
onReadable()
|
}
|
||||||
})
|
}
|
||||||
|
function removeListeners() {
|
||||||
|
stream.removeListener('end', onEnd)
|
||||||
|
stream.removeListener('error', onError)
|
||||||
|
stream.removeListener('readable', onReadable)
|
||||||
|
}
|
||||||
|
stream.on('end', onEnd)
|
||||||
|
stream.on('error', onError)
|
||||||
|
stream.on('readable', onReadable)
|
||||||
|
onReadable()
|
||||||
|
})
|
||||||
exports.readChunk = readChunk
|
exports.readChunk = readChunk
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -81,6 +82,13 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
|
|||||||
|
|
||||||
if (size !== undefined && chunk.length !== size) {
|
if (size !== undefined && chunk.length !== size) {
|
||||||
const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
|
const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
|
||||||
|
|
||||||
|
// Buffer.isUtf8 is too recent for now
|
||||||
|
// @todo : replace external package by Buffer.isUtf8 when the supported version of node reach 18
|
||||||
|
|
||||||
|
if (chunk.length < 1024 && isUtf8(chunk)) {
|
||||||
|
error.text = chunk.toString('utf8')
|
||||||
|
}
|
||||||
Object.defineProperties(error, {
|
Object.defineProperties(error, {
|
||||||
chunk: {
|
chunk: {
|
||||||
value: chunk,
|
value: chunk,
|
||||||
@@ -103,42 +111,42 @@ async function skip(stream, size) {
|
|||||||
return stream.errored != null
|
return stream.errored != null
|
||||||
? Promise.reject(stream.errored)
|
? Promise.reject(stream.errored)
|
||||||
: size === 0 || stream.closed || stream.readableEnded
|
: size === 0 || stream.closed || stream.readableEnded
|
||||||
? Promise.resolve(0)
|
? Promise.resolve(0)
|
||||||
: new Promise((resolve, reject) => {
|
: new Promise((resolve, reject) => {
|
||||||
let left = size
|
let left = size
|
||||||
function onEnd() {
|
function onEnd() {
|
||||||
resolve(size - left)
|
resolve(size - left)
|
||||||
removeListeners()
|
|
||||||
}
|
|
||||||
function onError(error) {
|
|
||||||
reject(error)
|
|
||||||
removeListeners()
|
|
||||||
}
|
|
||||||
function onReadable() {
|
|
||||||
const data = stream.read()
|
|
||||||
left -= data === null ? 0 : data.length
|
|
||||||
if (left > 0) {
|
|
||||||
// continue to read
|
|
||||||
} else {
|
|
||||||
// if more than wanted has been read, push back the rest
|
|
||||||
if (left < 0) {
|
|
||||||
stream.unshift(data.slice(left))
|
|
||||||
}
|
|
||||||
|
|
||||||
resolve(size)
|
|
||||||
removeListeners()
|
removeListeners()
|
||||||
}
|
}
|
||||||
}
|
function onError(error) {
|
||||||
function removeListeners() {
|
reject(error)
|
||||||
stream.removeListener('end', onEnd)
|
removeListeners()
|
||||||
stream.removeListener('error', onError)
|
}
|
||||||
stream.removeListener('readable', onReadable)
|
function onReadable() {
|
||||||
}
|
const data = stream.read()
|
||||||
stream.on('end', onEnd)
|
left -= data === null ? 0 : data.length
|
||||||
stream.on('error', onError)
|
if (left > 0) {
|
||||||
stream.on('readable', onReadable)
|
// continue to read
|
||||||
onReadable()
|
} else {
|
||||||
})
|
// if more than wanted has been read, push back the rest
|
||||||
|
if (left < 0) {
|
||||||
|
stream.unshift(data.slice(left))
|
||||||
|
}
|
||||||
|
|
||||||
|
resolve(size)
|
||||||
|
removeListeners()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
function removeListeners() {
|
||||||
|
stream.removeListener('end', onEnd)
|
||||||
|
stream.removeListener('error', onError)
|
||||||
|
stream.removeListener('readable', onReadable)
|
||||||
|
}
|
||||||
|
stream.on('end', onEnd)
|
||||||
|
stream.on('error', onError)
|
||||||
|
stream.on('readable', onReadable)
|
||||||
|
onReadable()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
exports.skip = skip
|
exports.skip = skip
|
||||||
|
|
||||||
|
|||||||
@@ -102,12 +102,37 @@ describe('readChunkStrict', function () {
|
|||||||
assert.strictEqual(error.chunk, undefined)
|
assert.strictEqual(error.chunk, undefined)
|
||||||
})
|
})
|
||||||
|
|
||||||
it('throws if stream ends with not enough data', async () => {
|
it('throws if stream ends with not enough data, utf8', async () => {
|
||||||
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
|
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
|
||||||
assert(error instanceof Error)
|
assert(error instanceof Error)
|
||||||
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
|
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
|
||||||
|
assert.strictEqual(error.text, 'foobar')
|
||||||
assert.deepEqual(error.chunk, Buffer.from('foobar'))
|
assert.deepEqual(error.chunk, Buffer.from('foobar'))
|
||||||
})
|
})
|
||||||
|
|
||||||
|
it('throws if stream ends with not enough data, non utf8 ', async () => {
|
||||||
|
const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
|
||||||
|
const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
|
||||||
|
assert(error instanceof Error)
|
||||||
|
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
|
||||||
|
assert.strictEqual(error.text, undefined)
|
||||||
|
assert.deepEqual(error.chunk, Buffer.concat(source))
|
||||||
|
})
|
||||||
|
|
||||||
|
it('throws if stream ends with not enough data, utf8 , long data', async () => {
|
||||||
|
const source = Buffer.from('a'.repeat(1500))
|
||||||
|
const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
|
||||||
|
assert(error instanceof Error)
|
||||||
|
assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
|
||||||
|
assert.strictEqual(error.text, undefined)
|
||||||
|
assert.deepEqual(error.chunk, source)
|
||||||
|
})
|
||||||
|
|
||||||
|
it('succeed', async () => {
|
||||||
|
const source = Buffer.from('a'.repeat(20))
|
||||||
|
const chunk = await readChunkStrict(makeStream([source]), 10)
|
||||||
|
assert.deepEqual(source.subarray(10), chunk)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('skip', function () {
|
describe('skip', function () {
|
||||||
@@ -134,6 +159,16 @@ describe('skip', function () {
|
|||||||
it('returns less size if stream ends', async () => {
|
it('returns less size if stream ends', async () => {
|
||||||
assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
|
assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
it('put back if it read too much', async () => {
|
||||||
|
let source = makeStream(['foo', 'bar'])
|
||||||
|
await skip(source, 1) // read part of data chunk
|
||||||
|
const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
|
||||||
|
assert.strictEqual(chunk, 'oo')
|
||||||
|
|
||||||
|
source = makeStream(['foo', 'bar'])
|
||||||
|
assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
describe('skipStrict', function () {
|
describe('skipStrict', function () {
|
||||||
@@ -144,4 +179,9 @@ describe('skipStrict', function () {
|
|||||||
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
|
assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
|
||||||
assert.deepEqual(error.bytesSkipped, 7)
|
assert.deepEqual(error.bytesSkipped, 7)
|
||||||
})
|
})
|
||||||
|
it('succeed', async () => {
|
||||||
|
const source = makeStream(['foo', 'bar', 'baz'])
|
||||||
|
const res = await skipStrict(source, 4)
|
||||||
|
assert.strictEqual(res, undefined)
|
||||||
|
})
|
||||||
})
|
})
|
||||||
|
|||||||
@@ -19,7 +19,7 @@
|
|||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||||
},
|
},
|
||||||
"version": "1.1.1",
|
"version": "1.2.0",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=8.10"
|
"node": ">=8.10"
|
||||||
},
|
},
|
||||||
@@ -33,5 +33,8 @@
|
|||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"test": "^3.2.1"
|
"test": "^3.2.1"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"isutf8": "^4.0.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -27,7 +27,7 @@
|
|||||||
"license": "ISC",
|
"license": "ISC",
|
||||||
"version": "0.1.0",
|
"version": "0.1.0",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=10"
|
"node": ">=12.3"
|
||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public",
|
"postversion": "npm publish --access public",
|
||||||
|
|||||||
@@ -123,7 +123,7 @@ const onProgress = makeOnProgress({
|
|||||||
onTaskUpdate(taskLog) {},
|
onTaskUpdate(taskLog) {},
|
||||||
})
|
})
|
||||||
|
|
||||||
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
|
Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
|
||||||
```
|
```
|
||||||
|
|
||||||
It can also be fed event logs directly:
|
It can also be fed event logs directly:
|
||||||
|
|||||||
@@ -111,7 +111,7 @@ const onProgress = makeOnProgress({
|
|||||||
// current status of the task as described in the previous section
|
// current status of the task as described in the previous section
|
||||||
taskLog.status
|
taskLog.status
|
||||||
|
|
||||||
// undefined or a dictionnary of properties attached to the task
|
// undefined or a dictionary of properties attached to the task
|
||||||
taskLog.properties
|
taskLog.properties
|
||||||
|
|
||||||
// timestamp at which the abortion was requested, undefined otherwise
|
// timestamp at which the abortion was requested, undefined otherwise
|
||||||
@@ -139,7 +139,7 @@ const onProgress = makeOnProgress({
|
|||||||
onTaskUpdate(taskLog) {},
|
onTaskUpdate(taskLog) {},
|
||||||
})
|
})
|
||||||
|
|
||||||
Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
|
Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
|
||||||
```
|
```
|
||||||
|
|
||||||
It can also be fed event logs directly:
|
It can also be fed event logs directly:
|
||||||
|
|||||||
@@ -35,7 +35,7 @@
|
|||||||
"test": "node--test"
|
"test": "node--test"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"sinon": "^15.0.1",
|
"sinon": "^17.0.1",
|
||||||
"test": "^3.2.1"
|
"test": "^3.2.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
'use strict'
|
'use strict'
|
||||||
|
|
||||||
const assert = require('assert/strict')
|
const assert = require('assert/strict')
|
||||||
const { afterEach, describe, it } = require('tap').mocha
|
const { afterEach, describe, it } = require('test')
|
||||||
|
|
||||||
const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')
|
const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')
|
||||||
|
|
||||||
@@ -13,10 +13,10 @@
|
|||||||
},
|
},
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public",
|
"postversion": "npm publish --access public",
|
||||||
"test": "tap --lines 67 --functions 92 --branches 52 --statements 67"
|
"test": "node--test"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@vates/decorate-with": "^2.0.0",
|
"@vates/decorate-with": "^2.1.0",
|
||||||
"@xen-orchestra/log": "^0.6.0",
|
"@xen-orchestra/log": "^0.6.0",
|
||||||
"golike-defer": "^0.5.1",
|
"golike-defer": "^0.5.1",
|
||||||
"object-hash": "^2.0.1"
|
"object-hash": "^2.0.1"
|
||||||
@@ -28,6 +28,6 @@
|
|||||||
"url": "https://vates.fr"
|
"url": "https://vates.fr"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"tap": "^16.0.1"
|
"test": "^3.3.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
import { asyncMap } from '@xen-orchestra/async-map'
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
|
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
|
||||||
import { getSyncedHandler } from '@xen-orchestra/fs'
|
import { getSyncedHandler } from '@xen-orchestra/fs'
|
||||||
import getopts from 'getopts'
|
import getopts from 'getopts'
|
||||||
import { basename, dirname } from 'path'
|
import { basename, dirname } from 'path'
|
||||||
|
|||||||
@@ -7,9 +7,9 @@
|
|||||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@xen-orchestra/async-map": "^0.1.2",
|
"@xen-orchestra/async-map": "^0.1.2",
|
||||||
"@xen-orchestra/backups": "^0.39.0",
|
"@xen-orchestra/backups": "^0.44.6",
|
||||||
"@xen-orchestra/fs": "^4.0.1",
|
"@xen-orchestra/fs": "^4.1.4",
|
||||||
"filenamify": "^4.1.0",
|
"filenamify": "^6.0.0",
|
||||||
"getopts": "^2.2.5",
|
"getopts": "^2.2.5",
|
||||||
"lodash": "^4.17.15",
|
"lodash": "^4.17.15",
|
||||||
"promise-toolbox": "^0.21.0"
|
"promise-toolbox": "^0.21.0"
|
||||||
@@ -27,7 +27,7 @@
|
|||||||
"scripts": {
|
"scripts": {
|
||||||
"postversion": "npm publish --access public"
|
"postversion": "npm publish --access public"
|
||||||
},
|
},
|
||||||
"version": "1.0.9",
|
"version": "1.0.14",
|
||||||
"license": "AGPL-3.0-or-later",
|
"license": "AGPL-3.0-or-later",
|
||||||
"author": {
|
"author": {
|
||||||
"name": "Vates SAS",
|
"name": "Vates SAS",
|
||||||
|
|||||||
@@ -1,10 +1,8 @@
|
|||||||
'use strict'
|
import { Metadata } from './_runners/Metadata.mjs'
|
||||||
|
import { VmsRemote } from './_runners/VmsRemote.mjs'
|
||||||
|
import { VmsXapi } from './_runners/VmsXapi.mjs'
|
||||||
|
|
||||||
const { Metadata } = require('./_runners/Metadata.js')
|
export function createRunner(opts) {
|
||||||
const { VmsRemote } = require('./_runners/VmsRemote.js')
|
|
||||||
const { VmsXapi } = require('./_runners/VmsXapi.js')
|
|
||||||
|
|
||||||
exports.createRunner = function createRunner(opts) {
|
|
||||||
const { type } = opts.job
|
const { type } = opts.job
|
||||||
switch (type) {
|
switch (type) {
|
||||||
case 'backup':
|
case 'backup':
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
'use strict'
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
export class DurablePartition {
|
||||||
|
|
||||||
exports.DurablePartition = class DurablePartition {
|
|
||||||
// private resource API is used exceptionally to be able to separate resource creation and release
|
// private resource API is used exceptionally to be able to separate resource creation and release
|
||||||
#partitionDisposers = {}
|
#partitionDisposers = {}
|
||||||
|
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
'use strict'
|
import { Task } from './Task.mjs'
|
||||||
|
|
||||||
const { Task } = require('./Task')
|
export class HealthCheckVmBackup {
|
||||||
|
|
||||||
exports.HealthCheckVmBackup = class HealthCheckVmBackup {
|
|
||||||
#restoredVm
|
#restoredVm
|
||||||
#timeout
|
#timeout
|
||||||
#xapi
|
#xapi
|
||||||
@@ -1,73 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
const assert = require('assert')
|
|
||||||
|
|
||||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
|
||||||
const { importIncrementalVm } = require('./_incrementalVm.js')
|
|
||||||
const { Task } = require('./Task.js')
|
|
||||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
|
||||||
|
|
||||||
exports.ImportVmBackup = class ImportVmBackup {
|
|
||||||
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
|
|
||||||
this._adapter = adapter
|
|
||||||
this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
|
|
||||||
this._metadata = metadata
|
|
||||||
this._srUuid = srUuid
|
|
||||||
this._xapi = xapi
|
|
||||||
}
|
|
||||||
|
|
||||||
async run() {
|
|
||||||
const adapter = this._adapter
|
|
||||||
const metadata = this._metadata
|
|
||||||
const isFull = metadata.mode === 'full'
|
|
||||||
|
|
||||||
const sizeContainer = { size: 0 }
|
|
||||||
|
|
||||||
let backup
|
|
||||||
if (isFull) {
|
|
||||||
backup = await adapter.readFullVmBackup(metadata)
|
|
||||||
watchStreamSize(backup, sizeContainer)
|
|
||||||
} else {
|
|
||||||
assert.strictEqual(metadata.mode, 'delta')
|
|
||||||
|
|
||||||
const ignoredVdis = new Set(
|
|
||||||
Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
|
|
||||||
.filter(([_, srUuid]) => srUuid === null)
|
|
||||||
.map(([vdiUuid]) => vdiUuid)
|
|
||||||
)
|
|
||||||
backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
|
|
||||||
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
|
|
||||||
}
|
|
||||||
|
|
||||||
return Task.run(
|
|
||||||
{
|
|
||||||
name: 'transfer',
|
|
||||||
},
|
|
||||||
async () => {
|
|
||||||
const xapi = this._xapi
|
|
||||||
const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)
|
|
||||||
|
|
||||||
const vmRef = isFull
|
|
||||||
? await xapi.VM_import(backup, srRef)
|
|
||||||
: await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
|
|
||||||
...this._importIncrementalVmSettings,
|
|
||||||
detectBase: false,
|
|
||||||
})
|
|
||||||
|
|
||||||
await Promise.all([
|
|
||||||
xapi.call('VM.add_tags', vmRef, 'restored from backup'),
|
|
||||||
xapi.call(
|
|
||||||
'VM.set_name_label',
|
|
||||||
vmRef,
|
|
||||||
`${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
|
|
||||||
),
|
|
||||||
])
|
|
||||||
|
|
||||||
return {
|
|
||||||
size: sizeContainer.size,
|
|
||||||
id: await xapi.getField('VM', vmRef, 'uuid'),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
280
@xen-orchestra/backups/ImportVmBackup.mjs
Normal file
280
@xen-orchestra/backups/ImportVmBackup.mjs
Normal file
@@ -0,0 +1,280 @@
|
|||||||
|
import assert from 'node:assert'
|
||||||
|
|
||||||
|
import { formatFilenameDate } from './_filenameDate.mjs'
|
||||||
|
import { importIncrementalVm } from './_incrementalVm.mjs'
|
||||||
|
import { Task } from './Task.mjs'
|
||||||
|
import { watchStreamSize } from './_watchStreamSize.mjs'
|
||||||
|
import { VhdNegative, VhdSynthetic } from 'vhd-lib'
|
||||||
|
import { decorateClass } from '@vates/decorate-with'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import { dirname, join } from 'node:path'
|
||||||
|
import pickBy from 'lodash/pickBy.js'
|
||||||
|
import { defer } from 'golike-defer'
|
||||||
|
|
||||||
|
const { debug, info, warn } = createLogger('xo:backups:importVmBackup')
|
||||||
|
async function resolveUuid(xapi, cache, uuid, type) {
|
||||||
|
if (uuid == null) {
|
||||||
|
return uuid
|
||||||
|
}
|
||||||
|
const ref = cache.get(uuid)
|
||||||
|
if (ref === undefined) {
|
||||||
|
cache.set(uuid, xapi.call(`${type}.get_by_uuid`, uuid))
|
||||||
|
}
|
||||||
|
return cache.get(uuid)
|
||||||
|
}
|
||||||
|
/**
 * Restores a VM from a backup stored on a remote, either a full backup (XVA)
 * or an incremental/delta backup (VHD chain).
 *
 * When `useDifferentialRestore` is enabled, it tries to find a VDI snapshot
 * still present on the pool that corresponds to a backed-up state, and only
 * transfers the blocks that differ (via a "negative" VHD), instead of the
 * whole disk.
 */
export class ImportVmBackup {
  /**
   * @param {object} params
   * @param {object} params.adapter - remote adapter of the backup repository
   * @param {object} params.metadata - parsed backup metadata (must carry `_filename`)
   * @param {string} params.srUuid - UUID of the default target SR
   * @param {object} params.xapi - XAPI client of the target pool
   * @param {object} [params.settings]
   *   - additionnalVmTag: extra tag added to the restored VM (sic, name kept for compat)
   *   - newMacAddresses: generate new MAC addresses for the restored VIFs
   *   - mapVdisSrs: VDI UUID → target SR UUID; a `null` SR means "skip this VDI"
   *   - useDifferentialRestore: enable the differential restore path
   */
  constructor({
    adapter,
    metadata,
    srUuid,
    xapi,
    settings: { additionnalVmTag, newMacAddresses, mapVdisSrs = {}, useDifferentialRestore = false } = {},
  }) {
    this._adapter = adapter
    this._importIncrementalVmSettings = { additionnalVmTag, newMacAddresses, mapVdisSrs, useDifferentialRestore }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
  }

  /**
   * Returns the VHD path backing the VDI snapshot `snapshotUuid`, or
   * `undefined` when no backup references it.
   *
   * The snapshot-uuid → VHD-path map is built lazily (once per instance) from
   * every delta backup of this VM at least as recent as the restored one.
   */
  async #getPathOfVdiSnapshot(snapshotUuid) {
    const metadata = this._metadata
    if (this._pathToVdis === undefined) {
      const backups = await this._adapter.listVmBackups(
        this._metadata.vm.uuid,
        ({ mode, timestamp }) => mode === 'delta' && timestamp >= metadata.timestamp
      )
      const map = new Map()
      for (const backup of backups) {
        for (const [vdiRef, vdi] of Object.entries(backup.vdis)) {
          map.set(vdi.uuid, backup.vhds[vdiRef])
        }
      }
      this._pathToVdis = map
    }
    return this._pathToVdis.get(snapshotUuid)
  }

  /**
   * Differential restore: builds the import payload while reusing, for each
   * disk, the nearest still-existing VDI snapshot as a base.
   *
   * `$defer` is injected by the `decorateClass(…, { _reuseNearestSnapshot: defer })`
   * call at the bottom of this file, so external callers only pass `ignoredVdis`.
   *
   * @param {object} $defer - golike-defer handle (onFailure used for cleanup)
   * @param {Set<string>} [ignoredVdis] - VDI UUIDs to exclude from the restore
   * @returns {object} payload with the same shape as `readIncrementalVmBackup`
   */
  async _reuseNearestSnapshot($defer, ignoredVdis) {
    const metadata = this._metadata
    const { mapVdisSrs } = this._importIncrementalVmSettings
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const streams = {}
    // note: `metdataDir` typo kept — renaming is out of scope for a doc pass
    const metdataDir = dirname(metadata._filename)
    const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

    for (const [vdiRef, vdi] of Object.entries(vdis)) {
      const vhdPath = join(metdataDir, vhds[vdiRef])

      let xapiDisk
      try {
        xapiDisk = await this._xapi.getRecordByUuid('VDI', vdi.$snapshot_of$uuid)
      } catch (err) {
        // if this disk is not present anymore, fall back to default restore
        warn(err)
      }

      let snapshotCandidate, backupCandidate
      if (xapiDisk !== undefined) {
        debug('found disks, wlll search its snapshots', { snapshots: xapiDisk.snapshots })
        for (const snapshotRef of xapiDisk.snapshots) {
          const snapshot = await this._xapi.getRecord('VDI', snapshotRef)
          debug('handling snapshot', { snapshot })

          // take only the first snapshot
          // (the candidate with the earliest snapshot_time wins: any snapshot
          // newer than the current candidate is skipped)
          if (snapshotCandidate && snapshotCandidate.snapshot_time < snapshot.snapshot_time) {
            debug('already got a better candidate')
            continue
          }

          // have a corresponding backup more recent than metadata ?
          const pathToSnapshotData = await this.#getPathOfVdiSnapshot(snapshot.uuid)
          if (pathToSnapshotData === undefined) {
            debug('no backup linked to this snaphot')
            continue
          }
          // the snapshot is only reusable if it lives on the SR we restore to
          if (snapshot.$SR.uuid !== (mapVdisSrs[vdi.$snapshot_of$uuid] ?? this._srUuid)) {
            debug('not restored on the same SR', { snapshotSr: snapshot.$SR.uuid, mapVdisSrs, srUuid: this._srUuid })
            continue
          }

          debug('got a candidate', pathToSnapshotData)

          snapshotCandidate = snapshot
          backupCandidate = pathToSnapshotData
        }
      }

      let stream
      const backupWithSnapshotPath = join(metdataDir, backupCandidate ?? '')
      if (vhdPath === backupWithSnapshotPath) {
        // all the data are already on the host
        debug('direct reuse of a snapshot')
        stream = null
        vdis[vdiRef].baseVdi = snapshotCandidate
        // go next disk , we won't use this stream
        continue
      }

      let disposableDescendants

      // synthetic VHD of the full chain up to the restored backup
      const disposableSynthetic = await VhdSynthetic.fromVhdChain(this._adapter._handler, vhdPath)

      // this will also clean if another disk of this VM backup fails
      // if user really only need to restore non failing disks he can retry with ignoredVdis
      let disposed = false
      const disposeOnce = async () => {
        if (!disposed) {
          disposed = true
          try {
            await disposableDescendants?.dispose()
            await disposableSynthetic?.dispose()
          } catch (error) {
            warn('openVhd: failed to dispose VHDs', { error })
          }
        }
      }
      $defer.onFailure(() => disposeOnce())

      const parentVhd = disposableSynthetic.value
      await parentVhd.readBlockAllocationTable()
      debug('got vhd synthetic of parents', parentVhd.length)

      if (snapshotCandidate !== undefined) {
        try {
          debug('will try to use differential restore', {
            backupWithSnapshotPath,
            vhdPath,
            vdiRef,
          })

          // chain from the snapshot's backup back down to the restored backup
          disposableDescendants = await VhdSynthetic.fromVhdChain(this._adapter._handler, backupWithSnapshotPath, {
            until: vhdPath,
          })
          const descendantsVhd = disposableDescendants.value
          await descendantsVhd.readBlockAllocationTable()
          debug('got vhd synthetic of descendants')
          // the negative VHD contains only the blocks to revert from the
          // snapshot state to the restored state
          const negativeVhd = new VhdNegative(parentVhd, descendantsVhd)
          debug('got vhd negative')

          // update the stream with the negative vhd stream
          stream = await negativeVhd.stream()
          vdis[vdiRef].baseVdi = snapshotCandidate
        } catch (err) {
          // can be a broken VHD chain, a vhd chain with a key backup, ....
          // not an irrecuperable error, don't dispose parentVhd, and fallback to full restore
          warn(`can't use differential restore`, err)
          // NOTE(review): dispose() is intentionally not awaited here (best effort)
          disposableDescendants?.dispose()
        }
      }
      // didn't make a negative stream : fallback to classic stream
      if (stream === undefined) {
        debug('use legacy restore')
        stream = await parentVhd.stream()
      }

      // release the VHD handles as soon as the stream terminates, however it ends
      stream.on('end', disposeOnce)
      stream.on('close', disposeOnce)
      stream.on('error', disposeOnce)
      info('everything is ready, will transfer', stream.length)
      streams[`${vdiRef}.vhd`] = stream
    }
    return {
      streams,
      vbds,
      vdis,
      version: '1.0.0',
      vifs,
      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
    }
  }

  /**
   * Builds the incremental import payload (differential or classic) and
   * rewrites each VDI's SR to its resolved target reference, applying
   * `mapVdisSrs` overrides and the optional additional VM tag.
   */
  async #decorateIncrementalVmMetadata() {
    const { additionnalVmTag, mapVdisSrs, useDifferentialRestore } = this._importIncrementalVmSettings

    // a null SR in mapVdisSrs means "do not restore this VDI"
    const ignoredVdis = new Set(
      Object.entries(mapVdisSrs)
        .filter(([_, srUuid]) => srUuid === null)
        .map(([vdiUuid]) => vdiUuid)
    )
    let backup
    if (useDifferentialRestore) {
      // $defer is prepended by the decorateClass() call at module level
      backup = await this._reuseNearestSnapshot(ignoredVdis)
    } else {
      backup = await this._adapter.readIncrementalVmBackup(this._metadata, ignoredVdis)
    }
    const xapi = this._xapi

    // shared cache: each SR UUID is resolved at most once
    const cache = new Map()
    const mapVdisSrRefs = {}
    if (additionnalVmTag !== undefined) {
      backup.vm.tags.push(additionnalVmTag)
    }
    for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
      mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
    }
    const srRef = await resolveUuid(xapi, cache, this._srUuid, 'SR')
    Object.values(backup.vdis).forEach(vdi => {
      // per-VDI override first, otherwise the default target SR
      vdi.SR = mapVdisSrRefs[vdi.uuid] ?? srRef
    })
    return backup
  }

  /**
   * Performs the restore and returns `{ size, id }` where `size` is the
   * number of bytes transferred and `id` the UUID of the new VM.
   *
   * The transfer itself runs inside a Task named 'transfer'; afterwards the
   * VM is tagged and renamed to reflect its backup origin.
   */
  async run() {
    const adapter = this._adapter
    const metadata = this._metadata
    const isFull = metadata.mode === 'full'

    const sizeContainer = { size: 0 }
    const { newMacAddresses } = this._importIncrementalVmSettings
    let backup
    if (isFull) {
      // full backup: a single XVA stream
      backup = await adapter.readFullVmBackup(metadata)
      watchStreamSize(backup, sizeContainer)
    } else {
      assert.strictEqual(metadata.mode, 'delta')

      // delta backup: one VHD stream per disk
      backup = await this.#decorateIncrementalVmMetadata()
      Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
    }

    return Task.run(
      {
        name: 'transfer',
      },
      async () => {
        const xapi = this._xapi
        const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)

        const vmRef = isFull
          ? await xapi.VM_import(backup, srRef)
          : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
              newMacAddresses,
            })

        // tag and rename the restored VM so its origin is visible in the UI
        await Promise.all([
          xapi.call('VM.add_tags', vmRef, 'restored from backup'),
          xapi.call(
            'VM.set_name_label',
            vmRef,
            `${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
          ),
          xapi.call(
            'VM.set_name_description',
            vmRef,
            `Restored on ${formatFilenameDate(+new Date())} from ${adapter._handler._remote.name} -
${metadata.vm.name_description}
`
          ),
        ])

        return {
          size: sizeContainer.size,
          id: await xapi.getField('VM', vmRef, 'uuid'),
        }
      }
    )
  }
}
|
||||||
|
|
||||||
|
// Wrap _reuseNearestSnapshot with golike-defer: the decorator injects `$defer`
// as the method's first argument, so callers only pass `ignoredVdis`.
decorateClass(ImportVmBackup, { _reuseNearestSnapshot: defer })
|
||||||
@@ -1,43 +1,41 @@
|
|||||||
'use strict'
|
import { asyncEach } from '@vates/async-each'
|
||||||
|
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
|
||||||
|
import { compose } from '@vates/compose'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
|
||||||
|
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||||
|
import { deduped } from '@vates/disposable/deduped.js'
|
||||||
|
import { dirname, join, resolve } from 'node:path'
|
||||||
|
import { execFile } from 'child_process'
|
||||||
|
import { mount } from '@vates/fuse-vhd'
|
||||||
|
import { readdir, lstat } from 'node:fs/promises'
|
||||||
|
import { synchronized } from 'decorator-synchronized'
|
||||||
|
import { v4 as uuidv4 } from 'uuid'
|
||||||
|
import { ZipFile } from 'yazl'
|
||||||
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import fromCallback from 'promise-toolbox/fromCallback'
|
||||||
|
import fromEvent from 'promise-toolbox/fromEvent'
|
||||||
|
import groupBy from 'lodash/groupBy.js'
|
||||||
|
import pDefer from 'promise-toolbox/defer'
|
||||||
|
import pickBy from 'lodash/pickBy.js'
|
||||||
|
import tar from 'tar'
|
||||||
|
import zlib from 'zlib'
|
||||||
|
|
||||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
import { BACKUP_DIR } from './_getVmBackupDir.mjs'
|
||||||
const { synchronized } = require('decorator-synchronized')
|
import { cleanVm } from './_cleanVm.mjs'
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
import { formatFilenameDate } from './_filenameDate.mjs'
|
||||||
const fromCallback = require('promise-toolbox/fromCallback')
|
import { getTmpDir } from './_getTmpDir.mjs'
|
||||||
const fromEvent = require('promise-toolbox/fromEvent')
|
import { isMetadataFile } from './_backupType.mjs'
|
||||||
const pDefer = require('promise-toolbox/defer')
|
import { isValidXva } from './_isValidXva.mjs'
|
||||||
const groupBy = require('lodash/groupBy.js')
|
import { listPartitions, LVM_PARTITION_TYPE } from './_listPartitions.mjs'
|
||||||
const pickBy = require('lodash/pickBy.js')
|
import { lvs, pvs } from './_lvm.mjs'
|
||||||
const { dirname, join, normalize, resolve } = require('path')
|
import { watchStreamSize } from './_watchStreamSize.mjs'
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
|
||||||
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
|
|
||||||
const { deduped } = require('@vates/disposable/deduped.js')
|
|
||||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
|
||||||
const { compose } = require('@vates/compose')
|
|
||||||
const { execFile } = require('child_process')
|
|
||||||
const { readdir, lstat } = require('fs-extra')
|
|
||||||
const { v4: uuidv4 } = require('uuid')
|
|
||||||
const { ZipFile } = require('yazl')
|
|
||||||
const zlib = require('zlib')
|
|
||||||
|
|
||||||
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
|
export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
||||||
const { cleanVm } = require('./_cleanVm.js')
|
|
||||||
const { formatFilenameDate } = require('./_filenameDate.js')
|
|
||||||
const { getTmpDir } = require('./_getTmpDir.js')
|
|
||||||
const { isMetadataFile } = require('./_backupType.js')
|
|
||||||
const { isValidXva } = require('./_isValidXva.js')
|
|
||||||
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
|
|
||||||
const { lvs, pvs } = require('./_lvm.js')
|
|
||||||
const { watchStreamSize } = require('./_watchStreamSize')
|
|
||||||
// @todo : this import is marked extraneous , sould be fixed when lib is published
|
|
||||||
const { mount } = require('@vates/fuse-vhd')
|
|
||||||
const { asyncEach } = require('@vates/async-each')
|
|
||||||
|
|
||||||
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
|
export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
||||||
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
|
|
||||||
|
|
||||||
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
|
const IMMUTABILTY_METADATA_FILENAME = '/immutability.json'
|
||||||
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
|
|
||||||
|
|
||||||
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
|
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
|
||||||
|
|
||||||
@@ -46,20 +44,23 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
|
|||||||
const noop = Function.prototype
|
const noop = Function.prototype
|
||||||
|
|
||||||
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
|
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
|
||||||
|
// Normalize any user-supplied path into a pure relative path: anchoring it at
// '/' collapses '.'/'..' segments and neutralizes absolute prefixes, then the
// leading slash is dropped.
const makeRelative = path => {
  const anchored = resolve('/', path)
  return anchored.slice(1)
}

// Resolve `path` inside `root` without letting it escape via '..' segments or
// an absolute path (path-traversal protection).
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))
|
||||||
|
|
||||||
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
|
async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
|
||||||
|
for (const relativePath of relativePaths) {
|
||||||
|
const realPath = join(realBasePath, relativePath)
|
||||||
|
const virtualPath = join(virtualBasePath, relativePath)
|
||||||
|
|
||||||
async function addDirectory(files, realPath, metadataPath) {
|
const stats = await lstat(realPath)
|
||||||
const stats = await lstat(realPath)
|
const { mode, mtime } = stats
|
||||||
if (stats.isDirectory()) {
|
const opts = { mode, mtime }
|
||||||
await asyncMap(await readdir(realPath), file =>
|
if (stats.isDirectory()) {
|
||||||
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
|
zip.addEmptyDirectory(virtualPath, opts)
|
||||||
)
|
await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
|
||||||
} else if (stats.isFile()) {
|
} else if (stats.isFile()) {
|
||||||
files.push({
|
zip.addFile(realPath, virtualPath, opts)
|
||||||
realPath,
|
}
|
||||||
metadataPath,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -76,7 +77,7 @@ const debounceResourceFactory = factory =>
|
|||||||
return this._debounceResource(factory.apply(this, arguments))
|
return this._debounceResource(factory.apply(this, arguments))
|
||||||
}
|
}
|
||||||
|
|
||||||
class RemoteAdapter {
|
export class RemoteAdapter {
|
||||||
constructor(
|
constructor(
|
||||||
handler,
|
handler,
|
||||||
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
|
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
|
||||||
@@ -187,17 +188,6 @@ class RemoteAdapter {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
async *_usePartitionFiles(diskId, partitionId, paths) {
|
|
||||||
const path = yield this.getPartition(diskId, partitionId)
|
|
||||||
|
|
||||||
const files = []
|
|
||||||
await asyncMap(paths, file =>
|
|
||||||
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
|
|
||||||
)
|
|
||||||
|
|
||||||
return files
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if we will be allowed to merge a a vhd created in this adapter
|
// check if we will be allowed to merge a a vhd created in this adapter
|
||||||
// with the vhd at path `path`
|
// with the vhd at path `path`
|
||||||
async isMergeableParent(packedParentUid, path) {
|
async isMergeableParent(packedParentUid, path) {
|
||||||
@@ -214,15 +204,24 @@ class RemoteAdapter {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fetchPartitionFiles(diskId, partitionId, paths) {
|
fetchPartitionFiles(diskId, partitionId, paths, format) {
|
||||||
const { promise, reject, resolve } = pDefer()
|
const { promise, reject, resolve } = pDefer()
|
||||||
Disposable.use(
|
Disposable.use(
|
||||||
async function* () {
|
async function* () {
|
||||||
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
|
const path = yield this.getPartition(diskId, partitionId)
|
||||||
const zip = new ZipFile()
|
let outputStream
|
||||||
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
|
|
||||||
zip.end()
|
if (format === 'tgz') {
|
||||||
const { outputStream } = zip
|
outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
|
||||||
|
} else if (format === 'zip') {
|
||||||
|
const zip = new ZipFile()
|
||||||
|
await addZipEntries(zip, path, '', paths.map(makeRelative))
|
||||||
|
zip.end()
|
||||||
|
;({ outputStream } = zip)
|
||||||
|
} else {
|
||||||
|
throw new Error('unsupported format ' + format)
|
||||||
|
}
|
||||||
|
|
||||||
resolve(outputStream)
|
resolve(outputStream)
|
||||||
await fromEvent(outputStream, 'end')
|
await fromEvent(outputStream, 'end')
|
||||||
}.bind(this)
|
}.bind(this)
|
||||||
@@ -684,11 +683,13 @@ class RemoteAdapter {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
|
async outputStream(path, input, { checksum = true, maxStreamLength, streamLength, validator = noop } = {}) {
|
||||||
const container = watchStreamSize(input)
|
const container = watchStreamSize(input)
|
||||||
await this._handler.outputStream(path, input, {
|
await this._handler.outputStream(path, input, {
|
||||||
checksum,
|
checksum,
|
||||||
dirMode: this._dirMode,
|
dirMode: this._dirMode,
|
||||||
|
maxStreamLength,
|
||||||
|
streamLength,
|
||||||
async validator() {
|
async validator() {
|
||||||
await input.task
|
await input.task
|
||||||
return validator.apply(this, arguments)
|
return validator.apply(this, arguments)
|
||||||
@@ -750,10 +751,37 @@ class RemoteAdapter {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async readVmBackupMetadata(path) {
|
async readVmBackupMetadata(path) {
|
||||||
|
let json
|
||||||
|
let isImmutable = false
|
||||||
|
let remoteIsImmutable = false
|
||||||
|
// if the remote is immutable, check if this metadatas are also immutables
|
||||||
|
try {
|
||||||
|
// this file is not encrypted
|
||||||
|
await this._handler._readFile(IMMUTABILTY_METADATA_FILENAME)
|
||||||
|
remoteIsImmutable = true
|
||||||
|
} catch (error) {
|
||||||
|
if (error.code !== 'ENOENT') {
|
||||||
|
throw error
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
// this will trigger an EPERM error if the file is immutable
|
||||||
|
json = await this.handler.readFile(path, { flag: 'r+' })
|
||||||
|
// s3 handler don't respect flags
|
||||||
|
} catch (err) {
|
||||||
|
// retry without triggerring immutbaility check ,only on immutable remote
|
||||||
|
if (err.code === 'EPERM' && remoteIsImmutable) {
|
||||||
|
isImmutable = true
|
||||||
|
json = await this._handler.readFile(path, { flag: 'r' })
|
||||||
|
} else {
|
||||||
|
throw err
|
||||||
|
}
|
||||||
|
}
|
||||||
// _filename is a private field used to compute the backup id
|
// _filename is a private field used to compute the backup id
|
||||||
//
|
//
|
||||||
// it's enumerable to make it cacheable
|
// it's enumerable to make it cacheable
|
||||||
const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
|
const metadata = { ...JSON.parse(json), _filename: path, isImmutable }
|
||||||
|
|
||||||
// backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
|
// backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
|
||||||
if (typeof metadata.vm.is_a_template === 'number') {
|
if (typeof metadata.vm.is_a_template === 'number') {
|
||||||
@@ -829,11 +857,7 @@ decorateMethodsWith(RemoteAdapter, {
|
|||||||
debounceResourceFactory,
|
debounceResourceFactory,
|
||||||
]),
|
]),
|
||||||
|
|
||||||
_usePartitionFiles: Disposable.factory,
|
|
||||||
|
|
||||||
getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),
|
getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),
|
||||||
|
|
||||||
getPartition: Disposable.factory,
|
getPartition: Disposable.factory,
|
||||||
})
|
})
|
||||||
|
|
||||||
exports.RemoteAdapter = RemoteAdapter
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
const { join, resolve } = require('node:path/posix')
|
|
||||||
|
|
||||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
|
|
||||||
const { PATH_DB_DUMP } = require('./_runners/_PoolMetadataBackup.js')
|
|
||||||
|
|
||||||
// Restores a metadata backup (pool metadata or XO config) previously written
// to a remote by the metadata backup jobs.
exports.RestoreMetadataBackup = class RestoreMetadataBackup {
  // backupId: path-like id of the backup on the remote
  // handler: remote FS handler (createReadStream / readFile)
  // xapi: XAPI client, only used for pool metadata restores
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    // pool metadata backups are streamed straight into the pool database dump
    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      // XO config backup: `metadata.data` may name a custom data file,
      // defaulting to `data.json` next to the metadata
      const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
      // NOTE(review): `resolve` falls back to the process cwd when `backupId`
      // is relative — presumably backupId is always rooted; confirm with callers
      return String(await handler.readFile(resolve(backupId, metadata.data ?? 'data.json')))
    }
  }
}
|
|
||||||
32
@xen-orchestra/backups/RestoreMetadataBackup.mjs
Normal file
32
@xen-orchestra/backups/RestoreMetadataBackup.mjs
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
import { join, resolve } from 'node:path/posix'
|
||||||
|
|
||||||
|
import { DIR_XO_POOL_METADATA_BACKUPS } from './RemoteAdapter.mjs'
|
||||||
|
import { PATH_DB_DUMP } from './_runners/_PoolMetadataBackup.mjs'
|
||||||
|
|
||||||
|
/**
 * Restores a metadata backup (pool metadata or XO config) previously written
 * to a remote by the metadata backup jobs.
 */
export class RestoreMetadataBackup {
  /**
   * @param {object} params
   * @param {string} params.backupId - path-like id of the backup on the remote
   * @param {object} params.handler - remote FS handler (createReadStream / readFile)
   * @param {object} params.xapi - XAPI client, only used for pool metadata restores
   */
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const { _backupId: backupId, _handler: handler, _xapi: xapi } = this

    // Pool metadata backups are streamed straight into the pool database dump.
    const [rootDir] = backupId.split('/')
    if (rootDir === DIR_XO_POOL_METADATA_BACKUPS) {
      const source = await handler.createReadStream(`${backupId}/data`)
      return xapi.putResource(source, PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    }

    // XO config backup: `metadata.data` may name a custom data file,
    // defaulting to `data.json` next to the metadata.
    const rawMetadata = await handler.readFile(join(backupId, 'metadata.json'))
    const metadata = JSON.parse(rawMetadata)
    const dataFileName = resolve('/', backupId, metadata.data ?? 'data.json').slice(1)
    const data = await handler.readFile(dataFileName)

    // if data is JSON, sent it as a plain string, otherwise, consider the data as binary and encode it
    if (dataFileName.endsWith('.json')) {
      return data.toString()
    }
    return { encoding: 'base64', data: data.toString('base64') }
  }
}
|
||||||
@@ -1,7 +1,5 @@
|
|||||||
'use strict'
|
import CancelToken from 'promise-toolbox/CancelToken'
|
||||||
|
import Zone from 'node-zone'
|
||||||
const CancelToken = require('promise-toolbox/CancelToken')
|
|
||||||
const Zone = require('node-zone')
|
|
||||||
|
|
||||||
const logAfterEnd = log => {
|
const logAfterEnd = log => {
|
||||||
const error = new Error('task has already ended')
|
const error = new Error('task has already ended')
|
||||||
@@ -30,7 +28,7 @@ const serializeError = error =>
|
|||||||
|
|
||||||
const $$task = Symbol('@xen-orchestra/backups/Task')
|
const $$task = Symbol('@xen-orchestra/backups/Task')
|
||||||
|
|
||||||
class Task {
|
export class Task {
|
||||||
static get cancelToken() {
|
static get cancelToken() {
|
||||||
const task = Zone.current.data[$$task]
|
const task = Zone.current.data[$$task]
|
||||||
return task !== undefined ? task.#cancelToken : CancelToken.none
|
return task !== undefined ? task.#cancelToken : CancelToken.none
|
||||||
@@ -151,7 +149,6 @@ class Task {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
exports.Task = Task
|
|
||||||
|
|
||||||
for (const method of ['info', 'warning']) {
|
for (const method of ['info', 'warning']) {
|
||||||
Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
|
Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
'use strict'

// Filename-extension predicates used to classify files found on a backup remote:
// JSON metadata, VHD disk data (delta backups), XVA archives (full backups)
// and their checksum companions.
exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
|
|
||||||
4
@xen-orchestra/backups/_backupType.mjs
Normal file
4
@xen-orchestra/backups/_backupType.mjs
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
// Filename-extension predicates used to classify files found on a backup remote.
const hasSuffix = suffix => filename => filename.endsWith(suffix)

// JSON metadata describing a backup
export const isMetadataFile = hasSuffix('.json')
// VHD disk data (delta backups)
export const isVhdFile = hasSuffix('.vhd')
// XVA archive (full backups)
export const isXvaFile = hasSuffix('.xva')
// checksum companion of an XVA archive
export const isXvaSumFile = hasSuffix('.xva.checksum')
|
||||||
@@ -1,25 +1,25 @@
|
|||||||
'use strict'
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import { catchGlobalErrors } from '@xen-orchestra/log/configure'
|
||||||
|
|
||||||
const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
import { compose } from '@vates/compose'
|
||||||
|
import { createCachedLookup } from '@vates/cached-dns.lookup'
|
||||||
|
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
|
||||||
|
import { createRunner } from './Backup.mjs'
|
||||||
|
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||||
|
import { deduped } from '@vates/disposable/deduped.js'
|
||||||
|
import { getHandler } from '@xen-orchestra/fs'
|
||||||
|
import { parseDuration } from '@vates/parse-duration'
|
||||||
|
import { Xapi } from '@xen-orchestra/xapi'
|
||||||
|
|
||||||
require('@xen-orchestra/log/configure').catchGlobalErrors(logger)
|
import { RemoteAdapter } from './RemoteAdapter.mjs'
|
||||||
|
import { Task } from './Task.mjs'
|
||||||
|
|
||||||
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()
|
createCachedLookup().patchGlobal()
|
||||||
|
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
|
||||||
const { compose } = require('@vates/compose')
|
|
||||||
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
|
|
||||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
|
||||||
const { deduped } = require('@vates/disposable/deduped.js')
|
|
||||||
const { getHandler } = require('@xen-orchestra/fs')
|
|
||||||
const { createRunner } = require('./Backup.js')
|
|
||||||
const { parseDuration } = require('@vates/parse-duration')
|
|
||||||
const { Xapi } = require('@xen-orchestra/xapi')
|
|
||||||
|
|
||||||
const { RemoteAdapter } = require('./RemoteAdapter.js')
|
|
||||||
const { Task } = require('./Task.js')
|
|
||||||
|
|
||||||
|
const logger = createLogger('xo:backups:worker')
|
||||||
|
catchGlobalErrors(logger)
|
||||||
const { debug } = logger
|
const { debug } = logger
|
||||||
|
|
||||||
class BackupWorker {
|
class BackupWorker {
|
||||||
@@ -1,13 +1,11 @@
|
|||||||
'use strict'
|
import cancelable from 'promise-toolbox/cancelable'
|
||||||
|
import CancelToken from 'promise-toolbox/CancelToken'
|
||||||
const cancelable = require('promise-toolbox/cancelable')
|
|
||||||
const CancelToken = require('promise-toolbox/CancelToken')
|
|
||||||
|
|
||||||
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
|
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
|
||||||
//
|
//
|
||||||
// If any of the executions fails, the cancel token will be triggered and the
|
// If any of the executions fails, the cancel token will be triggered and the
|
||||||
// first reason will be rejected.
|
// first reason will be rejected.
|
||||||
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
|
export const cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
|
||||||
const { cancel, token } = CancelToken.source([$cancelToken])
|
const { cancel, token } = CancelToken.source([$cancelToken])
|
||||||
try {
|
try {
|
||||||
return await Promise.all(
|
return await Promise.all(
|
||||||
@@ -1,19 +1,19 @@
|
|||||||
'use strict'
|
import test from 'test'
|
||||||
|
import { strict as assert } from 'node:assert'
|
||||||
|
|
||||||
const { beforeEach, afterEach, test, describe } = require('test')
|
import tmp from 'tmp'
|
||||||
const assert = require('assert').strict
|
import fs from 'fs-extra'
|
||||||
|
import * as uuid from 'uuid'
|
||||||
|
import { getHandler } from '@xen-orchestra/fs'
|
||||||
|
import { pFromCallback } from 'promise-toolbox'
|
||||||
|
import { RemoteAdapter } from './RemoteAdapter.mjs'
|
||||||
|
import { VHDFOOTER, VHDHEADER } from './tests.fixtures.mjs'
|
||||||
|
import { VhdFile, Constants, VhdDirectory, VhdAbstract } from 'vhd-lib'
|
||||||
|
import { checkAliases } from './_cleanVm.mjs'
|
||||||
|
import { dirname, basename } from 'node:path'
|
||||||
|
import { rimraf } from 'rimraf'
|
||||||
|
|
||||||
const tmp = require('tmp')
|
const { beforeEach, afterEach, describe } = test
|
||||||
const fs = require('fs-extra')
|
|
||||||
const uuid = require('uuid')
|
|
||||||
const { getHandler } = require('@xen-orchestra/fs')
|
|
||||||
const { pFromCallback } = require('promise-toolbox')
|
|
||||||
const { RemoteAdapter } = require('./RemoteAdapter')
|
|
||||||
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
|
|
||||||
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
|
|
||||||
const { checkAliases } = require('./_cleanVm')
|
|
||||||
const { dirname, basename } = require('path')
|
|
||||||
const { rimraf } = require('rimraf')
|
|
||||||
|
|
||||||
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
|
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
|
||||||
const rootPath = 'xo-vm-backups/VMUUID/'
|
const rootPath = 'xo-vm-backups/VMUUID/'
|
||||||
@@ -67,6 +67,11 @@ async function generateVhd(path, opts = {}) {
|
|||||||
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
|
await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (opts.blocks) {
|
||||||
|
for (const blockId of opts.blocks) {
|
||||||
|
await vhd.writeEntireBlock({ id: blockId, buffer: Buffer.alloc(2 * 1024 * 1024 + 512, blockId) })
|
||||||
|
}
|
||||||
|
}
|
||||||
await vhd.writeBlockAllocationTable()
|
await vhd.writeBlockAllocationTable()
|
||||||
await vhd.writeHeader()
|
await vhd.writeHeader()
|
||||||
await vhd.writeFooter()
|
await vhd.writeFooter()
|
||||||
@@ -230,7 +235,7 @@ test('it merges delta of non destroyed chain', async () => {
|
|||||||
|
|
||||||
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
|
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
|
||||||
// size should be the size of children + grand children after the merge
|
// size should be the size of children + grand children after the merge
|
||||||
assert.equal(metadata.size, 209920)
|
assert.equal(metadata.size, 104960)
|
||||||
|
|
||||||
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
|
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
|
||||||
// only check deletion
|
// only check deletion
|
||||||
@@ -320,6 +325,7 @@ describe('tests multiple combination ', () => {
|
|||||||
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
|
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
|
||||||
useAlias,
|
useAlias,
|
||||||
mode: vhdMode,
|
mode: vhdMode,
|
||||||
|
blocks: [1, 3],
|
||||||
})
|
})
|
||||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||||
useAlias,
|
useAlias,
|
||||||
@@ -328,6 +334,7 @@ describe('tests multiple combination ', () => {
|
|||||||
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||||
parentUuid: ancestor.footer.uuid,
|
parentUuid: ancestor.footer.uuid,
|
||||||
},
|
},
|
||||||
|
blocks: [1, 2],
|
||||||
})
|
})
|
||||||
// a grand child vhd in metadata
|
// a grand child vhd in metadata
|
||||||
await generateVhd(`${basePath}/grandchild.vhd`, {
|
await generateVhd(`${basePath}/grandchild.vhd`, {
|
||||||
@@ -337,6 +344,7 @@ describe('tests multiple combination ', () => {
|
|||||||
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
|
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||||
parentUuid: child.footer.uuid,
|
parentUuid: child.footer.uuid,
|
||||||
},
|
},
|
||||||
|
blocks: [2, 3],
|
||||||
})
|
})
|
||||||
|
|
||||||
// an older parent that was merging in clean
|
// an older parent that was merging in clean
|
||||||
@@ -395,7 +403,7 @@ describe('tests multiple combination ', () => {
|
|||||||
|
|
||||||
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
|
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
|
||||||
// size should be the size of children + grand children + clean after the merge
|
// size should be the size of children + grand children + clean after the merge
|
||||||
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
|
assert.deepEqual(metadata.size, vhdMode === 'file' ? 6502400 : 6501888)
|
||||||
|
|
||||||
// broken vhd, non referenced, abandonned should be deleted ( alias and data)
|
// broken vhd, non referenced, abandonned should be deleted ( alias and data)
|
||||||
// ancestor and child should be merged
|
// ancestor and child should be merged
|
||||||
@@ -1,19 +1,18 @@
|
|||||||
'use strict'
|
import * as UUID from 'uuid'
|
||||||
|
import sum from 'lodash/sum.js'
|
||||||
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { Constants, openVhd, VhdAbstract, VhdFile } from 'vhd-lib'
|
||||||
|
import { isVhdAlias, resolveVhdAlias } from 'vhd-lib/aliases.js'
|
||||||
|
import { dirname, resolve } from 'node:path'
|
||||||
|
import { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } from './_backupType.mjs'
|
||||||
|
import { limitConcurrency } from 'limit-concurrency-decorator'
|
||||||
|
import { mergeVhdChain } from 'vhd-lib/merge.js'
|
||||||
|
|
||||||
|
import { Task } from './Task.mjs'
|
||||||
|
import { Disposable } from 'promise-toolbox'
|
||||||
|
import handlerPath from '@xen-orchestra/fs/path'
|
||||||
|
|
||||||
const sum = require('lodash/sum')
|
|
||||||
const UUID = require('uuid')
|
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
|
||||||
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
|
|
||||||
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
|
|
||||||
const { dirname, resolve } = require('path')
|
|
||||||
const { DISK_TYPES } = Constants
|
const { DISK_TYPES } = Constants
|
||||||
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
|
|
||||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
|
||||||
const { mergeVhdChain } = require('vhd-lib/merge')
|
|
||||||
|
|
||||||
const { Task } = require('./Task.js')
|
|
||||||
const { Disposable } = require('promise-toolbox')
|
|
||||||
const handlerPath = require('@xen-orchestra/fs/path')
|
|
||||||
|
|
||||||
// checking the size of a vhd directory is costly
|
// checking the size of a vhd directory is costly
|
||||||
// 1 Http Query per 1000 blocks
|
// 1 Http Query per 1000 blocks
|
||||||
@@ -37,34 +36,32 @@ const computeVhdsSize = (handler, vhdPaths) =>
|
|||||||
)
|
)
|
||||||
|
|
||||||
// chain is [ ancestor, child_1, ..., child_n ]
|
// chain is [ ancestor, child_1, ..., child_n ]
|
||||||
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
|
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
|
||||||
if (merge) {
|
logInfo(`merging VHD chain`, { chain })
|
||||||
logInfo(`merging VHD chain`, { chain })
|
|
||||||
|
|
||||||
let done, total
|
let done, total
|
||||||
const handle = setInterval(() => {
|
const handle = setInterval(() => {
|
||||||
if (done !== undefined) {
|
if (done !== undefined) {
|
||||||
logInfo('merge in progress', {
|
logInfo('merge in progress', {
|
||||||
done,
|
done,
|
||||||
parent: chain[0],
|
parent: chain[0],
|
||||||
progress: Math.round((100 * done) / total),
|
progress: Math.round((100 * done) / total),
|
||||||
total,
|
total,
|
||||||
})
|
|
||||||
}
|
|
||||||
}, 10e3)
|
|
||||||
try {
|
|
||||||
return await mergeVhdChain(handler, chain, {
|
|
||||||
logInfo,
|
|
||||||
mergeBlockConcurrency,
|
|
||||||
onProgress({ done: d, total: t }) {
|
|
||||||
done = d
|
|
||||||
total = t
|
|
||||||
},
|
|
||||||
removeUnused: remove,
|
|
||||||
})
|
})
|
||||||
} finally {
|
|
||||||
clearInterval(handle)
|
|
||||||
}
|
}
|
||||||
|
}, 10e3)
|
||||||
|
try {
|
||||||
|
return await mergeVhdChain(handler, chain, {
|
||||||
|
logInfo,
|
||||||
|
mergeBlockConcurrency,
|
||||||
|
onProgress({ done: d, total: t }) {
|
||||||
|
done = d
|
||||||
|
total = t
|
||||||
|
},
|
||||||
|
removeUnused: remove,
|
||||||
|
})
|
||||||
|
} finally {
|
||||||
|
clearInterval(handle)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -117,7 +114,7 @@ const listVhds = async (handler, vmDir, logWarn) => {
|
|||||||
return { vhds, interruptedVhds, aliases }
|
return { vhds, interruptedVhds, aliases }
|
||||||
}
|
}
|
||||||
|
|
||||||
async function checkAliases(
|
export async function checkAliases(
|
||||||
aliasPaths,
|
aliasPaths,
|
||||||
targetDataRepository,
|
targetDataRepository,
|
||||||
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
|
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
|
||||||
@@ -176,11 +173,9 @@ async function checkAliases(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.checkAliases = checkAliases
|
|
||||||
|
|
||||||
const defaultMergeLimiter = limitConcurrency(1)
|
const defaultMergeLimiter = limitConcurrency(1)
|
||||||
|
|
||||||
exports.cleanVm = async function cleanVm(
|
export async function cleanVm(
|
||||||
vmDir,
|
vmDir,
|
||||||
{
|
{
|
||||||
fixMetadata,
|
fixMetadata,
|
||||||
@@ -474,23 +469,20 @@ exports.cleanVm = async function cleanVm(
|
|||||||
const metadataWithMergedVhd = {}
|
const metadataWithMergedVhd = {}
|
||||||
const doMerge = async () => {
|
const doMerge = async () => {
|
||||||
await asyncMap(toMerge, async chain => {
|
await asyncMap(toMerge, async chain => {
|
||||||
const merged = await limitedMergeVhdChain(handler, chain, {
|
const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
|
||||||
logInfo,
|
logInfo,
|
||||||
logWarn,
|
logWarn,
|
||||||
remove,
|
remove,
|
||||||
merge,
|
|
||||||
mergeBlockConcurrency,
|
mergeBlockConcurrency,
|
||||||
})
|
})
|
||||||
if (merged !== undefined) {
|
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
|
||||||
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
|
metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
|
||||||
metadataWithMergedVhd[metadataPath] = true
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
await Promise.all([
|
await Promise.all([
|
||||||
...unusedVhdsDeletion,
|
...unusedVhdsDeletion,
|
||||||
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
|
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
|
||||||
asyncMap(unusedXvas, path => {
|
asyncMap(unusedXvas, path => {
|
||||||
logWarn('unused XVA', { path })
|
logWarn('unused XVA', { path })
|
||||||
if (remove) {
|
if (remove) {
|
||||||
@@ -512,12 +504,11 @@ exports.cleanVm = async function cleanVm(
|
|||||||
|
|
||||||
// update size for delta metadata with merged VHD
|
// update size for delta metadata with merged VHD
|
||||||
// check for the other that the size is the same as the real file size
|
// check for the other that the size is the same as the real file size
|
||||||
|
|
||||||
await asyncMap(jsons, async metadataPath => {
|
await asyncMap(jsons, async metadataPath => {
|
||||||
const metadata = backups.get(metadataPath)
|
const metadata = backups.get(metadataPath)
|
||||||
|
|
||||||
let fileSystemSize
|
let fileSystemSize
|
||||||
const merged = metadataWithMergedVhd[metadataPath] !== undefined
|
const mergedSize = metadataWithMergedVhd[metadataPath]
|
||||||
|
|
||||||
const { mode, size, vhds, xva } = metadata
|
const { mode, size, vhds, xva } = metadata
|
||||||
|
|
||||||
@@ -527,26 +518,29 @@ exports.cleanVm = async function cleanVm(
|
|||||||
const linkedXva = resolve('/', vmDir, xva)
|
const linkedXva = resolve('/', vmDir, xva)
|
||||||
try {
|
try {
|
||||||
fileSystemSize = await handler.getSize(linkedXva)
|
fileSystemSize = await handler.getSize(linkedXva)
|
||||||
|
if (fileSystemSize !== size && fileSystemSize !== undefined) {
|
||||||
|
logWarn('cleanVm: incorrect backup size in metadata', {
|
||||||
|
path: metadataPath,
|
||||||
|
actual: size ?? 'none',
|
||||||
|
expected: fileSystemSize,
|
||||||
|
})
|
||||||
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// can fail with encrypted remote
|
// can fail with encrypted remote
|
||||||
}
|
}
|
||||||
} else if (mode === 'delta') {
|
} else if (mode === 'delta') {
|
||||||
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
|
||||||
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
|
|
||||||
|
|
||||||
// the size is not computed in some cases (e.g. VhdDirectory)
|
|
||||||
if (fileSystemSize === undefined) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// don't warn if the size has changed after a merge
|
// don't warn if the size has changed after a merge
|
||||||
if (!merged && fileSystemSize !== size) {
|
if (mergedSize === undefined) {
|
||||||
// FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
|
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
||||||
console.warn('cleanVm: incorrect backup size in metadata', {
|
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
|
||||||
path: metadataPath,
|
// the size is not computed in some cases (e.g. VhdDirectory)
|
||||||
actual: size ?? 'none',
|
if (fileSystemSize !== undefined && fileSystemSize !== size) {
|
||||||
expected: fileSystemSize,
|
logWarn('cleanVm: incorrect backup size in metadata', {
|
||||||
})
|
path: metadataPath,
|
||||||
|
actual: size ?? 'none',
|
||||||
|
expected: fileSystemSize,
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
@@ -554,9 +548,19 @@ exports.cleanVm = async function cleanVm(
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// systematically update size after a merge
|
// systematically update size and differentials after a merge
|
||||||
if ((merged || fixMetadata) && size !== fileSystemSize) {
|
|
||||||
metadata.size = fileSystemSize
|
// @todo : after 2024-04-01 remove the fixmetadata options since the size computation is fixed
|
||||||
|
if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
|
||||||
|
metadata.size = mergedSize ?? fileSystemSize ?? size
|
||||||
|
|
||||||
|
if (mergedSize) {
|
||||||
|
// all disks are now key disk
|
||||||
|
metadata.isVhdDifferencing = {}
|
||||||
|
for (const id of Object.values(metadata.vdis ?? {})) {
|
||||||
|
metadata.isVhdDifferencing[`${id}.vhd`] = false
|
||||||
|
}
|
||||||
|
}
|
||||||
mustRegenerateCache = true
|
mustRegenerateCache = true
|
||||||
try {
|
try {
|
||||||
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
|
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
const { utcFormat, utcParse } = require('d3-time-format')
|
|
||||||
|
|
||||||
// Format a date in ISO 8601 in a safe way to be used in filenames
|
|
||||||
// (even on Windows).
|
|
||||||
exports.formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
|
|
||||||
exports.parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
|
|
||||||
6
@xen-orchestra/backups/_filenameDate.mjs
Normal file
6
@xen-orchestra/backups/_filenameDate.mjs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
import { utcFormat, utcParse } from 'd3-time-format'
|
||||||
|
|
||||||
|
// Format a date in ISO 8601 in a safe way to be used in filenames
|
||||||
|
// (even on Windows).
|
||||||
|
export const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
|
||||||
|
export const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
|
||||||
@@ -1,6 +1,4 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
// returns all entries but the last retention-th
|
// returns all entries but the last retention-th
|
||||||
exports.getOldEntries = function getOldEntries(retention, entries) {
|
export function getOldEntries(retention, entries) {
|
||||||
return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
|
return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
|
||||||
}
|
}
|
||||||
@@ -1,13 +1,11 @@
|
|||||||
'use strict'
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import { join } from 'node:path'
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
import { mkdir, rmdir } from 'node:fs/promises'
|
||||||
const { join } = require('path')
|
import { tmpdir } from 'os'
|
||||||
const { mkdir, rmdir } = require('fs-extra')
|
|
||||||
const { tmpdir } = require('os')
|
|
||||||
|
|
||||||
const MAX_ATTEMPTS = 3
|
const MAX_ATTEMPTS = 3
|
||||||
|
|
||||||
exports.getTmpDir = async function getTmpDir() {
|
export async function getTmpDir() {
|
||||||
for (let i = 0; true; ++i) {
|
for (let i = 0; true; ++i) {
|
||||||
const path = join(tmpdir(), Math.random().toString(36).slice(2))
|
const path = join(tmpdir(), Math.random().toString(36).slice(2))
|
||||||
try {
|
try {
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
const BACKUP_DIR = 'xo-vm-backups'
|
|
||||||
exports.BACKUP_DIR = BACKUP_DIR
|
|
||||||
|
|
||||||
exports.getVmBackupDir = function getVmBackupDir(uuid) {
|
|
||||||
return `${BACKUP_DIR}/${uuid}`
|
|
||||||
}
|
|
||||||
5
@xen-orchestra/backups/_getVmBackupDir.mjs
Normal file
5
@xen-orchestra/backups/_getVmBackupDir.mjs
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
export const BACKUP_DIR = 'xo-vm-backups'
|
||||||
|
|
||||||
|
export function getVmBackupDir(uuid) {
|
||||||
|
return `${BACKUP_DIR}/${uuid}`
|
||||||
|
}
|
||||||
@@ -1,39 +1,30 @@
|
|||||||
'use strict'
|
import groupBy from 'lodash/groupBy.js'
|
||||||
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
import omit from 'lodash/omit.js'
|
||||||
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { CancelToken } from 'promise-toolbox'
|
||||||
|
import { compareVersions } from 'compare-versions'
|
||||||
|
import { createVhdStreamWithLength } from 'vhd-lib'
|
||||||
|
import { defer } from 'golike-defer'
|
||||||
|
|
||||||
const find = require('lodash/find.js')
|
import { cancelableMap } from './_cancelableMap.mjs'
|
||||||
const groupBy = require('lodash/groupBy.js')
|
import { Task } from './Task.mjs'
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
import pick from 'lodash/pick.js'
|
||||||
const omit = require('lodash/omit.js')
|
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
|
||||||
const { CancelToken } = require('promise-toolbox')
|
|
||||||
const { compareVersions } = require('compare-versions')
|
|
||||||
const { createVhdStreamWithLength } = require('vhd-lib')
|
|
||||||
const { defer } = require('golike-defer')
|
|
||||||
|
|
||||||
const { cancelableMap } = require('./_cancelableMap.js')
|
// in `other_config` of an incrementally replicated VM, contains the UUID of the source VM
|
||||||
const { Task } = require('./Task.js')
|
export const TAG_BASE_DELTA = 'xo:base_delta'
|
||||||
const pick = require('lodash/pick.js')
|
|
||||||
|
|
||||||
const TAG_BASE_DELTA = 'xo:base_delta'
|
// in `other_config` of an incrementally replicated VM, contains the UUID of the target SR used for replication
|
||||||
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
|
//
|
||||||
|
// added after the complete replication
|
||||||
|
export const TAG_BACKUP_SR = 'xo:backup:sr'
|
||||||
|
|
||||||
const TAG_COPY_SRC = 'xo:copy_of'
|
// in other_config of VDIs of an incrementally replicated VM, contains the UUID of the source VDI
|
||||||
exports.TAG_COPY_SRC = TAG_COPY_SRC
|
export const TAG_COPY_SRC = 'xo:copy_of'
|
||||||
|
|
||||||
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
|
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
|
||||||
const resolveUuid = async (xapi, cache, uuid, type) => {
|
|
||||||
if (uuid == null) {
|
|
||||||
return uuid
|
|
||||||
}
|
|
||||||
let ref = cache.get(uuid)
|
|
||||||
if (ref === undefined) {
|
|
||||||
ref = await xapi.call(`${type}.get_by_uuid`, uuid)
|
|
||||||
cache.set(uuid, ref)
|
|
||||||
}
|
|
||||||
return ref
|
|
||||||
}
|
|
||||||
|
|
||||||
exports.exportIncrementalVm = async function exportIncrementalVm(
|
export async function exportIncrementalVm(
|
||||||
vm,
|
vm,
|
||||||
baseVm,
|
baseVm,
|
||||||
{
|
{
|
||||||
@@ -43,6 +34,8 @@ exports.exportIncrementalVm = async function exportIncrementalVm(
|
|||||||
fullVdisRequired = new Set(),
|
fullVdisRequired = new Set(),
|
||||||
|
|
||||||
disableBaseTags = false,
|
disableBaseTags = false,
|
||||||
|
nbdConcurrency = 1,
|
||||||
|
preferNbd,
|
||||||
} = {}
|
} = {}
|
||||||
) {
|
) {
|
||||||
// refs of VM's VDIs → base's VDIs.
|
// refs of VM's VDIs → base's VDIs.
|
||||||
@@ -90,6 +83,8 @@ exports.exportIncrementalVm = async function exportIncrementalVm(
|
|||||||
baseRef: baseVdi?.$ref,
|
baseRef: baseVdi?.$ref,
|
||||||
cancelToken,
|
cancelToken,
|
||||||
format: 'vhd',
|
format: 'vhd',
|
||||||
|
nbdConcurrency,
|
||||||
|
preferNbd,
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -143,11 +138,11 @@ exports.exportIncrementalVm = async function exportIncrementalVm(
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.importIncrementalVm = defer(async function importIncrementalVm(
|
export const importIncrementalVm = defer(async function importIncrementalVm(
|
||||||
$defer,
|
$defer,
|
||||||
incrementalVm,
|
incrementalVm,
|
||||||
sr,
|
sr,
|
||||||
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
|
{ cancelToken = CancelToken.none, newMacAddresses = false } = {}
|
||||||
) {
|
) {
|
||||||
const { version } = incrementalVm
|
const { version } = incrementalVm
|
||||||
if (compareVersions(version, '1.0.0') < 0) {
|
if (compareVersions(version, '1.0.0') < 0) {
|
||||||
@@ -157,32 +152,6 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
|
|||||||
const vmRecord = incrementalVm.vm
|
const vmRecord = incrementalVm.vm
|
||||||
const xapi = sr.$xapi
|
const xapi = sr.$xapi
|
||||||
|
|
||||||
let baseVm
|
|
||||||
if (detectBase) {
|
|
||||||
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
|
|
||||||
if (remoteBaseVmUuid) {
|
|
||||||
baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)
|
|
||||||
|
|
||||||
if (!baseVm) {
|
|
||||||
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const cache = new Map()
|
|
||||||
const mapVdisSrRefs = {}
|
|
||||||
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
|
|
||||||
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
|
|
||||||
}
|
|
||||||
|
|
||||||
const baseVdis = {}
|
|
||||||
baseVm &&
|
|
||||||
baseVm.$VBDs.forEach(vbd => {
|
|
||||||
const vdi = vbd.$VDI
|
|
||||||
if (vdi !== undefined) {
|
|
||||||
baseVdis[vbd.VDI] = vbd.$VDI
|
|
||||||
}
|
|
||||||
})
|
|
||||||
const vdiRecords = incrementalVm.vdis
|
const vdiRecords = incrementalVm.vdis
|
||||||
|
|
||||||
// 0. Create suspend_VDI
|
// 0. Create suspend_VDI
|
||||||
@@ -194,18 +163,7 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
|
|||||||
vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
|
vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
suspendVdi = await xapi.getRecord(
|
suspendVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
|
||||||
'VDI',
|
|
||||||
await xapi.VDI_create({
|
|
||||||
...vdi,
|
|
||||||
other_config: {
|
|
||||||
...vdi.other_config,
|
|
||||||
[TAG_BASE_DELTA]: undefined,
|
|
||||||
[TAG_COPY_SRC]: vdi.uuid,
|
|
||||||
},
|
|
||||||
sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
|
|
||||||
})
|
|
||||||
)
|
|
||||||
$defer.onFailure(() => suspendVdi.$destroy())
|
$defer.onFailure(() => suspendVdi.$destroy())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -223,10 +181,6 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
|
|||||||
ha_always_run: false,
|
ha_always_run: false,
|
||||||
is_a_template: false,
|
is_a_template: false,
|
||||||
name_label: '[Importing…] ' + vmRecord.name_label,
|
name_label: '[Importing…] ' + vmRecord.name_label,
|
||||||
other_config: {
|
|
||||||
...vmRecord.other_config,
|
|
||||||
[TAG_COPY_SRC]: vmRecord.uuid,
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
bios_strings: vmRecord.bios_strings,
|
bios_strings: vmRecord.bios_strings,
|
||||||
@@ -247,14 +201,8 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
|
|||||||
const vdi = vdiRecords[vdiRef]
|
const vdi = vdiRecords[vdiRef]
|
||||||
let newVdi
|
let newVdi
|
||||||
|
|
||||||
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
|
if (vdi.baseVdi !== undefined) {
|
||||||
if (remoteBaseVdiUuid) {
|
newVdi = await xapi.getRecord('VDI', await vdi.baseVdi.$clone())
|
||||||
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
|
|
||||||
if (!baseVdi) {
|
|
||||||
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
|
|
||||||
}
|
|
||||||
|
|
||||||
newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
|
|
||||||
$defer.onFailure(() => newVdi.$destroy())
|
$defer.onFailure(() => newVdi.$destroy())
|
||||||
|
|
||||||
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
|
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
|
||||||
@@ -265,18 +213,7 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
|
|||||||
// suspendVDI has already created
|
// suspendVDI has already created
|
||||||
newVdi = suspendVdi
|
newVdi = suspendVdi
|
||||||
} else {
|
} else {
|
||||||
newVdi = await xapi.getRecord(
|
newVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
|
||||||
'VDI',
|
|
||||||
await xapi.VDI_create({
|
|
||||||
...vdi,
|
|
||||||
other_config: {
|
|
||||||
...vdi.other_config,
|
|
||||||
[TAG_BASE_DELTA]: undefined,
|
|
||||||
[TAG_COPY_SRC]: vdi.uuid,
|
|
||||||
},
|
|
||||||
SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
|
|
||||||
})
|
|
||||||
)
|
|
||||||
$defer.onFailure(() => newVdi.$destroy())
|
$defer.onFailure(() => newVdi.$destroy())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -315,13 +252,19 @@ exports.importIncrementalVm = defer(async function importIncrementalVm(
|
|||||||
// Import VDI contents.
|
// Import VDI contents.
|
||||||
cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
|
cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
|
||||||
for (let stream of ensureArray(streams[`${id}.vhd`])) {
|
for (let stream of ensureArray(streams[`${id}.vhd`])) {
|
||||||
|
if (stream === null) {
|
||||||
|
// we restore a backup and reuse completly a local snapshot
|
||||||
|
continue
|
||||||
|
}
|
||||||
if (typeof stream === 'function') {
|
if (typeof stream === 'function') {
|
||||||
stream = await stream()
|
stream = await stream()
|
||||||
}
|
}
|
||||||
if (stream.length === undefined) {
|
if (stream.length === undefined) {
|
||||||
stream = await createVhdStreamWithLength(stream)
|
stream = await createVhdStreamWithLength(stream)
|
||||||
}
|
}
|
||||||
|
await xapi.setField('VDI', vdi.$ref, 'name_label', `[Importing] ${vdiRecords[id].name_label}`)
|
||||||
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
|
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
|
||||||
|
await xapi.setField('VDI', vdi.$ref, 'name_label', vdiRecords[id].name_label)
|
||||||
}
|
}
|
||||||
}),
|
}),
|
||||||
|
|
||||||
@@ -1,6 +1,4 @@
|
|||||||
'use strict'
|
import assert from 'node:assert'
|
||||||
|
|
||||||
const assert = require('assert')
|
|
||||||
|
|
||||||
const COMPRESSED_MAGIC_NUMBERS = [
|
const COMPRESSED_MAGIC_NUMBERS = [
|
||||||
// https://tools.ietf.org/html/rfc1952.html#page-5
|
// https://tools.ietf.org/html/rfc1952.html#page-5
|
||||||
@@ -47,7 +45,7 @@ const isValidTar = async (handler, size, fd) => {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO: find an heuristic for compressed files
|
// TODO: find an heuristic for compressed files
|
||||||
async function isValidXva(path) {
|
export async function isValidXva(path) {
|
||||||
const handler = this._handler
|
const handler = this._handler
|
||||||
|
|
||||||
// size is longer when encrypted + reading part of an encrypted file is not implemented
|
// size is longer when encrypted + reading part of an encrypted file is not implemented
|
||||||
@@ -74,6 +72,5 @@ async function isValidXva(path) {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
exports.isValidXva = isValidXva
|
|
||||||
|
|
||||||
const noop = Function.prototype
|
const noop = Function.prototype
|
||||||
@@ -1,9 +1,7 @@
|
|||||||
'use strict'
|
import fromCallback from 'promise-toolbox/fromCallback'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
const fromCallback = require('promise-toolbox/fromCallback')
|
import { createParser } from 'parse-pairs'
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
import { execFile } from 'child_process'
|
||||||
const { createParser } = require('parse-pairs')
|
|
||||||
const { execFile } = require('child_process')
|
|
||||||
|
|
||||||
const { debug } = createLogger('xo:backups:listPartitions')
|
const { debug } = createLogger('xo:backups:listPartitions')
|
||||||
|
|
||||||
@@ -24,8 +22,7 @@ const IGNORED_PARTITION_TYPES = {
|
|||||||
0x82: true, // swap
|
0x82: true, // swap
|
||||||
}
|
}
|
||||||
|
|
||||||
const LVM_PARTITION_TYPE = 0x8e
|
export const LVM_PARTITION_TYPE = 0x8e
|
||||||
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE
|
|
||||||
|
|
||||||
const parsePartxLine = createParser({
|
const parsePartxLine = createParser({
|
||||||
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
|
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
|
||||||
@@ -33,7 +30,7 @@ const parsePartxLine = createParser({
|
|||||||
})
|
})
|
||||||
|
|
||||||
// returns an empty array in case of a non-partitioned disk
|
// returns an empty array in case of a non-partitioned disk
|
||||||
exports.listPartitions = async function listPartitions(devicePath) {
|
export async function listPartitions(devicePath) {
|
||||||
const parts = await fromCallback(execFile, 'partx', [
|
const parts = await fromCallback(execFile, 'partx', [
|
||||||
'--bytes',
|
'--bytes',
|
||||||
'--output=NR,START,SIZE,NAME,UUID,TYPE',
|
'--output=NR,START,SIZE,NAME,UUID,TYPE',
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
'use strict'
|
import fromCallback from 'promise-toolbox/fromCallback'
|
||||||
|
import { createParser } from 'parse-pairs'
|
||||||
const fromCallback = require('promise-toolbox/fromCallback')
|
import { execFile } from 'child_process'
|
||||||
const { createParser } = require('parse-pairs')
|
|
||||||
const { execFile } = require('child_process')
|
|
||||||
|
|
||||||
// ===================================================================
|
// ===================================================================
|
||||||
|
|
||||||
@@ -29,5 +27,5 @@ const makeFunction =
|
|||||||
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
|
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.lvs = makeFunction('lvs')
|
export const lvs = makeFunction('lvs')
|
||||||
exports.pvs = makeFunction('pvs')
|
export const pvs = makeFunction('pvs')
|
||||||
@@ -1,22 +1,20 @@
|
|||||||
'use strict'
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
import { PoolMetadataBackup } from './_PoolMetadataBackup.mjs'
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
import { XoMetadataBackup } from './_XoMetadataBackup.mjs'
|
||||||
|
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
|
||||||
const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
|
import { runTask } from './_runTask.mjs'
|
||||||
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
|
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
|
||||||
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
|
|
||||||
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
|
|
||||||
const { runTask } = require('./_runTask.js')
|
|
||||||
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
|
|
||||||
|
|
||||||
const DEFAULT_METADATA_SETTINGS = {
|
const DEFAULT_METADATA_SETTINGS = {
|
||||||
retentionPoolMetadata: 0,
|
retentionPoolMetadata: 0,
|
||||||
retentionXoMetadata: 0,
|
retentionXoMetadata: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.Metadata = class MetadataBackupRunner extends Abstract {
|
export const Metadata = class MetadataBackupRunner extends Abstract {
|
||||||
_computeBaseSettings(config, job) {
|
_computeBaseSettings(config, job) {
|
||||||
const baseSettings = { ...DEFAULT_SETTINGS }
|
const baseSettings = { ...DEFAULT_SETTINGS }
|
||||||
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
|
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
|
||||||
@@ -1,17 +1,15 @@
|
|||||||
'use strict'
|
import { asyncMapSettled } from '@xen-orchestra/async-map'
|
||||||
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import { limitConcurrency } from 'limit-concurrency-decorator'
|
||||||
|
|
||||||
const { asyncMapSettled } = require('@xen-orchestra/async-map')
|
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
import { Task } from '../Task.mjs'
|
||||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
import createStreamThrottle from './_createStreamThrottle.mjs'
|
||||||
|
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
|
||||||
const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
|
import { runTask } from './_runTask.mjs'
|
||||||
const { Task } = require('../Task.js')
|
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
|
||||||
const createStreamThrottle = require('./_createStreamThrottle.js')
|
import { FullRemote } from './_vmRunners/FullRemote.mjs'
|
||||||
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
|
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'
|
||||||
const { runTask } = require('./_runTask.js')
|
|
||||||
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
|
|
||||||
const { FullRemote } = require('./_vmRunners/FullRemote.js')
|
|
||||||
const { IncrementalRemote } = require('./_vmRunners/IncrementalRemote.js')
|
|
||||||
|
|
||||||
const DEFAULT_REMOTE_VM_SETTINGS = {
|
const DEFAULT_REMOTE_VM_SETTINGS = {
|
||||||
concurrency: 2,
|
concurrency: 2,
|
||||||
@@ -27,7 +25,7 @@ const DEFAULT_REMOTE_VM_SETTINGS = {
|
|||||||
vmTimeout: 0,
|
vmTimeout: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.VmsRemote = class RemoteVmsBackupRunner extends Abstract {
|
export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
|
||||||
_computeBaseSettings(config, job) {
|
_computeBaseSettings(config, job) {
|
||||||
const baseSettings = { ...DEFAULT_SETTINGS }
|
const baseSettings = { ...DEFAULT_SETTINGS }
|
||||||
Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
|
Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
|
||||||
@@ -1,17 +1,15 @@
|
|||||||
'use strict'
|
import { asyncMapSettled } from '@xen-orchestra/async-map'
|
||||||
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import { limitConcurrency } from 'limit-concurrency-decorator'
|
||||||
|
|
||||||
const { asyncMapSettled } = require('@xen-orchestra/async-map')
|
import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
import { Task } from '../Task.mjs'
|
||||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
import createStreamThrottle from './_createStreamThrottle.mjs'
|
||||||
|
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
|
||||||
const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
|
import { runTask } from './_runTask.mjs'
|
||||||
const { Task } = require('../Task.js')
|
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
|
||||||
const createStreamThrottle = require('./_createStreamThrottle.js')
|
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
|
||||||
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
|
import { FullXapi } from './_vmRunners/FullXapi.mjs'
|
||||||
const { runTask } = require('./_runTask.js')
|
|
||||||
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
|
|
||||||
const { IncrementalXapi } = require('./_vmRunners/IncrementalXapi.js')
|
|
||||||
const { FullXapi } = require('./_vmRunners/FullXapi.js')
|
|
||||||
|
|
||||||
const DEFAULT_XAPI_VM_SETTINGS = {
|
const DEFAULT_XAPI_VM_SETTINGS = {
|
||||||
bypassVdiChainsCheck: false,
|
bypassVdiChainsCheck: false,
|
||||||
@@ -36,7 +34,7 @@ const DEFAULT_XAPI_VM_SETTINGS = {
|
|||||||
vmTimeout: 0,
|
vmTimeout: 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.VmsXapi = class VmsXapiBackupRunner extends Abstract {
|
export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
|
||||||
_computeBaseSettings(config, job) {
|
_computeBaseSettings(config, job) {
|
||||||
const baseSettings = { ...DEFAULT_SETTINGS }
|
const baseSettings = { ...DEFAULT_SETTINGS }
|
||||||
Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
|
Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
|
||||||
@@ -1,17 +1,15 @@
|
|||||||
'use strict'
|
import Disposable from 'promise-toolbox/Disposable'
|
||||||
|
import pTimeout from 'promise-toolbox/timeout'
|
||||||
|
import { compileTemplate } from '@xen-orchestra/template'
|
||||||
|
import { runTask } from './_runTask.mjs'
|
||||||
|
import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'
|
||||||
|
|
||||||
const Disposable = require('promise-toolbox/Disposable')
|
export const DEFAULT_SETTINGS = {
|
||||||
const pTimeout = require('promise-toolbox/timeout')
|
|
||||||
const { compileTemplate } = require('@xen-orchestra/template')
|
|
||||||
const { runTask } = require('./_runTask.js')
|
|
||||||
const { RemoteTimeoutError } = require('./_RemoteTimeoutError.js')
|
|
||||||
|
|
||||||
exports.DEFAULT_SETTINGS = {
|
|
||||||
getRemoteTimeout: 300e3,
|
getRemoteTimeout: 300e3,
|
||||||
reportWhen: 'failure',
|
reportWhen: 'failure',
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.Abstract = class AbstractRunner {
|
export const Abstract = class AbstractRunner {
|
||||||
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
|
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
|
||||||
this._config = config
|
this._config = config
|
||||||
this._getRecord = getConnectedRecord
|
this._getRecord = getConnectedRecord
|
||||||
@@ -1,16 +1,13 @@
|
|||||||
'use strict'
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
import { DIR_XO_POOL_METADATA_BACKUPS } from '../RemoteAdapter.mjs'
|
||||||
|
import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'
|
||||||
|
import { formatFilenameDate } from '../_filenameDate.mjs'
|
||||||
|
import { Task } from '../Task.mjs'
|
||||||
|
|
||||||
const { DIR_XO_POOL_METADATA_BACKUPS } = require('../RemoteAdapter.js')
|
export const PATH_DB_DUMP = '/pool/xmldbdump'
|
||||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
|
|
||||||
const { formatFilenameDate } = require('../_filenameDate.js')
|
|
||||||
const { Task } = require('../Task.js')
|
|
||||||
|
|
||||||
const PATH_DB_DUMP = '/pool/xmldbdump'
|
export class PoolMetadataBackup {
|
||||||
exports.PATH_DB_DUMP = PATH_DB_DUMP
|
|
||||||
|
|
||||||
exports.PoolMetadataBackup = class PoolMetadataBackup {
|
|
||||||
constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
|
constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
|
||||||
this._config = config
|
this._config = config
|
||||||
this._job = job
|
this._job = job
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
'use strict'
|
export class RemoteTimeoutError extends Error {
|
||||||
class RemoteTimeoutError extends Error {
|
|
||||||
constructor(remoteId) {
|
constructor(remoteId) {
|
||||||
super('timeout while getting the remote ' + remoteId)
|
super('timeout while getting the remote ' + remoteId)
|
||||||
this.remoteId = remoteId
|
this.remoteId = remoteId
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
exports.RemoteTimeoutError = RemoteTimeoutError
|
|
||||||
@@ -1,13 +1,11 @@
|
|||||||
'use strict'
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { join } from '@xen-orchestra/fs/path'
|
||||||
|
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
import { DIR_XO_CONFIG_BACKUPS } from '../RemoteAdapter.mjs'
|
||||||
const { join } = require('@xen-orchestra/fs/path')
|
import { formatFilenameDate } from '../_filenameDate.mjs'
|
||||||
|
import { Task } from '../Task.mjs'
|
||||||
|
|
||||||
const { DIR_XO_CONFIG_BACKUPS } = require('../RemoteAdapter.js')
|
export class XoMetadataBackup {
|
||||||
const { formatFilenameDate } = require('../_filenameDate.js')
|
|
||||||
const { Task } = require('../Task.js')
|
|
||||||
|
|
||||||
exports.XoMetadataBackup = class XoMetadataBackup {
|
|
||||||
constructor({ config, job, remoteAdapters, schedule, settings }) {
|
constructor({ config, job, remoteAdapters, schedule, settings }) {
|
||||||
this._config = config
|
this._config = config
|
||||||
this._job = job
|
this._job = job
|
||||||
@@ -24,7 +22,13 @@ exports.XoMetadataBackup = class XoMetadataBackup {
|
|||||||
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
|
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
|
||||||
|
|
||||||
const data = job.xoMetadata
|
const data = job.xoMetadata
|
||||||
const dataBaseName = './data.json'
|
let dataBaseName = './data'
|
||||||
|
|
||||||
|
// JSON data is sent as plain string, binary data is sent as an object with `data` and `encoding properties
|
||||||
|
const isJson = typeof data === 'string'
|
||||||
|
if (isJson) {
|
||||||
|
dataBaseName += '.json'
|
||||||
|
}
|
||||||
|
|
||||||
const metadata = JSON.stringify(
|
const metadata = JSON.stringify(
|
||||||
{
|
{
|
||||||
@@ -56,7 +60,7 @@ exports.XoMetadataBackup = class XoMetadataBackup {
|
|||||||
async () => {
|
async () => {
|
||||||
const handler = adapter.handler
|
const handler = adapter.handler
|
||||||
const dirMode = this._config.dirMode
|
const dirMode = this._config.dirMode
|
||||||
await handler.outputFile(dataFileName, data, { dirMode })
|
await handler.outputFile(dataFileName, isJson ? data : Buffer.from(data.data, data.encoding), { dirMode })
|
||||||
await handler.outputFile(metaDataFileName, metadata, {
|
await handler.outputFile(metaDataFileName, metadata, {
|
||||||
dirMode,
|
dirMode,
|
||||||
})
|
})
|
||||||
@@ -1,12 +1,10 @@
|
|||||||
'use strict'
|
import { pipeline } from 'node:stream'
|
||||||
|
import { ThrottleGroup } from '@kldzj/stream-throttle'
|
||||||
const { pipeline } = require('node:stream')
|
import identity from 'lodash/identity.js'
|
||||||
const { ThrottleGroup } = require('@kldzj/stream-throttle')
|
|
||||||
const identity = require('lodash/identity.js')
|
|
||||||
|
|
||||||
const noop = Function.prototype
|
const noop = Function.prototype
|
||||||
|
|
||||||
module.exports = function createStreamThrottle(rate) {
|
export default function createStreamThrottle(rate) {
|
||||||
if (rate === 0) {
|
if (rate === 0) {
|
||||||
return identity
|
return identity
|
||||||
}
|
}
|
||||||
@@ -1,14 +1,13 @@
|
|||||||
'use strict'
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import { finished, PassThrough } from 'node:stream'
|
||||||
|
|
||||||
const { finished, PassThrough } = require('node:stream')
|
const { debug } = createLogger('xo:backups:forkStreamUnpipe')
|
||||||
|
|
||||||
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
|
|
||||||
|
|
||||||
// create a new readable stream from an existing one which may be piped later
|
// create a new readable stream from an existing one which may be piped later
|
||||||
//
|
//
|
||||||
// in case of error in the new readable stream, it will simply be unpiped
|
// in case of error in the new readable stream, it will simply be unpiped
|
||||||
// from the original one
|
// from the original one
|
||||||
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
|
export function forkStreamUnpipe(source) {
|
||||||
const { forks = 0 } = source
|
const { forks = 0 } = source
|
||||||
source.forks = forks + 1
|
source.forks = forks + 1
|
||||||
|
|
||||||
@@ -1,9 +1,7 @@
|
|||||||
'use strict'
|
export function getAdaptersByRemote(adapters) {
|
||||||
const getAdaptersByRemote = adapters => {
|
|
||||||
const adaptersByRemote = {}
|
const adaptersByRemote = {}
|
||||||
adapters.forEach(({ adapter, remoteId }) => {
|
adapters.forEach(({ adapter, remoteId }) => {
|
||||||
adaptersByRemote[remoteId] = adapter
|
adaptersByRemote[remoteId] = adapter
|
||||||
})
|
})
|
||||||
return adaptersByRemote
|
return adaptersByRemote
|
||||||
}
|
}
|
||||||
exports.getAdaptersByRemote = getAdaptersByRemote
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
const { Task } = require('../Task.js')
|
|
||||||
const noop = Function.prototype
|
|
||||||
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
|
|
||||||
|
|
||||||
exports.runTask = runTask
|
|
||||||
5
@xen-orchestra/backups/_runners/_runTask.mjs
Normal file
5
@xen-orchestra/backups/_runners/_runTask.mjs
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
import { Task } from '../Task.mjs'
|
||||||
|
|
||||||
|
const noop = Function.prototype
|
||||||
|
|
||||||
|
export const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
|
||||||
@@ -1,14 +1,12 @@
|
|||||||
'use strict'
|
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||||
|
import { defer } from 'golike-defer'
|
||||||
|
import { AbstractRemote } from './_AbstractRemote.mjs'
|
||||||
|
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
|
||||||
|
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
|
||||||
|
import { watchStreamSize } from '../../_watchStreamSize.mjs'
|
||||||
|
import { Task } from '../../Task.mjs'
|
||||||
|
|
||||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
export const FullRemote = class FullRemoteVmBackupRunner extends AbstractRemote {
|
||||||
const { defer } = require('golike-defer')
|
|
||||||
const { AbstractRemote } = require('./_AbstractRemote')
|
|
||||||
const { FullRemoteWriter } = require('../_writers/FullRemoteWriter')
|
|
||||||
const { forkStreamUnpipe } = require('../_forkStreamUnpipe')
|
|
||||||
const { watchStreamSize } = require('../../_watchStreamSize')
|
|
||||||
const { Task } = require('../../Task')
|
|
||||||
|
|
||||||
class FullRemoteVmBackupRunner extends AbstractRemote {
|
|
||||||
_getRemoteWriter() {
|
_getRemoteWriter() {
|
||||||
return FullRemoteWriter
|
return FullRemoteWriter
|
||||||
}
|
}
|
||||||
@@ -31,6 +29,8 @@ class FullRemoteVmBackupRunner extends AbstractRemote {
|
|||||||
writer =>
|
writer =>
|
||||||
writer.run({
|
writer.run({
|
||||||
stream: forkStreamUnpipe(stream),
|
stream: forkStreamUnpipe(stream),
|
||||||
|
// stream will be forked and transformed, it's not safe to attach additionnal properties to it
|
||||||
|
streamLength: stream.length,
|
||||||
timestamp: metadata.timestamp,
|
timestamp: metadata.timestamp,
|
||||||
vm: metadata.vm,
|
vm: metadata.vm,
|
||||||
vmSnapshot: metadata.vmSnapshot,
|
vmSnapshot: metadata.vmSnapshot,
|
||||||
@@ -47,7 +47,6 @@ class FullRemoteVmBackupRunner extends AbstractRemote {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.FullRemote = FullRemoteVmBackupRunner
|
decorateMethodsWith(FullRemote, {
|
||||||
decorateMethodsWith(FullRemoteVmBackupRunner, {
|
|
||||||
_run: defer,
|
_run: defer,
|
||||||
})
|
})
|
||||||
@@ -1,16 +1,14 @@
|
|||||||
'use strict'
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
|
||||||
|
import { FullRemoteWriter } from '../_writers/FullRemoteWriter.mjs'
|
||||||
const { forkStreamUnpipe } = require('../_forkStreamUnpipe.js')
|
import { FullXapiWriter } from '../_writers/FullXapiWriter.mjs'
|
||||||
const { FullRemoteWriter } = require('../_writers/FullRemoteWriter.js')
|
import { watchStreamSize } from '../../_watchStreamSize.mjs'
|
||||||
const { FullXapiWriter } = require('../_writers/FullXapiWriter.js')
|
import { AbstractXapi } from './_AbstractXapi.mjs'
|
||||||
const { watchStreamSize } = require('../../_watchStreamSize.js')
|
|
||||||
const { AbstractXapi } = require('./_AbstractXapi.js')
|
|
||||||
|
|
||||||
const { debug } = createLogger('xo:backups:FullXapiVmBackup')
|
const { debug } = createLogger('xo:backups:FullXapiVmBackup')
|
||||||
|
|
||||||
exports.FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
|
export const FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
|
||||||
_getWriters() {
|
_getWriters() {
|
||||||
return [FullRemoteWriter, FullXapiWriter]
|
return [FullRemoteWriter, FullXapiWriter]
|
||||||
}
|
}
|
||||||
@@ -37,13 +35,25 @@ exports.FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
|
|||||||
useSnapshot: false,
|
useSnapshot: false,
|
||||||
})
|
})
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const vdis = await exportedVm.$getDisks()
|
||||||
|
let maxStreamLength = 1024 * 1024 // Ovf file and tar headers are a few KB, let's stay safe
|
||||||
|
for (const vdiRef of vdis) {
|
||||||
|
const vdi = await this._xapi.getRecord('VDI', vdiRef)
|
||||||
|
|
||||||
|
// the size a of fully allocated vdi will be virtual_size exaclty, it's a gross over evaluation
|
||||||
|
// of the real stream size in general, since a disk is never completly full
|
||||||
|
// vdi.physical_size seems to underevaluate a lot the real disk usage of a VDI, as of 2023-10-30
|
||||||
|
maxStreamLength += vdi.virtual_size
|
||||||
|
}
|
||||||
|
|
||||||
const sizeContainer = watchStreamSize(stream)
|
const sizeContainer = watchStreamSize(stream)
|
||||||
|
|
||||||
const timestamp = Date.now()
|
const timestamp = Date.now()
|
||||||
|
|
||||||
await this._callWriters(
|
await this._callWriters(
|
||||||
writer =>
|
writer =>
|
||||||
writer.run({
|
writer.run({
|
||||||
|
maxStreamLength,
|
||||||
sizeContainer,
|
sizeContainer,
|
||||||
stream: forkStreamUnpipe(stream),
|
stream: forkStreamUnpipe(stream),
|
||||||
timestamp,
|
timestamp,
|
||||||
@@ -1,15 +1,14 @@
|
|||||||
'use strict'
|
import { asyncEach } from '@vates/async-each'
|
||||||
const assert = require('node:assert')
|
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||||
|
import { defer } from 'golike-defer'
|
||||||
|
import assert from 'node:assert'
|
||||||
|
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
|
||||||
|
import mapValues from 'lodash/mapValues.js'
|
||||||
|
|
||||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
import { AbstractRemote } from './_AbstractRemote.mjs'
|
||||||
const { defer } = require('golike-defer')
|
import { forkDeltaExport } from './_forkDeltaExport.mjs'
|
||||||
const { mapValues } = require('lodash')
|
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
|
||||||
const { Task } = require('../../Task')
|
import { Task } from '../../Task.mjs'
|
||||||
const { AbstractRemote } = require('./_AbstractRemote')
|
|
||||||
const { IncrementalRemoteWriter } = require('../_writers/IncrementalRemoteWriter')
|
|
||||||
const { forkDeltaExport } = require('./_forkDeltaExport')
|
|
||||||
const isVhdDifferencingDisk = require('vhd-lib/isVhdDifferencingDisk')
|
|
||||||
const { asyncEach } = require('@vates/async-each')
|
|
||||||
|
|
||||||
class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
||||||
_getRemoteWriter() {
|
_getRemoteWriter() {
|
||||||
@@ -33,10 +32,10 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
|||||||
useChain: false,
|
useChain: false,
|
||||||
})
|
})
|
||||||
|
|
||||||
const differentialVhds = {}
|
const isVhdDifferencing = {}
|
||||||
|
|
||||||
await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
|
await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
|
||||||
differentialVhds[key] = await isVhdDifferencingDisk(stream)
|
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
|
||||||
})
|
})
|
||||||
|
|
||||||
incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
|
incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
|
||||||
@@ -44,7 +43,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
|||||||
writer =>
|
writer =>
|
||||||
writer.transfer({
|
writer.transfer({
|
||||||
deltaExport: forkDeltaExport(incrementalExport),
|
deltaExport: forkDeltaExport(incrementalExport),
|
||||||
differentialVhds,
|
isVhdDifferencing,
|
||||||
timestamp: metadata.timestamp,
|
timestamp: metadata.timestamp,
|
||||||
vm: metadata.vm,
|
vm: metadata.vm,
|
||||||
vmSnapshot: metadata.vmSnapshot,
|
vmSnapshot: metadata.vmSnapshot,
|
||||||
@@ -61,7 +60,7 @@ class IncrementalRemoteVmBackupRunner extends AbstractRemote {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.IncrementalRemote = IncrementalRemoteVmBackupRunner
|
export const IncrementalRemote = IncrementalRemoteVmBackupRunner
|
||||||
decorateMethodsWith(IncrementalRemoteVmBackupRunner, {
|
decorateMethodsWith(IncrementalRemoteVmBackupRunner, {
|
||||||
_run: defer,
|
_run: defer,
|
||||||
})
|
})
|
||||||
@@ -1,28 +1,26 @@
|
|||||||
'use strict'
|
import { asyncEach } from '@vates/async-each'
|
||||||
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import { pipeline } from 'node:stream'
|
||||||
|
import findLast from 'lodash/findLast.js'
|
||||||
|
import isVhdDifferencingDisk from 'vhd-lib/isVhdDifferencingDisk.js'
|
||||||
|
import keyBy from 'lodash/keyBy.js'
|
||||||
|
import mapValues from 'lodash/mapValues.js'
|
||||||
|
import vhdStreamValidator from 'vhd-lib/vhdStreamValidator.js'
|
||||||
|
|
||||||
const findLast = require('lodash/findLast.js')
|
import { AbstractXapi } from './_AbstractXapi.mjs'
|
||||||
const keyBy = require('lodash/keyBy.js')
|
import { exportIncrementalVm } from '../../_incrementalVm.mjs'
|
||||||
const mapValues = require('lodash/mapValues.js')
|
import { forkDeltaExport } from './_forkDeltaExport.mjs'
|
||||||
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
|
import { IncrementalRemoteWriter } from '../_writers/IncrementalRemoteWriter.mjs'
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
import { IncrementalXapiWriter } from '../_writers/IncrementalXapiWriter.mjs'
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
import { Task } from '../../Task.mjs'
|
||||||
const { pipeline } = require('node:stream')
|
import { watchStreamSize } from '../../_watchStreamSize.mjs'
|
||||||
|
|
||||||
const { IncrementalRemoteWriter } = require('../_writers/IncrementalRemoteWriter.js')
|
|
||||||
const { IncrementalXapiWriter } = require('../_writers/IncrementalXapiWriter.js')
|
|
||||||
const { exportIncrementalVm } = require('../../_incrementalVm.js')
|
|
||||||
const { Task } = require('../../Task.js')
|
|
||||||
const { watchStreamSize } = require('../../_watchStreamSize.js')
|
|
||||||
const { AbstractXapi } = require('./_AbstractXapi.js')
|
|
||||||
const { forkDeltaExport } = require('./_forkDeltaExport.js')
|
|
||||||
const isVhdDifferencingDisk = require('vhd-lib/isVhdDifferencingDisk')
|
|
||||||
const { asyncEach } = require('@vates/async-each')
|
|
||||||
|
|
||||||
const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')
|
const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')
|
||||||
|
|
||||||
const noop = Function.prototype
|
const noop = Function.prototype
|
||||||
|
|
||||||
exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
|
export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
|
||||||
_getWriters() {
|
_getWriters() {
|
||||||
return [IncrementalRemoteWriter, IncrementalXapiWriter]
|
return [IncrementalRemoteWriter, IncrementalXapiWriter]
|
||||||
}
|
}
|
||||||
@@ -43,6 +41,8 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
|
|||||||
|
|
||||||
const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
|
const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
|
||||||
fullVdisRequired,
|
fullVdisRequired,
|
||||||
|
nbdConcurrency: this._settings.nbdConcurrency,
|
||||||
|
preferNbd: this._settings.preferNbd,
|
||||||
})
|
})
|
||||||
// since NBD is network based, if one disk use nbd , all the disk use them
|
// since NBD is network based, if one disk use nbd , all the disk use them
|
||||||
// except the suspended VDI
|
// except the suspended VDI
|
||||||
@@ -50,11 +50,11 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
|
|||||||
Task.info('Transfer data using NBD')
|
Task.info('Transfer data using NBD')
|
||||||
}
|
}
|
||||||
|
|
||||||
const differentialVhds = {}
|
const isVhdDifferencing = {}
|
||||||
// since isVhdDifferencingDisk is reading and unshifting data in stream
|
// since isVhdDifferencingDisk is reading and unshifting data in stream
|
||||||
// it should be done BEFORE any other stream transform
|
// it should be done BEFORE any other stream transform
|
||||||
await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
|
await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
|
||||||
differentialVhds[key] = await isVhdDifferencingDisk(stream)
|
isVhdDifferencing[key] = await isVhdDifferencingDisk(stream)
|
||||||
})
|
})
|
||||||
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
|
const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))
|
||||||
|
|
||||||
@@ -69,7 +69,7 @@ exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXa
|
|||||||
writer =>
|
writer =>
|
||||||
writer.transfer({
|
writer.transfer({
|
||||||
deltaExport: forkDeltaExport(deltaExport),
|
deltaExport: forkDeltaExport(deltaExport),
|
||||||
differentialVhds,
|
isVhdDifferencing,
|
||||||
sizeContainers,
|
sizeContainers,
|
||||||
timestamp,
|
timestamp,
|
||||||
vm,
|
vm,
|
||||||
@@ -1,8 +1,6 @@
|
|||||||
'use strict'
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
import { Task } from '../../Task.mjs'
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
|
||||||
const { Task } = require('../../Task.js')
|
|
||||||
|
|
||||||
const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')
|
const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')
|
||||||
|
|
||||||
@@ -19,7 +17,7 @@ const asyncEach = async (iterable, fn, thisArg = iterable) => {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
exports.Abstract = class AbstractVmBackupRunner {
|
export const Abstract = class AbstractVmBackupRunner {
|
||||||
// calls fn for each function, warns of any errors, and throws only if there are no writers left
|
// calls fn for each function, warns of any errors, and throws only if there are no writers left
|
||||||
async _callWriters(fn, step, parallel = true) {
|
async _callWriters(fn, step, parallel = true) {
|
||||||
const writers = this._writers
|
const writers = this._writers
|
||||||
@@ -1,11 +1,12 @@
|
|||||||
'use strict'
|
import { asyncEach } from '@vates/async-each'
|
||||||
const { Abstract } = require('./_Abstract')
|
import { Disposable } from 'promise-toolbox'
|
||||||
|
|
||||||
const { getVmBackupDir } = require('../../_getVmBackupDir')
|
import { getVmBackupDir } from '../../_getVmBackupDir.mjs'
|
||||||
const { asyncEach } = require('@vates/async-each')
|
|
||||||
const { Disposable } = require('promise-toolbox')
|
|
||||||
|
|
||||||
exports.AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
|
import { Abstract } from './_Abstract.mjs'
|
||||||
|
import { extractIdsFromSimplePattern } from '../../extractIdsFromSimplePattern.mjs'
|
||||||
|
|
||||||
|
export const AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
|
||||||
constructor({
|
constructor({
|
||||||
config,
|
config,
|
||||||
job,
|
job,
|
||||||
@@ -34,7 +35,8 @@ exports.AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
|
|||||||
this._writers = writers
|
this._writers = writers
|
||||||
|
|
||||||
const RemoteWriter = this._getRemoteWriter()
|
const RemoteWriter = this._getRemoteWriter()
|
||||||
Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
|
extractIdsFromSimplePattern(job.remotes).forEach(remoteId => {
|
||||||
|
const adapter = remoteAdapters[remoteId]
|
||||||
const targetSettings = {
|
const targetSettings = {
|
||||||
...settings,
|
...settings,
|
||||||
...allSettings[remoteId],
|
...allSettings[remoteId],
|
||||||
@@ -1,18 +1,16 @@
|
|||||||
'use strict'
|
import assert from 'node:assert'
|
||||||
|
import groupBy from 'lodash/groupBy.js'
|
||||||
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||||
|
import { defer } from 'golike-defer'
|
||||||
|
import { formatDateTime } from '@xen-orchestra/xapi'
|
||||||
|
|
||||||
const assert = require('assert')
|
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||||
const groupBy = require('lodash/groupBy.js')
|
import { Task } from '../../Task.mjs'
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
import { Abstract } from './_Abstract.mjs'
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
|
||||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
|
||||||
const { defer } = require('golike-defer')
|
|
||||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
|
||||||
|
|
||||||
const { getOldEntries } = require('../../_getOldEntries.js')
|
export const AbstractXapi = class AbstractXapiVmBackupRunner extends Abstract {
|
||||||
const { Task } = require('../../Task.js')
|
|
||||||
const { Abstract } = require('./_Abstract.js')
|
|
||||||
|
|
||||||
class AbstractXapiVmBackupRunner extends Abstract {
|
|
||||||
constructor({
|
constructor({
|
||||||
config,
|
config,
|
||||||
getSnapshotNameLabel,
|
getSnapshotNameLabel,
|
||||||
@@ -33,6 +31,11 @@ class AbstractXapiVmBackupRunner extends Abstract {
|
|||||||
throw new Error('cannot backup a VM created by this very job')
|
throw new Error('cannot backup a VM created by this very job')
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const currentOperations = Object.values(vm.current_operations)
|
||||||
|
if (currentOperations.some(_ => _ === 'migrate_send' || _ === 'pool_migrate')) {
|
||||||
|
throw new Error('cannot backup a VM currently being migrated')
|
||||||
|
}
|
||||||
|
|
||||||
this.config = config
|
this.config = config
|
||||||
this.job = job
|
this.job = job
|
||||||
this.remoteAdapters = remoteAdapters
|
this.remoteAdapters = remoteAdapters
|
||||||
@@ -258,7 +261,15 @@ class AbstractXapiVmBackupRunner extends Abstract {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (this._writers.size !== 0) {
|
if (this._writers.size !== 0) {
|
||||||
await this._copy()
|
const { pool_migrate = null, migrate_send = null } = this._exportedVm.blocked_operations
|
||||||
|
|
||||||
|
const reason = 'VM migration is blocked during backup'
|
||||||
|
await this._exportedVm.update_blocked_operations({ pool_migrate: reason, migrate_send: reason })
|
||||||
|
try {
|
||||||
|
await this._copy()
|
||||||
|
} finally {
|
||||||
|
await this._exportedVm.update_blocked_operations({ pool_migrate, migrate_send })
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
if (startAfter) {
|
if (startAfter) {
|
||||||
@@ -271,8 +282,7 @@ class AbstractXapiVmBackupRunner extends Abstract {
|
|||||||
await this._healthCheck()
|
await this._healthCheck()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
exports.AbstractXapi = AbstractXapiVmBackupRunner
|
|
||||||
|
|
||||||
decorateMethodsWith(AbstractXapiVmBackupRunner, {
|
decorateMethodsWith(AbstractXapi, {
|
||||||
run: defer,
|
run: defer,
|
||||||
})
|
})
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
'use strict'
|
|
||||||
|
|
||||||
const { mapValues } = require('lodash')
|
|
||||||
const { forkStreamUnpipe } = require('../_forkStreamUnpipe')
|
|
||||||
|
|
||||||
exports.forkDeltaExport = function forkDeltaExport(deltaExport) {
|
|
||||||
return Object.create(deltaExport, {
|
|
||||||
streams: {
|
|
||||||
value: mapValues(deltaExport.streams, forkStreamUnpipe),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
import cloneDeep from 'lodash/cloneDeep.js'
|
||||||
|
import mapValues from 'lodash/mapValues.js'
|
||||||
|
|
||||||
|
import { forkStreamUnpipe } from '../_forkStreamUnpipe.mjs'
|
||||||
|
|
||||||
|
export function forkDeltaExport(deltaExport) {
|
||||||
|
const { streams, ...rest } = deltaExport
|
||||||
|
const newMetadata = cloneDeep(rest)
|
||||||
|
newMetadata.streams = mapValues(streams, forkStreamUnpipe)
|
||||||
|
return newMetadata
|
||||||
|
}
|
||||||
@@ -1,13 +1,11 @@
|
|||||||
'use strict'
|
import { formatFilenameDate } from '../../_filenameDate.mjs'
|
||||||
|
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||||
|
import { Task } from '../../Task.mjs'
|
||||||
|
|
||||||
const { formatFilenameDate } = require('../../_filenameDate.js')
|
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
|
||||||
const { getOldEntries } = require('../../_getOldEntries.js')
|
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
|
||||||
const { Task } = require('../../Task.js')
|
|
||||||
|
|
||||||
const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
|
export class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
|
||||||
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
|
|
||||||
|
|
||||||
exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
|
|
||||||
constructor(props) {
|
constructor(props) {
|
||||||
super(props)
|
super(props)
|
||||||
|
|
||||||
@@ -26,7 +24,7 @@ exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(Abst
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
async _run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
|
async _run({ maxStreamLength, timestamp, sizeContainer, stream, streamLength, vm, vmSnapshot }) {
|
||||||
const settings = this._settings
|
const settings = this._settings
|
||||||
const job = this._job
|
const job = this._job
|
||||||
const scheduleId = this._scheduleId
|
const scheduleId = this._scheduleId
|
||||||
@@ -67,6 +65,8 @@ exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(Abst
|
|||||||
|
|
||||||
await Task.run({ name: 'transfer' }, async () => {
|
await Task.run({ name: 'transfer' }, async () => {
|
||||||
await adapter.outputStream(dataFilename, stream, {
|
await adapter.outputStream(dataFilename, stream, {
|
||||||
|
maxStreamLength,
|
||||||
|
streamLength,
|
||||||
validator: tmpPath => adapter.isValidXva(tmpPath),
|
validator: tmpPath => adapter.isValidXva(tmpPath),
|
||||||
})
|
})
|
||||||
return { size: sizeContainer.size }
|
return { size: sizeContainer.size }
|
||||||
@@ -1,18 +1,16 @@
|
|||||||
'use strict'
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
|
||||||
|
import { formatDateTime } from '@xen-orchestra/xapi'
|
||||||
|
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
import { formatFilenameDate } from '../../_filenameDate.mjs'
|
||||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
import { Task } from '../../Task.mjs'
|
||||||
|
|
||||||
const { formatFilenameDate } = require('../../_filenameDate.js')
|
import { AbstractFullWriter } from './_AbstractFullWriter.mjs'
|
||||||
const { getOldEntries } = require('../../_getOldEntries.js')
|
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
|
||||||
const { Task } = require('../../Task.js')
|
import { listReplicatedVms } from './_listReplicatedVms.mjs'
|
||||||
|
|
||||||
const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
|
export class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
|
||||||
const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
|
|
||||||
const { listReplicatedVms } = require('./_listReplicatedVms.js')
|
|
||||||
|
|
||||||
exports.FullXapiWriter = class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
|
|
||||||
constructor(props) {
|
constructor(props) {
|
||||||
super(props)
|
super(props)
|
||||||
|
|
||||||
@@ -1,29 +1,28 @@
|
|||||||
'use strict'
|
import assert from 'node:assert'
|
||||||
|
import mapValues from 'lodash/mapValues.js'
|
||||||
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
import { asyncEach } from '@vates/async-each'
|
||||||
|
import { asyncMap } from '@xen-orchestra/async-map'
|
||||||
|
import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
|
||||||
|
import { createLogger } from '@xen-orchestra/log'
|
||||||
|
import { decorateClass } from '@vates/decorate-with'
|
||||||
|
import { defer } from 'golike-defer'
|
||||||
|
import { dirname } from 'node:path'
|
||||||
|
|
||||||
const assert = require('assert')
|
import { formatFilenameDate } from '../../_filenameDate.mjs'
|
||||||
const mapValues = require('lodash/mapValues.js')
|
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
import { TAG_BASE_DELTA } from '../../_incrementalVm.mjs'
|
||||||
const { asyncEach } = require('@vates/async-each')
|
import { Task } from '../../Task.mjs'
|
||||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
|
||||||
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
|
|
||||||
const { createLogger } = require('@xen-orchestra/log')
|
|
||||||
const { decorateClass } = require('@vates/decorate-with')
|
|
||||||
const { defer } = require('golike-defer')
|
|
||||||
const { dirname } = require('path')
|
|
||||||
|
|
||||||
const { formatFilenameDate } = require('../../_filenameDate.js')
|
import { MixinRemoteWriter } from './_MixinRemoteWriter.mjs'
|
||||||
const { getOldEntries } = require('../../_getOldEntries.js')
|
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
|
||||||
const { Task } = require('../../Task.js')
|
import { checkVhd } from './_checkVhd.mjs'
|
||||||
|
import { packUuid } from './_packUuid.mjs'
|
||||||
const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
|
import { Disposable } from 'promise-toolbox'
|
||||||
const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
|
|
||||||
const { checkVhd } = require('./_checkVhd.js')
|
|
||||||
const { packUuid } = require('./_packUuid.js')
|
|
||||||
const { Disposable } = require('promise-toolbox')
|
|
||||||
|
|
||||||
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
|
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
|
||||||
|
|
||||||
class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
|
export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
|
||||||
async checkBaseVdis(baseUuidToSrcVdi) {
|
async checkBaseVdis(baseUuidToSrcVdi) {
|
||||||
const { handler } = this._adapter
|
const { handler } = this._adapter
|
||||||
const adapter = this._adapter
|
const adapter = this._adapter
|
||||||
@@ -134,7 +133,7 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async _transfer($defer, { differentialVhds, timestamp, deltaExport, vm, vmSnapshot }) {
|
async _transfer($defer, { isVhdDifferencing, timestamp, deltaExport, vm, vmSnapshot }) {
|
||||||
const adapter = this._adapter
|
const adapter = this._adapter
|
||||||
const job = this._job
|
const job = this._job
|
||||||
const scheduleId = this._scheduleId
|
const scheduleId = this._scheduleId
|
||||||
@@ -162,6 +161,7 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
|
|||||||
)
|
)
|
||||||
|
|
||||||
metadataContent = {
|
metadataContent = {
|
||||||
|
isVhdDifferencing,
|
||||||
jobId,
|
jobId,
|
||||||
mode: job.mode,
|
mode: job.mode,
|
||||||
scheduleId,
|
scheduleId,
|
||||||
@@ -181,9 +181,9 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
|
|||||||
async ([id, vdi]) => {
|
async ([id, vdi]) => {
|
||||||
const path = `${this._vmBackupDir}/${vhds[id]}`
|
const path = `${this._vmBackupDir}/${vhds[id]}`
|
||||||
|
|
||||||
const isDelta = differentialVhds[`${id}.vhd`]
|
const isDifferencing = isVhdDifferencing[`${id}.vhd`]
|
||||||
let parentPath
|
let parentPath
|
||||||
if (isDelta) {
|
if (isDifferencing) {
|
||||||
const vdiDir = dirname(path)
|
const vdiDir = dirname(path)
|
||||||
parentPath = (
|
parentPath = (
|
||||||
await handler.list(vdiDir, {
|
await handler.list(vdiDir, {
|
||||||
@@ -197,7 +197,7 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
|
|||||||
assert.notStrictEqual(
|
assert.notStrictEqual(
|
||||||
parentPath,
|
parentPath,
|
||||||
undefined,
|
undefined,
|
||||||
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config['xo:base_delta']}`
|
`missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
|
||||||
)
|
)
|
||||||
|
|
||||||
parentPath = parentPath.slice(1) // remove leading slash
|
parentPath = parentPath.slice(1) // remove leading slash
|
||||||
@@ -206,15 +206,19 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
|
|||||||
await checkVhd(handler, parentPath)
|
await checkVhd(handler, parentPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
// don't write it as transferSize += await async function
|
||||||
|
// since i += await asyncFun lead to race condition
|
||||||
|
// as explained : https://eslint.org/docs/latest/rules/require-atomic-updates
|
||||||
|
const transferSizeOneDisk = await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
|
||||||
// no checksum for VHDs, because they will be invalidated by
|
// no checksum for VHDs, because they will be invalidated by
|
||||||
// merges and chainings
|
// merges and chainings
|
||||||
checksum: false,
|
checksum: false,
|
||||||
validator: tmpPath => checkVhd(handler, tmpPath),
|
validator: tmpPath => checkVhd(handler, tmpPath),
|
||||||
writeBlockConcurrency: this._config.writeBlockConcurrency,
|
writeBlockConcurrency: this._config.writeBlockConcurrency,
|
||||||
})
|
})
|
||||||
|
transferSize += transferSizeOneDisk
|
||||||
|
|
||||||
if (isDelta) {
|
if (isDifferencing) {
|
||||||
await chainVhd(handler, parentPath, handler, path)
|
await chainVhd(handler, parentPath, handler, path)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -238,6 +242,6 @@ class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWrite
|
|||||||
// TODO: run cleanup?
|
// TODO: run cleanup?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
exports.IncrementalRemoteWriter = decorateClass(IncrementalRemoteWriter, {
|
decorateClass(IncrementalRemoteWriter, {
|
||||||
_transfer: defer,
|
_transfer: defer,
|
||||||
})
|
})
|
||||||
@@ -1,19 +1,18 @@
|
|||||||
'use strict'
|
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
|
||||||
|
import ignoreErrors from 'promise-toolbox/ignoreErrors'
|
||||||
|
import { formatDateTime } from '@xen-orchestra/xapi'
|
||||||
|
|
||||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
import { formatFilenameDate } from '../../_filenameDate.mjs'
|
||||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
import { getOldEntries } from '../../_getOldEntries.mjs'
|
||||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
import { importIncrementalVm, TAG_BACKUP_SR, TAG_BASE_DELTA, TAG_COPY_SRC } from '../../_incrementalVm.mjs'
|
||||||
|
import { Task } from '../../Task.mjs'
|
||||||
|
|
||||||
const { formatFilenameDate } = require('../../_filenameDate.js')
|
import { AbstractIncrementalWriter } from './_AbstractIncrementalWriter.mjs'
|
||||||
const { getOldEntries } = require('../../_getOldEntries.js')
|
import { MixinXapiWriter } from './_MixinXapiWriter.mjs'
|
||||||
const { importIncrementalVm, TAG_COPY_SRC } = require('../../_incrementalVm.js')
|
import { listReplicatedVms } from './_listReplicatedVms.mjs'
|
||||||
const { Task } = require('../../Task.js')
|
import find from 'lodash/find.js'
|
||||||
|
|
||||||
const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
|
export class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
|
||||||
const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
|
|
||||||
const { listReplicatedVms } = require('./_listReplicatedVms.js')
|
|
||||||
|
|
||||||
exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
|
|
||||||
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
|
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
|
||||||
const sr = this._sr
|
const sr = this._sr
|
||||||
const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
|
const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
|
||||||
@@ -83,6 +82,54 @@ exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWri
|
|||||||
return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
|
return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#decorateVmMetadata(backup) {
|
||||||
|
const { _warmMigration } = this._settings
|
||||||
|
const sr = this._sr
|
||||||
|
const xapi = sr.$xapi
|
||||||
|
const vm = backup.vm
|
||||||
|
vm.other_config[TAG_COPY_SRC] = vm.uuid
|
||||||
|
const remoteBaseVmUuid = vm.other_config[TAG_BASE_DELTA]
|
||||||
|
let baseVm
|
||||||
|
if (remoteBaseVmUuid) {
|
||||||
|
baseVm = find(
|
||||||
|
xapi.objects.all,
|
||||||
|
obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid && obj[TAG_BACKUP_SR] === sr.$id
|
||||||
|
)
|
||||||
|
|
||||||
|
if (!baseVm) {
|
||||||
|
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const baseVdis = {}
|
||||||
|
baseVm?.$VBDs.forEach(vbd => {
|
||||||
|
const vdi = vbd.$VDI
|
||||||
|
if (vdi !== undefined) {
|
||||||
|
baseVdis[vbd.VDI] = vbd.$VDI
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
vm.other_config[TAG_COPY_SRC] = vm.uuid
|
||||||
|
if (!_warmMigration) {
|
||||||
|
vm.tags.push('Continuous Replication')
|
||||||
|
}
|
||||||
|
|
||||||
|
Object.values(backup.vdis).forEach(vdi => {
|
||||||
|
vdi.other_config[TAG_COPY_SRC] = vdi.uuid
|
||||||
|
vdi.SR = sr.$ref
|
||||||
|
// vdi.other_config[TAG_BASE_DELTA] is never defined on a suspend vdi
|
||||||
|
if (vdi.other_config[TAG_BASE_DELTA]) {
|
||||||
|
const remoteBaseVdiUuid = vdi.other_config[TAG_BASE_DELTA]
|
||||||
|
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
|
||||||
|
if (!baseVdi) {
|
||||||
|
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
|
||||||
|
}
|
||||||
|
vdi.baseVdi = baseVdi
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return backup
|
||||||
|
}
|
||||||
|
|
||||||
async _transfer({ timestamp, deltaExport, sizeContainers, vm }) {
|
async _transfer({ timestamp, deltaExport, sizeContainers, vm }) {
|
||||||
const { _warmMigration } = this._settings
|
const { _warmMigration } = this._settings
|
||||||
const sr = this._sr
|
const sr = this._sr
|
||||||
@@ -93,16 +140,7 @@ exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWri
|
|||||||
|
|
||||||
let targetVmRef
|
let targetVmRef
|
||||||
await Task.run({ name: 'transfer' }, async () => {
|
await Task.run({ name: 'transfer' }, async () => {
|
||||||
targetVmRef = await importIncrementalVm(
|
targetVmRef = await importIncrementalVm(this.#decorateVmMetadata(deltaExport), sr)
|
||||||
{
|
|
||||||
__proto__: deltaExport,
|
|
||||||
vm: {
|
|
||||||
...deltaExport.vm,
|
|
||||||
tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
|
|
||||||
},
|
|
||||||
},
|
|
||||||
sr
|
|
||||||
)
|
|
||||||
return {
|
return {
|
||||||
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
|
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
|
||||||
}
|
}
|
||||||
@@ -123,13 +161,13 @@ exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWri
|
|||||||
)
|
)
|
||||||
),
|
),
|
||||||
targetVm.update_other_config({
|
targetVm.update_other_config({
|
||||||
'xo:backup:sr': srUuid,
|
[TAG_BACKUP_SR]: srUuid,
|
||||||
|
|
||||||
// these entries need to be added in case of offline backup
|
// these entries need to be added in case of offline backup
|
||||||
'xo:backup:datetime': formatDateTime(timestamp),
|
'xo:backup:datetime': formatDateTime(timestamp),
|
||||||
'xo:backup:job': job.id,
|
'xo:backup:job': job.id,
|
||||||
'xo:backup:schedule': scheduleId,
|
'xo:backup:schedule': scheduleId,
|
||||||
'xo:backup:vm': vm.uuid,
|
[TAG_BASE_DELTA]: vm.uuid,
|
||||||
}),
|
}),
|
||||||
])
|
])
|
||||||
}
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user