Compare commits
850 commits: feat_test_...xo6/update
.eslintrc.js (60 changed lines)
@@ -15,9 +15,10 @@ module.exports = {
   overrides: [
     {
-      files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
+      files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js', '**/scripts/**.{,c,m}js'],
       rules: {
         'n/no-process-exit': 'off',
         'n/shebang': 'off',
         'no-console': 'off',
       },
     },
@@ -46,6 +47,58 @@ module.exports = {
       ],
     },
   },
+  {
+    files: ['@xen-orchestra/{web-core,lite,web}/**/*.{vue,ts}'],
+    parserOptions: {
+      sourceType: 'module',
+    },
+    plugins: ['import'],
+    extends: [
+      'plugin:import/recommended',
+      'plugin:import/typescript',
+      'plugin:vue/vue3-recommended',
+      '@vue/eslint-config-typescript/recommended',
+      '@vue/eslint-config-prettier',
+    ],
+    settings: {
+      'import/resolver': {
+        typescript: true,
+        'eslint-import-resolver-custom-alias': {
+          alias: {
+            '@core': '../web-core/lib',
+            '@': './src',
+          },
+          extensions: ['.ts'],
+          packages: ['@xen-orchestra/lite', '@xen-orchestra/web'],
+        },
+      },
+    },
+    rules: {
+      'no-void': 'off',
+      'n/no-missing-import': 'off', // using 'import' plugin instead to support TS aliases
+      '@typescript-eslint/no-explicit-any': 'off',
+      'vue/require-default-prop': 'off', // https://github.com/vuejs/eslint-plugin-vue/issues/2051
+    },
+  },
+  {
+    files: ['@xen-orchestra/{web-core,lite,web}/src/pages/**/*.vue'],
+    parserOptions: {
+      sourceType: 'module',
+    },
+    rules: {
+      'vue/multi-word-component-names': 'off',
+    },
+  },
+  {
+    files: ['@xen-orchestra/{web-core,lite,web}/typed-router.d.ts'],
+    parserOptions: {
+      sourceType: 'module',
+    },
+    rules: {
+      'eslint-comments/disable-enable-pair': 'off',
+      'eslint-comments/no-unlimited-disable': 'off',
+    },
+  },
   ],

   parserOptions: {
@@ -68,6 +121,11 @@ module.exports = {

     'no-console': ['error', { allow: ['warn', 'error'] }],

+    // this rule can prevent race condition bugs like parallel `a += await foo()`
+    //
+    // as it has a lot of false positives, it is only enabled as a warning for now
+    'require-atomic-updates': 'warn',
+
     strict: 'error',
   },
 }
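The `require-atomic-updates` comment above refers to a real lost-update hazard; here is a minimal sketch (the `add()` helper is illustrative, not part of the patch) of the race the rule flags:

```js
// `total += await x` reads `total` BEFORE the await and writes it back after,
// so two parallel calls both read 0 and the second write clobbers the first.
let total = 0
async function add() {
  total += await Promise.resolve(1)
}
await Promise.all([add(), add()])
console.log(total) // 1, not 2: one update was lost
```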
.github/ISSUE_TEMPLATE/bug_report.md (48 deleted lines, vendored)
@@ -1,48 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
-assignees: ''
----
-
-1. ⚠️ **If you don't follow this template, the issue will be closed**.
-2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
-
-Are you using XOA or XO from the sources?
-
-If XOA:
-
-- which release channel? (`stable` vs `latest`)
-- please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
-
-If XO from the sources:
-
-- Provide **your commit number**. If it's older than a week, we won't investigate
-- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
-- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Environment (please provide the following information):**
-
-- Node: [e.g. 16.12.1]
-- hypervisor: [e.g. XCP-ng 8.2.0]
-
-**Additional context**
-Add any other context about the problem here.
.github/ISSUE_TEMPLATE/bug_report.yml (119 added lines, vendored, new file)
@@ -0,0 +1,119 @@
+name: Bug Report
+description: Create a report to help us improve
+labels: ['type: bug :bug:', 'status: triaging :triangular_flag_on_post:']
+body:
+  - type: markdown
+    attributes:
+      value: |
+        1. ⚠️ **If you don't follow this template, the issue will be closed**.
+        2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
+  - type: markdown
+    attributes:
+      value: '## Are you using XOA or XO from the sources?'
+  - type: dropdown
+    id: xo-origin
+    attributes:
+      label: Are you using XOA or XO from the sources?
+      options:
+        - XOA
+        - XO from the sources
+        - both
+    validations:
+      required: false
+  - type: markdown
+    attributes:
+      value: '### If XOA:'
+  - type: dropdown
+    id: xoa-channel
+    attributes:
+      label: Which release channel?
+      description: please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
+      options:
+        - stable
+        - latest
+        - both
+    validations:
+      required: false
+  - type: markdown
+    attributes:
+      value: '### If XO from the sources:'
+  - type: markdown
+    attributes:
+      value: |
+        - Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
+        - As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
+  - type: input
+    id: xo-sources-commit-number
+    attributes:
+      label: Provide your commit number
+      description: If it's older than a week, we won't investigate
+      placeholder: e.g. 579f0
+    validations:
+      required: false
+  - type: markdown
+    attributes:
+      value: '## Bug description:'
+  - type: textarea
+    id: bug-description
+    attributes:
+      label: Describe the bug
+      description: A clear and concise description of what the bug is
+    validations:
+      required: true
+  - type: textarea
+    id: error-message
+    attributes:
+      label: Error message
+      render: Text
+    validations:
+      required: false
+  - type: textarea
+    id: steps
+    attributes:
+      label: To reproduce
+      description: 'Steps to reproduce the behavior:'
+      value: |
+        1. Go to '...'
+        2. Click on '...'
+        3. Scroll down to '...'
+        4. See error
+    validations:
+      required: false
+  - type: textarea
+    id: expected-behavior
+    attributes:
+      label: Expected behavior
+      description: A clear and concise description of what you expected to happen
+    validations:
+      required: false
+  - type: textarea
+    id: screenshots
+    attributes:
+      label: Screenshots
+      description: If applicable, add screenshots to help explain your problem
+    validations:
+      required: false
+  - type: markdown
+    attributes:
+      value: '## Environment (please provide the following information):'
+  - type: input
+    id: node-version
+    attributes:
+      label: Node
+      placeholder: e.g. 16.12.1
+    validations:
+      required: true
+  - type: input
+    id: hypervisor-version
+    attributes:
+      label: Hypervisor
+      placeholder: e.g. XCP-ng 8.2.0
+    validations:
+      required: true
+  - type: textarea
+    id: additional-context
+    attributes:
+      label: Additional context
+      description: Add any other context about the problem here
+    validations:
+      required: false
.github/workflows/ci.yml (4 changed lines, vendored)
@@ -24,8 +24,12 @@ jobs:
           cache: 'yarn'
       - name: Install project dependencies
        run: yarn
+      - name: Ensure yarn.lock is up-to-date
+        run: git diff --exit-code yarn.lock
+      - name: Build the project
+        run: yarn build
       - name: Unit tests
         run: yarn test-unit
       - name: Lint tests
         run: yarn test-lint
       - name: Integration tests
.gitignore (4 changed lines, vendored)
@@ -30,8 +30,12 @@ pnpm-debug.log.*
 yarn-error.log
 yarn-error.log.*
 .env
+*.tsbuildinfo

 # code coverage
 .nyc_output/
 coverage/
+.turbo/

+# https://node-tap.org/dot-tap-folder/
+.tap/

@@ -1,8 +1,11 @@
 'use strict'

 module.exports = {
+  arrowParens: 'avoid',
+  jsxSingleQuote: true,
   semi: false,
   singleQuote: true,
+  trailingComma: 'es5',

   // 2020-11-24: Requested by nraynaud and approved by the rest of the team
   //
@@ -33,8 +33,7 @@
     "test": "node--test"
   },
   "devDependencies": {
-    "sinon": "^15.0.1",
-    "tap": "^16.3.0",
+    "sinon": "^17.0.1",
     "test": "^3.2.1"
   }
 }
@@ -62,6 +62,42 @@ decorateClass(Foo, {
 })
 ```

+### `decorateObject(object, map)`
+
+Decorates an object the same way `decorateClass()` decorates a class:
+
+```js
+import { decorateObject } from '@vates/decorate-with'
+
+const object = {
+  get bar() {
+    // body
+  },
+
+  set bar(value) {
+    // body
+  },
+
+  baz() {
+    // body
+  },
+}
+
+decorateObject(object, {
+  // getter and/or setter
+  bar: {
+    // without arguments
+    get: lodash.memoize,
+
+    // with arguments
+    set: [lodash.debounce, 150],
+  },
+
+  // method (with or without arguments)
+  baz: lodash.curry,
+})
+```
+
 ### `perInstance(fn, ...args)`

 Helper to decorate the method by instance instead of for the whole class.
@@ -80,6 +80,42 @@ decorateClass(Foo, {
 })
 ```

+### `decorateObject(object, map)`
+
+Decorates an object the same way `decorateClass()` decorates a class:
+
+```js
+import { decorateObject } from '@vates/decorate-with'
+
+const object = {
+  get bar() {
+    // body
+  },
+
+  set bar(value) {
+    // body
+  },
+
+  baz() {
+    // body
+  },
+}
+
+decorateObject(object, {
+  // getter and/or setter
+  bar: {
+    // without arguments
+    get: lodash.memoize,
+
+    // with arguments
+    set: [lodash.debounce, 150],
+  },
+
+  // method (with or without arguments)
+  baz: lodash.curry,
+})
+```
+
 ### `perInstance(fn, ...args)`

 Helper to decorate the method by instance instead of for the whole class.
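The `perInstance` helper mentioned above keeps decorator state per instance; a hedged sketch of how it is typically combined with `decorateClass` (the `Foo`/`lodash.memoize` pairing is illustrative, not taken from this diff):

```js
const { decorateClass, perInstance } = require('@vates/decorate-with')
const lodash = require('lodash')

class Foo {
  bar() {
    // expensive work
  }
}

decorateClass(Foo, {
  // the [perInstance, lodash.memoize] form means the original method is
  // passed through perInstance(originalBar, lodash.memoize), so each Foo
  // instance gets its own memoization cache instead of sharing one class-wide
  bar: [perInstance, lodash.memoize],
})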
@@ -14,10 +14,13 @@ function applyDecorator(decorator, value) {
 }

 exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
-  const { prototype } = klass
+  return decorateObject(klass.prototype, map)
+}
+
+function decorateObject(object, map) {
   for (const name of Object.keys(map)) {
     const decorator = map[name]
-    const descriptor = getOwnPropertyDescriptor(prototype, name)
+    const descriptor = getOwnPropertyDescriptor(object, name)
     if (typeof decorator === 'function' || Array.isArray(decorator)) {
       descriptor.value = applyDecorator(decorator, descriptor.value)
     } else {
@@ -30,10 +33,11 @@ exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
     }
   }

-    defineProperty(prototype, name, descriptor)
+    defineProperty(object, name, descriptor)
   }
-  return klass
+  return object
 }
+exports.decorateObject = decorateObject

 exports.perInstance = function perInstance(fn, decorator, ...args) {
   const map = new WeakMap()
@@ -13,12 +13,15 @@ describe('decorateWith', () => {
   const expectedFn = Function.prototype
   const newFn = () => {}

-  const decorator = decorateWith(function wrapper(fn, ...args) {
-    assert.deepStrictEqual(fn, expectedFn)
-    assert.deepStrictEqual(args, expectedArgs)
+  const decorator = decorateWith(
+    function wrapper(fn, ...args) {
+      assert.deepStrictEqual(fn, expectedFn)
+      assert.deepStrictEqual(args, expectedArgs)

-    return newFn
-  }, ...expectedArgs)
+      return newFn
+    },
+    ...expectedArgs
+  )

   const descriptor = {
     configurable: true,
@@ -20,7 +20,7 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "2.0.0",
+  "version": "2.1.0",
   "engines": {
     "node": ">=8.10"
   },
@@ -14,7 +14,7 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "0.1.4",
+  "version": "0.1.5",
   "engines": {
     "node": ">=8.10"
   },
@@ -23,13 +23,13 @@
     "test": "node--test"
   },
   "dependencies": {
-    "@vates/multi-key-map": "^0.1.0",
+    "@vates/multi-key-map": "^0.2.0",
     "@xen-orchestra/async-map": "^0.1.2",
     "@xen-orchestra/log": "^0.6.0",
     "ensure-array": "^1.0.0"
   },
   "devDependencies": {
-    "sinon": "^15.0.1",
+    "sinon": "^17.0.1",
     "test": "^3.2.1"
   }
 }
@@ -20,6 +20,9 @@ function assertListeners(t, event, listeners) {
 }

 t.beforeEach(function (t) {
+  // work around https://github.com/tapjs/tapjs/issues/998
+  t.context = {}
+
   t.context.ee = new EventEmitter()
   t.context.em = new EventListenersManager(t.context.ee)
 })
@@ -38,9 +38,9 @@
   "version": "1.0.1",
   "scripts": {
     "postversion": "npm publish --access public",
-    "test": "tap --branches=72"
+    "test": "tap --allow-incomplete-coverage"
   },
   "devDependencies": {
-    "tap": "^16.2.0"
+    "tap": "^18.7.0"
   }
 }
@vates/fuse-vhd/.USAGE.md (28 added lines, new file)
@@ -0,0 +1,28 @@
+Mount a VHD generated by xen-orchestra to the filesystem
+
+### Library
+
+```js
+import { mount } from 'fuse-vhd'
+
+// returns a disposable, see promise-toolbox/Disposable
+// unmounts automatically when the disposable is disposed
+// in case of a differencing VHD, it mounts the full chain
+await mount(handler, diskId, mountPoint)
+```
+
+### cli
+
+From the install folder:
+
+```
+cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
+```
+
+After installing the package:
+
+```
+xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
+```
+
+remoteUrl can be found by using the cli in `@xen-orchestra/fs`; for example, a local remote will have a url like `file:///path/to/remote/root`
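Because `mount()` returns a disposable, cleanup can be delegated to promise-toolbox; below is a minimal sketch modeled on the cli further down (the remote URL, VHD path and mount point are placeholder values, and the scoped import name `@vates/fuse-vhd` is assumed from the package.json):

```js
import Disposable from 'promise-toolbox/Disposable'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { mount } from '@vates/fuse-vhd'

// Disposable.wrap() runs the generator, then disposes every yielded
// disposable on exit: here it unmounts the VHD and closes the handler
const run = Disposable.wrap(async function* () {
  const handler = yield getSyncedHandler({ url: 'file:///path/to/remote/root' })
  yield mount(handler, 'vm.vhd', '/mnt/vhd')
  // ... read files under /mnt/vhd while it stays mounted ...
})
await run()
```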
@vates/fuse-vhd/README.md (59 added lines, new file)
@@ -0,0 +1,59 @@
+<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
+
+# @vates/fuse-vhd
+
+[](https://npmjs.org/package/@vates/fuse-vhd)  [](https://bundlephobia.com/result?p=@vates/fuse-vhd) [](https://npmjs.org/package/@vates/fuse-vhd)
+
+## Install
+
+Installation of the [npm package](https://npmjs.org/package/@vates/fuse-vhd):
+
+```sh
+npm install --save @vates/fuse-vhd
+```
+
+## Usage
+
+Mount a VHD generated by xen-orchestra to the filesystem
+
+### Library
+
+```js
+import { mount } from 'fuse-vhd'
+
+// returns a disposable, see promise-toolbox/Disposable
+// unmounts automatically when the disposable is disposed
+// in case of a differencing VHD, it mounts the full chain
+await mount(handler, diskId, mountPoint)
+```
+
+### cli
+
+From the install folder:
+
+```
+cli.mjs <remoteUrl> <vhdPathInRemote> <mountPoint>
+```
+
+After installing the package:
+
+```
+xo-fuse-vhd <remoteUrl> <vhdPathInRemote> <mountPoint>
+```
+
+remoteUrl can be found by using the cli in `@xen-orchestra/fs`; for example, a local remote will have a url like `file:///path/to/remote/root`
+
+## Contributions
+
+Contributions are _very_ welcomed, either on the documentation or on
+the code.
+
+You may:
+
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
+  you've encountered;
+- fork and create a pull request.
+
+## License
+
+[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@vates/fuse-vhd/cli.mjs (26 added lines, new executable file)
@@ -0,0 +1,26 @@
+#!/usr/bin/env node
+
+import Disposable from 'promise-toolbox/Disposable'
+import { getSyncedHandler } from '@xen-orchestra/fs'
+
+import { mount } from './index.mjs'
+
+async function* main([remoteUrl, vhdPathInRemote, mountPoint]) {
+  if (mountPoint === undefined) {
+    throw new TypeError('missing arg: cli <remoteUrl> <vhdPathInRemote> <mountPoint>')
+  }
+  const handler = yield getSyncedHandler({ url: remoteUrl })
+  const mounted = await mount(handler, vhdPathInRemote, mountPoint)
+
+  let disposePromise
+  process.on('SIGINT', async () => {
+    // ensure single dispose
+    if (!disposePromise) {
+      disposePromise = mounted.dispose()
+    }
+    await disposePromise
+    process.exit()
+  })
+}
+
+Disposable.wrap(main)(process.argv.slice(2))
@@ -1,9 +1,7 @@
-'use strict'
-
-const LRU = require('lru-cache')
-const Fuse = require('fuse-native')
-const { VhdSynthetic } = require('vhd-lib')
-const { Disposable, fromCallback } = require('promise-toolbox')
+import LRU from 'lru-cache'
+import Fuse from 'fuse-native'
+import { VhdSynthetic } from 'vhd-lib'
+import { Disposable, fromCallback } from 'promise-toolbox'

 // build a stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
 const stat = st => ({
@@ -16,7 +14,7 @@ const stat = st => ({
   gid: st.gid !== undefined ? st.gid : process.getgid(),
 })

-exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
+export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
   const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)

   const cache = new LRU({
@@ -60,7 +58,7 @@ exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
     },
   })
   return new Disposable(
-    () => fromCallback(() => fuse.unmount()),
-    fromCallback(() => fuse.mount())
+    () => fromCallback(cb => fuse.unmount(cb)),
+    fromCallback(cb => fuse.mount(cb))
   )
 })
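The last hunk fixes a real bug: `fromCallback(fn)` resolves only once `fn` invokes the node-style callback it receives, so the old `() => fuse.unmount()` dropped the callback and the promise never settled. A small sketch of the difference, using `setTimeout` as a stand-in for the fuse calls:

```js
import { fromCallback } from 'promise-toolbox'

// broken: the callback is never passed on, so this promise never resolves
// const hangs = fromCallback(() => setTimeout(() => {}, 10))

// fixed: the node-style callback is forwarded, resolving after ~10 ms
await fromCallback(cb => setTimeout(cb, 10))
```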
@@ -1,6 +1,6 @@
 {
   "name": "@vates/fuse-vhd",
-  "version": "1.0.0",
+  "version": "2.1.0",
   "license": "ISC",
   "private": false,
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
@@ -15,13 +15,18 @@
     "url": "https://vates.fr"
   },
   "engines": {
-    "node": ">=10.0"
+    "node": ">=14"
   },
+  "main": "./index.mjs",
   "dependencies": {
+    "@xen-orchestra/fs": "^4.1.4",
     "fuse-native": "^2.2.6",
     "lru-cache": "^7.14.0",
     "promise-toolbox": "^0.21.0",
-    "vhd-lib": "^4.4.0"
+    "vhd-lib": "^4.9.0"
   },
+  "bin": {
+    "xo-fuse-vhd": "./cli.mjs"
+  },
   "scripts": {
     "postversion": "npm publish --access public"
@@ -17,4 +17,14 @@ map.get(['foo', 'bar']) // 2
 map.get(['bar', 'foo']) // 3
 map.get([OBJ]) // 4
 map.get([{}]) // undefined
+
+map.delete([])
+
+for (const [key, value] of map.entries()) {
+  console.log(key, value)
+}
+
+for (const value of map.values()) {
+  console.log(value)
+}
 ```
@@ -35,6 +35,16 @@ map.get(['foo', 'bar']) // 2
 map.get(['bar', 'foo']) // 3
 map.get([OBJ]) // 4
 map.get([{}]) // undefined
+
+map.delete([])
+
+for (const [key, value] of map.entries()) {
+  console.log(key, value)
+}
+
+for (const value of map.values()) {
+  console.log(value)
+}
 ```

 ## Contributions
@@ -36,14 +36,31 @@ function del(node, i, keys) {
   return node
 }

+function* entries(node, key) {
+  if (node !== undefined) {
+    if (node instanceof Node) {
+      const { value } = node
+      if (value !== undefined) {
+        yield [key, node.value]
+      }
+
+      for (const [childKey, child] of node.children.entries()) {
+        yield* entries(child, key.concat(childKey))
+      }
+    } else {
+      yield [key, node]
+    }
+  }
+}
+
 function get(node, i, keys) {
   return i === keys.length
     ? node instanceof Node
       ? node.value
      : node
     : node instanceof Node
-    ? get(node.children.get(keys[i]), i + 1, keys)
-    : undefined
+      ? get(node.children.get(keys[i]), i + 1, keys)
+      : undefined
 }

 function set(node, i, keys, value) {
@@ -69,6 +86,22 @@ function set(node, i, keys, value) {
   return node
 }

+function* values(node) {
+  if (node !== undefined) {
+    if (node instanceof Node) {
+      const { value } = node
+      if (value !== undefined) {
+        yield node.value
+      }
+      for (const child of node.children.values()) {
+        yield* values(child)
+      }
+    } else {
+      yield node
+    }
+  }
+}
+
 exports.MultiKeyMap = class MultiKeyMap {
   constructor() {
     // each node is either a value or a Node if it contains children
@@ -79,6 +112,10 @@ exports.MultiKeyMap = class MultiKeyMap {
     this._root = del(this._root, 0, keys)
   }

+  entries() {
+    return entries(this._root, [])
+  }
+
   get(keys) {
     return get(this._root, 0, keys)
   }
@@ -86,4 +123,8 @@
   set(keys, value) {
     this._root = set(this._root, 0, keys, value)
   }

+  values() {
+    return values(this._root)
+  }
 }
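A short usage sketch of the new iterators (the values are illustrative): because keys are matched element by element in the trie, a copy of a key array finds the same entry, and `entries()` yields the reconstructed key arrays.

```js
const { MultiKeyMap } = require('@vates/multi-key-map')

const map = new MultiKeyMap()
map.set(['foo', 'bar'], 1)
map.set(['bar', 'foo'], 2)

// keys are compared element by element, so a copied array still matches
console.log(map.get(['foo', 'bar'].slice())) // 1

for (const [key, value] of map.entries()) {
  console.log(key, value) // [ 'foo', 'bar' ] 1, then [ 'bar', 'foo' ] 2
}
console.log(Array.from(map.values())) // [ 1, 2 ]
```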
@@ -19,7 +19,7 @@ describe('MultiKeyMap', () => {
       // reverse composite key
       ['bar', 'foo'],
     ]
-    const values = keys.map(() => ({}))
+    const values = keys.map(() => Math.random())

     // set all values first to make sure they are all stored and not only the
     // last one
@@ -27,6 +27,12 @@ describe('MultiKeyMap', () => {
       map.set(key, values[i])
     })

+    assert.deepEqual(
+      Array.from(map.entries()),
+      keys.map((key, i) => [key, values[i]])
+    )
+    assert.deepEqual(Array.from(map.values()), values)
+
    keys.forEach((key, i) => {
       // copy the key to make sure the array itself is not the key
       assert.strictEqual(map.get(key.slice()), values[i])
@@ -18,7 +18,7 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "0.1.0",
+  "version": "0.2.0",
   "engines": {
     "node": ">=8.10"
   },
@@ -1,42 +0,0 @@
-'use strict'
-exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
-exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
-exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
-exports.NBD_OPT_EXPORT_NAME = 1
-exports.NBD_OPT_ABORT = 2
-exports.NBD_OPT_LIST = 3
-exports.NBD_OPT_STARTTLS = 5
-exports.NBD_OPT_INFO = 6
-exports.NBD_OPT_GO = 7
-
-exports.NBD_FLAG_HAS_FLAGS = 1 << 0
-exports.NBD_FLAG_READ_ONLY = 1 << 1
-exports.NBD_FLAG_SEND_FLUSH = 1 << 2
-exports.NBD_FLAG_SEND_FUA = 1 << 3
-exports.NBD_FLAG_ROTATIONAL = 1 << 4
-exports.NBD_FLAG_SEND_TRIM = 1 << 5
-
-exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
-
-exports.NBD_CMD_FLAG_FUA = 1 << 0
-exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
-exports.NBD_CMD_FLAG_DF = 1 << 2
-exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
-exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
-
-exports.NBD_CMD_READ = 0
-exports.NBD_CMD_WRITE = 1
-exports.NBD_CMD_DISC = 2
-exports.NBD_CMD_FLUSH = 3
-exports.NBD_CMD_TRIM = 4
-exports.NBD_CMD_CACHE = 5
-exports.NBD_CMD_WRITE_ZEROES = 6
-exports.NBD_CMD_BLOCK_STATUS = 7
-exports.NBD_CMD_RESIZE = 8
-
-exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
-exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
-exports.NBD_REPLY_ACK = 1
-
-exports.NBD_DEFAULT_PORT = 10809
-exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
@vates/nbd-client/constants.mjs (41 added lines, new file)
@@ -0,0 +1,41 @@
+export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
+export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
+export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
+export const NBD_OPT_EXPORT_NAME = 1
+export const NBD_OPT_ABORT = 2
+export const NBD_OPT_LIST = 3
+export const NBD_OPT_STARTTLS = 5
+export const NBD_OPT_INFO = 6
+export const NBD_OPT_GO = 7
+
+export const NBD_FLAG_HAS_FLAGS = 1 << 0
+export const NBD_FLAG_READ_ONLY = 1 << 1
+export const NBD_FLAG_SEND_FLUSH = 1 << 2
+export const NBD_FLAG_SEND_FUA = 1 << 3
+export const NBD_FLAG_ROTATIONAL = 1 << 4
+export const NBD_FLAG_SEND_TRIM = 1 << 5
+
+export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
+
+export const NBD_CMD_FLAG_FUA = 1 << 0
+export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
+export const NBD_CMD_FLAG_DF = 1 << 2
+export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
+export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4
+
+export const NBD_CMD_READ = 0
+export const NBD_CMD_WRITE = 1
+export const NBD_CMD_DISC = 2
+export const NBD_CMD_FLUSH = 3
+export const NBD_CMD_TRIM = 4
+export const NBD_CMD_CACHE = 5
+export const NBD_CMD_WRITE_ZEROES = 6
+export const NBD_CMD_BLOCK_STATUS = 7
+export const NBD_CMD_RESIZE = 8
+
+export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
+export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
+export const NBD_REPLY_ACK = 1
+
+export const NBD_DEFAULT_PORT = 10809
+export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
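The `NBD_FLAG_*` values above are single-bit masks (`1 << n`), so a server's transmission-flags word is tested with bitwise AND; a small illustrative sketch (the `isReadOnly` helper is not part of the package):

```js
import { NBD_FLAG_HAS_FLAGS, NBD_FLAG_READ_ONLY } from './constants.mjs'

// a flag is set iff its bit survives the AND
function isReadOnly(transmissionFlags) {
  return (transmissionFlags & NBD_FLAG_HAS_FLAGS) !== 0 && (transmissionFlags & NBD_FLAG_READ_ONLY) !== 0
}

console.log(isReadOnly(0b01)) // false: HAS_FLAGS only
console.log(isReadOnly(0b11)) // true: HAS_FLAGS and READ_ONLY
```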
@@ -1,8 +1,10 @@
-'use strict'
-const assert = require('node:assert')
-const { Socket } = require('node:net')
-const { connect } = require('node:tls')
-const {
+import assert from 'node:assert'
+import { Socket } from 'node:net'
+import { connect } from 'node:tls'
+import { fromCallback, pRetry, pDelay, pTimeout, pFromCallback } from 'promise-toolbox'
+import { readChunkStrict } from '@vates/read-chunk'
+import { createLogger } from '@xen-orchestra/log'
+import {
   INIT_PASSWD,
   NBD_CMD_READ,
   NBD_DEFAULT_BLOCK_SIZE,
@@ -17,16 +19,12 @@ const {
   NBD_REQUEST_MAGIC,
   OPTS_MAGIC,
   NBD_CMD_DISC,
-} = require('./constants.js')
-const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
-const { readChunkStrict } = require('@vates/read-chunk')
-const { createLogger } = require('@xen-orchestra/log')
-
+} from './constants.mjs'
 const { warn } = createLogger('vates:nbd-client')

 // documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

-module.exports = class NbdClient {
+export default class NbdClient {
   #serverAddress
   #serverCert
   #serverPort
@@ -40,6 +38,7 @@ module.exports = class NbdClient {
   #readBlockRetries
   #reconnectRetry
   #connectTimeout
+  #messageTimeout

   // AFAIK, there is no guaranty the server answers in the same order as the queries
   // so we handle a backlog of command waiting for response and handle concurrency manually
@@ -52,7 +51,14 @@ module.exports = class NbdClient {
   #reconnectingPromise
   constructor(
     { address, port = NBD_DEFAULT_PORT, exportname, cert },
-    { connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
+    {
+      connectTimeout = 6e4,
+      messageTimeout = 6e4,
+      waitBeforeReconnect = 1e3,
+      readAhead = 10,
+      readBlockRetries = 5,
+      reconnectRetry = 5,
+    } = {}
   ) {
     this.#serverAddress = address
     this.#serverPort = port
@@ -63,6 +69,7 @@ module.exports = class NbdClient {
     this.#readBlockRetries = readBlockRetries
     this.#reconnectRetry = reconnectRetry
     this.#connectTimeout = connectTimeout
+    this.#messageTimeout = messageTimeout
   }

   get exportSize() {
@@ -115,13 +122,27 @@ module.exports = class NbdClient {
     if (!this.#connected) {
       return
     }
-    this.#connected = false
+    const socket = this.#serverSocket
+
+    const queryId = this.#nextCommandQueryId
+    this.#nextCommandQueryId++

     const buffer = Buffer.alloc(28)
     buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
     buffer.writeInt16BE(0, 4) // no command flags for a disconnect
     buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
-    await this.#write(buffer)
-    await this.#serverSocket.destroy()
+    buffer.writeBigUInt64BE(queryId, 8)
+    buffer.writeBigUInt64BE(0n, 16)
+    buffer.writeInt32BE(0, 24)
+    const promise = pFromCallback(cb => {
+      socket.end(buffer, 'utf8', cb)
+    })
+    try {
+      await pTimeout.call(promise, this.#messageTimeout)
+    } catch (error) {
+      socket.destroy()
+    }
     this.#serverSocket = undefined
     this.#connected = false
   }
@@ -195,11 +216,13 @@ module.exports = class NbdClient {
   }

   #read(length) {
-    return readChunkStrict(this.#serverSocket, length)
+    const promise = readChunkStrict(this.#serverSocket, length)
+    return pTimeout.call(promise, this.#messageTimeout)
   }

   #write(buffer) {
-    return fromCallback.call(this.#serverSocket, 'write', buffer)
+    const promise = fromCallback.call(this.#serverSocket, 'write', buffer)
+    return pTimeout.call(promise, this.#messageTimeout)
   }

   async #readInt32() {
@@ -232,19 +255,20 @@ module.exports = class NbdClient {
     }
     try {
       this.#waitingForResponse = true
-      const magic = await this.#readInt32()
+      const buffer = await this.#read(16)
+      const magic = buffer.readInt32BE(0)

       if (magic !== NBD_REPLY_MAGIC) {
         throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
       }

-      const error = await this.#readInt32()
+      const error = buffer.readInt32BE(4)
       if (error !== 0) {
         // @todo use error code from constants.mjs
         throw new Error(`GOT ERROR CODE : ${error}`)
       }

-      const blockQueryId = await this.#readInt64()
+      const blockQueryId = buffer.readBigUInt64BE(8)
       const query = this.#commandQueryBacklog.get(blockQueryId)
       if (!query) {
         throw new Error(` no query associated with id ${blockQueryId}`)
@@ -265,7 +289,7 @@ module.exports = class NbdClient {
     }
   }

-  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
+  async #readBlock(index, size) {
     // we don't want to add anything in backlog while reconnecting
     if (this.#reconnectingPromise) {
       await this.#reconnectingPromise
@@ -281,7 +305,13 @@ module.exports = class NbdClient {
     buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
     buffer.writeBigUInt64BE(queryId, 8)
     // byte offset in the raw disk
-    buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
+    const offset = BigInt(index) * BigInt(size)
+    const remaining = this.#exportSize - offset
+    if (remaining < BigInt(size)) {
+      size = Number(remaining)
+    }
+
+    buffer.writeBigUInt64BE(offset, 16)
     buffer.writeInt32BE(size, 24)

     return new Promise((resolve, reject) => {
@@ -307,45 +337,13 @@ module.exports = class NbdClient {
     })
   }

-  async *readBlocks(indexGenerator) {
-    // default : read all blocks
-    if (indexGenerator === undefined) {
-      const exportSize = this.#exportSize
-      const chunkSize = 2 * 1024 * 1024
-      indexGenerator = function* () {
-        const nbBlocks = Math.ceil(exportSize / chunkSize)
-        for (let index = 0; index < nbBlocks; index++) {
-          yield { index, size: chunkSize }
-        }
-      }
-    }
-    const readAhead = []
-    const readAheadMaxLength = this.#readAhead
-    const makeReadBlockPromise = (index, size) => {
-      const promise = pRetry(() => this.readBlock(index, size), {
-        tries: this.#readBlockRetries,
-        onRetry: async err => {
-          warn('will retry reading block ', index, err)
-          await this.reconnect()
-        },
-      })
-      // error is handled during unshift
-      promise.catch(() => {})
-      return promise
-    }
-
-    // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
-    for (const { index, size } of indexGenerator()) {
-      // stack readAheadMaxLength promises before starting to handle the results
-      if (readAhead.length === readAheadMaxLength) {
-        // any error will stop reading blocks
-        yield readAhead.shift()
-      }
-
-      readAhead.push(makeReadBlockPromise(index, size))
-    }
-    while (readAhead.length > 0) {
-      yield readAhead.shift()
-    }
+  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
+    return pRetry(() => this.#readBlock(index, size), {
+      tries: this.#readBlockRetries,
+      onRetry: async err => {
+        warn('will retry reading block ', index, err)
+        await this.reconnect()
+      },
+    })
  }
 }
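For reference, the patch above replaces three sequential socket reads with a single 16-byte read of the NBD simple-reply header; a sketch of the fixed-offset layout it decodes (the helper name is illustrative, not part of the package):

```js
// NBD simple reply header: magic (4 B) | error (4 B) | handle (8 B)
function parseSimpleReply(buffer) {
  return {
    magic: buffer.readInt32BE(0), // must equal NBD_REPLY_MAGIC (0x67446698)
    error: buffer.readInt32BE(4), // 0 on success
    handle: buffer.readBigUInt64BE(8), // echoes the request's queryId
  }
}
```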
@vates/nbd-client/multi.mjs (85 added lines, new file)
@@ -0,0 +1,85 @@
+import { asyncEach } from '@vates/async-each'
+import { NBD_DEFAULT_BLOCK_SIZE } from './constants.mjs'
+import NbdClient from './index.mjs'
+import { createLogger } from '@xen-orchestra/log'
+
+const { warn } = createLogger('vates:nbd-client:multi')
+
+export default class MultiNbdClient {
+  #clients = []
+  #readAhead
+
+  get exportSize() {
+    return this.#clients[0].exportSize
+  }
+
+  constructor(settings, { nbdConcurrency = 8, readAhead = 16, ...options } = {}) {
+    this.#readAhead = readAhead
+    if (!Array.isArray(settings)) {
+      settings = [settings]
+    }
+    for (let i = 0; i < nbdConcurrency; i++) {
+      this.#clients.push(
+        new NbdClient(settings[i % settings.length], { ...options, readAhead: Math.ceil(readAhead / nbdConcurrency) })
+      )
+    }
+  }
+
+  async connect() {
+    const connectedClients = []
+    for (const clientId in this.#clients) {
+      const client = this.#clients[clientId]
+      try {
+        await client.connect()
+        connectedClients.push(client)
+      } catch (err) {
+        client.disconnect().catch(() => {})
+        warn(`can't connect to one nbd client`, { err })
+      }
+    }
+    if (connectedClients.length === 0) {
+      throw new Error(`Fail to connect to any Nbd client`)
+    }
+    if (connectedClients.length < this.#clients.length) {
+      warn(
+        `incomplete connection by multi Nbd, only ${connectedClients.length} over ${this.#clients.length} expected clients`
+      )
+      this.#clients = connectedClients
+    }
+  }
+
+  async disconnect() {
+    await asyncEach(this.#clients, client => client.disconnect(), {
+      stopOnError: false,
+    })
+  }
+
+  async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
+    const clientId = index % this.#clients.length
+    return this.#clients[clientId].readBlock(index, size)
+  }
+
+  async *readBlocks(indexGenerator) {
+    // default : read all blocks
+    const readAhead = []
+    const makeReadBlockPromise = (index, size) => {
+      const promise = this.readBlock(index, size)
+      // error is handled during unshift
+      promise.catch(() => {})
+      return promise
+    }
+
+    // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
+    for (const { index, size } of indexGenerator()) {
+      // stack readAheadMaxLength promises before starting to handle the results
+      if (readAhead.length === this.#readAhead) {
+        // any error will stop reading blocks
+        yield readAhead.shift()
+      }
+
+      readAhead.push(makeReadBlockPromise(index, size))
+    }
+    while (readAhead.length > 0) {
+      yield readAhead.shift()
+    }
+  }
+}
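A hedged usage sketch of `MultiNbdClient` (the connection settings and the `multi.mjs` import path are assumptions; the index generator mirrors the default the old single-client `readBlocks` used):

```js
import MultiNbdClient from '@vates/nbd-client/multi.mjs'

const client = new MultiNbdClient(
  { address: '192.0.2.10', exportname: 'MY_EXPORT' }, // hypothetical server
  { nbdConcurrency: 4, readAhead: 16 }
)
await client.connect()

// readBlock() round-robins over the underlying connections (index % clients)
const chunkSize = 2 * 1024 * 1024
function* allBlocks() {
  const nbBlocks = Math.ceil(Number(client.exportSize) / chunkSize)
  for (let index = 0; index < nbBlocks; index++) {
    yield { index, size: chunkSize }
  }
}
for await (const block of client.readBlocks(allBlocks)) {
  // consume the Buffer, e.g. write it to a stream
}
await client.disconnect()
```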
@@ -1,76 +0,0 @@
-'use strict'
-const NbdClient = require('./index.js')
-const { spawn } = require('node:child_process')
-const fs = require('node:fs/promises')
-const { test } = require('tap')
-const tmp = require('tmp')
-const { pFromCallback } = require('promise-toolbox')
-const { asyncEach } = require('@vates/async-each')
-
-const FILE_SIZE = 2 * 1024 * 1024
-
-async function createTempFile(size) {
-  const tmpPath = await pFromCallback(cb => tmp.file(cb))
-  const data = Buffer.alloc(size, 0)
-  for (let i = 0; i < size; i += 4) {
-    data.writeUInt32BE(i, i)
-  }
-  await fs.writeFile(tmpPath, data)
-
-  return tmpPath
-}
-
-test('it works with unsecured network', async tap => {
-  const path = await createTempFile(FILE_SIZE)
-
-  const nbdServer = spawn(
-    'nbdkit',
-    [
-      'file',
-      path,
-      '--newstyle', //
-      '--exit-with-parent',
-      '--read-only',
-      '--export-name=MY_SECRET_EXPORT',
-    ],
-    {
-      stdio: ['inherit', 'inherit', 'inherit'],
-    }
-  )
-
-  const client = new NbdClient({
-    address: 'localhost',
-    exportname: 'MY_SECRET_EXPORT',
-    secure: false,
-  })
-
-  await client.connect()
-  tap.equal(client.exportSize, BigInt(FILE_SIZE))
-  const CHUNK_SIZE = 128 * 1024 // non default size
-  const indexes = []
-  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
-    indexes.push(i)
-  }
-  // read mutiple blocks in parallel
-  await asyncEach(
-    indexes,
-    async i => {
-      const block = await client.readBlock(i, CHUNK_SIZE)
-      let blockOk = true
-      let firstFail
-      for (let j = 0; j < CHUNK_SIZE; j += 4) {
-        const wanted = i * CHUNK_SIZE + j
-        const found = block.readUInt32BE(j)
-        blockOk = blockOk && found === wanted
-        if (!blockOk && firstFail === undefined) {
-          firstFail = j
-        }
-      }
-      tap.ok(blockOk, `check block ${i} content`)
-    },
-    { concurrency: 8 }
-  )
-  await client.disconnect()
-  nbdServer.kill()
-  await fs.unlink(path)
-})
@@ -13,24 +13,25 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "1.2.0",
+  "version": "3.0.0",
   "engines": {
     "node": ">=14.0"
   },
   "main": "./index.mjs",
   "dependencies": {
     "@vates/async-each": "^1.0.0",
-    "@vates/read-chunk": "^1.1.1",
+    "@vates/read-chunk": "^1.2.0",
     "@xen-orchestra/async-map": "^0.1.2",
     "@xen-orchestra/log": "^0.6.0",
     "promise-toolbox": "^0.21.0",
-    "xen-api": "^1.3.1"
+    "xen-api": "^2.0.1"
   },
   "devDependencies": {
-    "tap": "^16.3.0",
+    "tap": "^18.7.0",
     "tmp": "^0.2.1"
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test-integration": "tap --lines 70 --functions 36 --branches 54 --statements 69 *.integ.js"
+    "test-integration": "tap --allow-incomplete-coverage"
   }
 }
182
@vates/nbd-client/tests/ca-cert.pem
Normal file
@@ -0,0 +1,182 @@
Public Key Info:
	Public Key Algorithm: RSA
	Key Security Level: High (3072 bits)

modulus:
	00:be:92:be:df:de:0a:ab:38:fc:1a:c0:1a:58:4d:86
	b8:1f:25:10:7d:19:05:17:bf:02:3d:e9:ef:f8:c0:04
	5d:6f:98:de:5c:dd:c3:0f:e2:61:61:e4:b5:9c:42:ac
	3e:af:fd:30:10:e1:54:32:66:75:f6:80:90:85:05:a0
	6a:14:a2:6f:a7:2e:f0:f3:52:94:2a:f2:34:fc:0d:b4
	fb:28:5d:1c:11:5c:59:6e:63:34:ba:b3:fd:73:b1:48
	35:00:84:53:da:6a:9b:84:ab:64:b1:a1:2b:3a:d1:5a
	d7:13:7c:12:2a:4e:72:e9:96:d6:30:74:c5:71:05:14
	4b:2d:01:94:23:67:4e:37:3c:1e:c1:a0:bc:34:04:25
	21:11:fb:4b:6b:53:74:8f:90:93:57:af:7f:3b:78:d6
	a4:87:fe:7d:ed:20:11:8b:70:54:67:b8:c9:f5:c0:6b
	de:4e:e7:a5:79:ff:f7:ad:cf:10:57:f5:51:70:7b:54
	68:28:9e:b9:c2:10:7b:ab:aa:11:47:9f:ec:e6:2f:09
	44:4a:88:5b:dd:8c:10:b4:c4:03:25:06:d9:e0:9f:a0
	0d:cf:94:4b:3b:fa:a5:17:2c:e4:67:c4:17:6a:ab:d8
	c8:7a:16:41:b9:91:b7:9c:ae:8c:94:be:26:61:51:71
	c1:a6:39:39:97:75:28:a9:0e:21:ea:f0:bd:71:4a:8c
	e1:f8:1d:a9:22:2f:10:a8:1b:e5:a4:9a:fd:0f:fa:c6
	20:bc:96:99:79:c6:ba:a4:1f:3e:d4:91:c5:af:bb:71
	0a:5a:ef:69:9c:64:69:ce:5a:fe:3f:c2:24:f4:26:d4
	3d:ab:ab:9a:f0:f6:f1:b1:64:a9:f4:e2:34:6a:ab:2e
	95:47:b9:07:5a:39:c6:95:9c:a9:e8:ed:71:dd:c1:21
	16:c8:2d:4c:2c:af:06:9d:c6:fa:fe:c5:2a:6c:b4:c3
	d5:96:fc:5e:fd:ec:1c:30:b4:9d:cb:29:ef:a8:50:1c
	21:

public exponent:
	01:00:01:

private exponent:
	25:37:c5:7d:35:01:02:65:73:9e:c9:cb:9b:59:30:a9
	3e:b3:df:5f:7f:06:66:97:d0:19:45:59:af:4b:d8:ce
	62:a0:09:35:3b:bd:ff:99:27:89:95:bf:fe:0f:6b:52
	26:ce:9c:97:7f:5a:11:29:bf:79:ef:ab:c9:be:ca:90
	4d:0d:58:1e:df:65:01:30:2c:6d:a2:b5:c4:4f:ec:fb
	6b:eb:9b:32:ac:c5:6e:70:83:78:be:f4:0d:a7:1e:c1
	f3:22:e4:b9:70:3e:85:0f:6f:ef:dc:d8:f3:78:b5:73
	f1:83:36:8c:fa:9b:28:91:63:ad:3c:f0:de:5c:ae:94
	eb:ea:36:03:20:06:bf:74:c7:50:eb:52:36:1a:65:21
	eb:40:17:7f:93:61:dd:33:d0:02:bc:ec:6d:31:f1:41
	5a:a9:d1:f0:00:66:4c:c4:18:47:d5:67:e3:cd:bb:83
	44:07:ab:62:83:21:dc:d8:e6:89:37:08:bb:9d:ea:62
	c2:5d:ce:85:c2:dc:48:27:0c:a4:23:61:b7:30:e7:26
	44:dc:1e:5c:2e:16:35:2b:2e:a6:e6:a4:ce:1f:9b:e9
	fe:96:fa:49:1d:fb:2a:df:bc:bf:46:da:52:f8:37:8a
	84:ab:e4:73:e6:46:56:b5:b4:3d:e1:63:eb:02:8e:d7
	67:96:c4:dc:28:6d:6b:b6:0c:a3:0b:db:87:29:ad:f9
	ec:73:b6:55:a3:40:32:13:84:c7:2f:33:74:04:dc:42
	00:11:9c:fb:fc:62:35:b3:82:c3:3c:28:80:e8:09:a8
	97:c7:c1:2e:3d:27:fa:4f:9b:fc:c2:34:58:41:5c:a1
	e2:70:2e:2f:82:ad:bd:bd:8e:dd:23:12:25:de:89:70
	60:75:48:90:80:ac:55:74:51:6f:49:9e:7f:63:41:8b
	3c:b1:f5:c3:6b:4b:5a:50:a6:4d:38:e8:82:c2:04:c8
	30:fd:06:9b:c1:04:27:b6:63:3a:5e:f5:4d:00:c3:d1


prime1:
	00:f6:00:2e:7d:89:61:24:16:5e:87:ca:18:6c:03:b8
	b4:33:df:4a:a7:7f:db:ed:39:15:41:12:61:4f:4e:b4
	de:ab:29:d9:0c:6c:01:7e:53:2e:ee:e7:5f:a2:e4:6d
	c6:4b:07:4e:d8:a3:ae:45:06:97:bd:18:a3:e9:dd:29
	54:64:6d:f0:af:08:95:ae:ae:3e:71:63:76:2a:a1:18
	c4:b1:fc:bc:3d:42:15:74:b3:c5:38:1f:5d:92:f1:b2
	c6:3f:10:fe:35:1a:c6:b1:ce:70:38:ff:08:5c:de:61
	79:c7:50:91:22:4d:e9:c8:18:49:e2:5c:91:84:86:e2
	4d:0f:6e:9b:0d:81:df:aa:f3:59:75:56:e9:33:18:dd
	ab:39:da:e2:25:01:05:a1:6e:23:59:15:2c:89:35:c7
	ae:9c:c7:ea:88:9a:1a:f3:48:07:11:82:59:79:8c:62
	53:06:37:30:14:b3:82:b1:50:fc:ae:b8:f7:1c:57:44
	7d:

prime2:
	00:c6:51:cc:dc:88:2e:cf:98:90:10:19:e0:d3:a4:d1
	3f:dc:b0:29:d3:bb:26:ee:eb:00:17:17:d1:d1:bb:9b
	34:b1:4e:af:b5:6c:1c:54:53:b4:bb:55:da:f7:78:cd
	38:b4:2e:3a:8c:63:80:3b:64:9c:b4:2b:cd:dd:50:0b
	05:d2:00:7a:df:8e:c3:e6:29:e0:9c:d8:40:b7:11:09
	f4:38:df:f6:ed:93:1e:18:d4:93:fa:8d:ee:82:9c:0f
	c1:88:26:84:9d:4f:ae:8a:17:d5:55:54:4c:c6:0a:ac
	4d:ec:33:51:68:0f:4b:92:2e:04:57:fe:15:f5:00:46
	5c:8e:ad:09:2c:e7:df:d5:36:7a:4e:bd:da:21:22:d7
	58:b4:72:93:94:af:34:cc:e2:b8:d0:4f:0b:5d:97:08
	12:19:17:34:c5:15:49:00:48:56:13:b8:45:4e:3b:f8
	bc:d5:ab:d9:6d:c2:4a:cc:01:1a:53:4d:46:50:49:3b
	75:

coefficient:
	63:67:50:29:10:6a:85:a3:dc:51:90:20:76:86:8c:83
	8e:d5:ff:aa:75:fd:b5:f8:31:b0:96:6c:18:1d:5b:ed
	a4:2e:47:8d:9c:c2:1e:2c:a8:6d:4b:10:a5:c2:53:46
	8a:9a:84:91:d7:fc:f5:cc:03:ce:b9:3d:5c:01:d2:27
	99:7b:79:89:4f:a1:12:e3:05:5d:ee:10:f6:8c:e6:ce
	5e:da:32:56:6d:6f:eb:32:b4:75:7b:94:49:d8:2d:9e
	4d:19:59:2e:e4:0b:bc:95:df:df:65:67:a1:dd:c6:2b
	99:f4:76:e8:9f:fa:57:1d:ca:f9:58:a9:ce:9b:30:5c
	42:8a:ba:05:e7:e2:15:45:25:bc:e9:68:c1:8b:1a:37
	cc:e1:aa:45:2e:94:f5:81:47:1e:64:7f:c0:c1:b7:a8
	21:58:18:a9:a0:ed:e0:27:75:bf:65:81:6b:e4:1d:5a
	b7:7e:df:d8:28:c6:36:21:19:c8:6e:da:ca:9e:da:84


exp1:
	00:ba:d7:fe:77:a9:0d:98:2c:49:56:57:c0:5e:e2:20
	ba:f6:1f:26:03:bc:d0:5d:08:9b:45:16:61:c4:ab:e2
	22:b1:dc:92:17:a6:3d:28:26:a4:22:1e:a8:7b:ff:86
	05:33:5d:74:9c:85:0d:cb:2d:ab:b8:9b:6b:7c:28:57
	c8:da:92:ca:59:17:6b:21:07:05:34:78:37:fb:3e:ea
	a2:13:12:04:23:7e:fa:ee:ed:cf:e0:c5:a9:fb:ff:0a
	2b:1b:21:9c:02:d7:b8:8c:ba:60:70:59:fc:8f:14:f4
	f2:5a:d9:ad:b2:61:7d:2c:56:8e:5f:98:b1:89:f8:2d
	10:1c:a5:84:ad:28:b4:aa:92:34:a3:34:04:e1:a3:84
	52:16:1a:52:e3:8a:38:2d:99:8a:cd:91:90:87:12:ca
	fc:ab:e6:08:14:03:00:6f:41:88:e4:da:9d:7c:fd:8c
	7c:c4:de:cb:ed:1d:3f:29:d0:7a:6b:76:df:71:ae:32
	bd:

exp2:
	4a:e9:d3:6c:ea:b4:64:0e:c9:3c:8b:c9:f5:a8:a8:b2
	6a:f6:d0:95:fe:78:32:7f:ea:c4:ce:66:9f:c7:32:55
	b1:34:7c:03:18:17:8b:73:23:2e:30:bc:4a:07:03:de
	8b:91:7a:e4:55:21:b7:4d:c6:33:f8:e8:06:d5:99:94
	55:43:81:26:b9:93:1e:7a:6b:32:54:2d:fd:f9:1d:bd
	77:4e:82:c4:33:72:87:06:a5:ef:5b:75:e1:38:7a:6b
	2c:b7:00:19:3c:64:3e:1d:ca:a4:34:f7:db:47:64:d6
	fa:86:58:15:ea:d1:2d:22:dc:d9:30:4d:b3:02:ab:91
	83:03:b2:17:98:6f:60:e6:f7:44:8f:4a:ba:81:a2:bf
	0b:4a:cc:9c:b9:a2:44:52:d0:65:3f:b6:97:5f:d9:d8
	9c:49:bb:d1:46:bd:10:b2:42:71:a8:85:e5:8b:99:e6
	1b:00:93:5d:76:ab:32:6c:a8:39:17:53:9c:38:4d:91


Public Key PIN:
	pin-sha256:ISh/UeFjUG5Gwrpx6hMUGQPvg9wOKjOkHmRbs4YjZqs=
Public Key ID:
	sha256:21287f51e163506e46c2ba71ea13141903ef83dc0e2a33a41e645bb3862366ab
	sha1:1a48455111ac45fb5807c5cdb7b20b896c52f0b6

-----BEGIN RSA PRIVATE KEY-----
MIIG4wIBAAKCAYEAvpK+394Kqzj8GsAaWE2GuB8lEH0ZBRe/Aj3p7/jABF1vmN5c
3cMP4mFh5LWcQqw+r/0wEOFUMmZ19oCQhQWgahSib6cu8PNSlCryNPwNtPsoXRwR
XFluYzS6s/1zsUg1AIRT2mqbhKtksaErOtFa1xN8EipOcumW1jB0xXEFFEstAZQj
Z043PB7BoLw0BCUhEftLa1N0j5CTV69/O3jWpIf+fe0gEYtwVGe4yfXAa95O56V5
//etzxBX9VFwe1RoKJ65whB7q6oRR5/s5i8JREqIW92MELTEAyUG2eCfoA3PlEs7
+qUXLORnxBdqq9jIehZBuZG3nK6MlL4mYVFxwaY5OZd1KKkOIerwvXFKjOH4Haki
LxCoG+Wkmv0P+sYgvJaZeca6pB8+1JHFr7txClrvaZxkac5a/j/CJPQm1D2rq5rw
9vGxZKn04jRqqy6VR7kHWjnGlZyp6O1x3cEhFsgtTCyvBp3G+v7FKmy0w9WW/F79
7BwwtJ3LKe+oUBwhAgMBAAECggGAJTfFfTUBAmVznsnLm1kwqT6z319/BmaX0BlF
Wa9L2M5ioAk1O73/mSeJlb/+D2tSJs6cl39aESm/ee+ryb7KkE0NWB7fZQEwLG2i
tcRP7Ptr65syrMVucIN4vvQNpx7B8yLkuXA+hQ9v79zY83i1c/GDNoz6myiRY608
8N5crpTr6jYDIAa/dMdQ61I2GmUh60AXf5Nh3TPQArzsbTHxQVqp0fAAZkzEGEfV
Z+PNu4NEB6tigyHc2OaJNwi7nepiwl3OhcLcSCcMpCNhtzDnJkTcHlwuFjUrLqbm
pM4fm+n+lvpJHfsq37y/RtpS+DeKhKvkc+ZGVrW0PeFj6wKO12eWxNwobWu2DKML
24cprfnsc7ZVo0AyE4THLzN0BNxCABGc+/xiNbOCwzwogOgJqJfHwS49J/pPm/zC
NFhBXKHicC4vgq29vY7dIxIl3olwYHVIkICsVXRRb0mef2NBizyx9cNrS1pQpk04
6ILCBMgw/QabwQQntmM6XvVNAMPRAoHBAPYALn2JYSQWXofKGGwDuLQz30qnf9vt
ORVBEmFPTrTeqynZDGwBflMu7udfouRtxksHTtijrkUGl70Yo+ndKVRkbfCvCJWu
rj5xY3YqoRjEsfy8PUIVdLPFOB9dkvGyxj8Q/jUaxrHOcDj/CFzeYXnHUJEiTenI
GEniXJGEhuJND26bDYHfqvNZdVbpMxjdqzna4iUBBaFuI1kVLIk1x66cx+qImhrz
SAcRgll5jGJTBjcwFLOCsVD8rrj3HFdEfQKBwQDGUczciC7PmJAQGeDTpNE/3LAp
07sm7usAFxfR0bubNLFOr7VsHFRTtLtV2vd4zTi0LjqMY4A7ZJy0K83dUAsF0gB6
347D5ingnNhAtxEJ9Djf9u2THhjUk/qN7oKcD8GIJoSdT66KF9VVVEzGCqxN7DNR
aA9Lki4EV/4V9QBGXI6tCSzn39U2ek692iEi11i0cpOUrzTM4rjQTwtdlwgSGRc0
xRVJAEhWE7hFTjv4vNWr2W3CSswBGlNNRlBJO3UCgcEAutf+d6kNmCxJVlfAXuIg
uvYfJgO80F0Im0UWYcSr4iKx3JIXpj0oJqQiHqh7/4YFM110nIUNyy2ruJtrfChX
yNqSylkXayEHBTR4N/s+6qITEgQjfvru7c/gxan7/worGyGcAte4jLpgcFn8jxT0
8lrZrbJhfSxWjl+YsYn4LRAcpYStKLSqkjSjNATho4RSFhpS44o4LZmKzZGQhxLK
/KvmCBQDAG9BiOTanXz9jHzE3svtHT8p0Hprdt9xrjK9AoHASunTbOq0ZA7JPIvJ
9aiosmr20JX+eDJ/6sTOZp/HMlWxNHwDGBeLcyMuMLxKBwPei5F65FUht03GM/jo
BtWZlFVDgSa5kx56azJULf35Hb13ToLEM3KHBqXvW3XhOHprLLcAGTxkPh3KpDT3
20dk1vqGWBXq0S0i3NkwTbMCq5GDA7IXmG9g5vdEj0q6gaK/C0rMnLmiRFLQZT+2
l1/Z2JxJu9FGvRCyQnGoheWLmeYbAJNddqsybKg5F1OcOE2RAoHAY2dQKRBqhaPc
UZAgdoaMg47V/6p1/bX4MbCWbBgdW+2kLkeNnMIeLKhtSxClwlNGipqEkdf89cwD
zrk9XAHSJ5l7eYlPoRLjBV3uEPaM5s5e2jJWbW/rMrR1e5RJ2C2eTRlZLuQLvJXf
32Vnod3GK5n0duif+lcdyvlYqc6bMFxCiroF5+IVRSW86WjBixo3zOGqRS6U9YFH
HmR/wMG3qCFYGKmg7eAndb9lgWvkHVq3ft/YKMY2IRnIbtrKntqE
-----END RSA PRIVATE KEY-----
158
@vates/nbd-client/tests/nbdclient.integ.mjs
Normal file
@@ -0,0 +1,158 @@
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'
import MultiNbdClient from '../multi.mjs'

const CHUNK_SIZE = 1024 * 1024 // non-default size
const FILE_SIZE = 1024 * 1024 * 9.5 // non-aligned file size

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

async function spawnNbdKit(path) {
  let tries = 5

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
      '--tls=on',
      '--tls-certificates=./tests/',
      // '--tls-verify-peer',
      // '--verbose',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  nbdServer.on('error', err => {
    console.error(err)
  })
  // wait for the server to be ready
  do {
    try {
      const socket = new Socket()
      await new Promise((resolve, reject) => {
        socket.connect(NBD_DEFAULT_PORT, 'localhost')
        socket.once('error', reject)
        socket.once('connect', resolve)
      })
      socket.destroy()
      break
    } catch (err) {
      tries--
      if (tries <= 0) {
        throw err
      } else {
        await new Promise(resolve => setTimeout(resolve, 1000))
      }
    }
  } while (true)
  return nbdServer
}

async function killNbdKit() {
  return new Promise((resolve, reject) =>
    exec('pkill -9 -f -o nbdkit', err => {
      err ? reject(err) : resolve()
    })
  )
}

test('it works with secured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  let nbdServer = await spawnNbdKit(path)
  const client = new MultiNbdClient(
    {
      address: '127.0.0.1',
      exportname: 'MY_SECRET_EXPORT',
      cert: `-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
`,
    },
    {
      nbdConcurrency: 1,
      readAhead: 2,
    }
  )

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  const nbdIterator = client.readBlocks(function* () {
    for (const index of indexes) {
      yield { index, size: CHUNK_SIZE }
    }
  })
  let i = 0
  for await (const block of nbdIterator) {
    let blockOk = block.length === Math.min(CHUNK_SIZE, FILE_SIZE - CHUNK_SIZE * i)
    let firstFail
    for (let j = 0; j < block.length; j += 4) {
      const wanted = i * CHUNK_SIZE + j
      const found = block.readUInt32BE(j)
      blockOk = blockOk && found === wanted
      if (!blockOk && firstFail === undefined) {
        firstFail = j
      }
    }
    tap.ok(blockOk, `check block ${i} content ${block.length}`)
    i++

    // simulate a flaky server: periodically kill and respawn nbdkit
    if (i % 7 === 0) {
      // kill the oldest nbdkit process
      await killNbdKit()
      nbdServer = await spawnNbdKit(path)
    }
  }
  await assert.rejects(() => client.readBlock(100, CHUNK_SIZE))

  await client.disconnect()
  // a double disconnection shouldn't pose any problem
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
21
@vates/nbd-client/tests/server-cert.pem
Normal file
@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
28
@vates/nbd-client/tests/server-key.pem
Normal file
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/8wLopj/iZY6i
jmpvgCJsl+zY0hQZQcIoaCs0H75u8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZol
evaSJLNT2Iolscvc2W9NCF4N1V6yzs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh
67u+uI40732AfQqD01BNCTD/uHRBlKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y
2SJVTeT4a1sSJixl6I1YPmt80FJhgq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULw
dJOGgmqGRDzgZKJS5UUpxe/ViEO459I18vIkgibaRYhENgmnP3lIzTOLlUe07tbS
ML5RGBbBAgMBAAECggEATLYiafcTHfgnZmjTOad0WoDnC4n9tVBV948WARlUooLS
duL3RQRHCLz9/ZaTuFA1XDpNcYyc/B/IZoU7aJGZR3+JSmJBjowpUphu+klVNNG4
i6lDRrzYlUI0hfdLjHsDTDBIKi91KcB0lix/VkvsrVQvDHwsiR2ZAIiVWAWQFKrR
5O3DhSTHbqyq47uR58rWr4Zf3zvZaUl841AS1yELzCiZqz7AenvyWphim0c0XA5d
I63CEShntHnEAA9OMcP8+BNf/3AmqB4welY+m8elB3aJNH+j7DKq/AWqaM5nl2PC
cS6qgpxwOyTxEOyj1xhwK5ZMRR3heW3NfutIxSOPlwKBgQDB9ZkrBeeGVtCISO7C
eCANzSLpeVrahTvaCSQLdPHsLRLDUc+5mxdpi3CaRlzYs3S1OWdAtyWX9mBryltF
qDPhCNjFDyHok4D3wLEWdS9oUVwEKUM8fOPW3tXLLiMM7p4862Qo7LqnqHzPqsnz
22iZo5yjcc7aLJ+VmFrbAowwOwKBgQD9WNCvczTd7Ymn7zEvdiAyNoS0OZ0orwEJ
zGaxtjqVguGklNfrb/UB+eKNGE80+YnMiSaFc9IQPetLntZdV0L7kWYdCI8kGDNA
DbVRCOp+z8DwAojlrb/zsYu23anQozT3WeHxVU66lNuyEQvSW2tJa8gN1htrD7uY
5KLibYrBMwKBgEM0iiHyJcrSgeb2/mO7o7+keJhVSDm3OInP6QFfQAQJihrLWiKB
rpcPjbCm+LzNUX8JqNEvpIMHB1nR/9Ye9frfSdzd5W3kzicKSVHywL5wkmWOtpFa
5Mcq5wFDtzlf5MxO86GKhRJauwRptRgdyhySKFApuva1x4XaCIEiXNjJAoGBAN82
t3c+HCBEv3o05rMYcrmLC1T3Rh6oQlPtwbVmByvfywsFEVCgrc/16MPD3VWhXuXV
GRmPuE8THxLbead30M5xhvShq+xzXgRbj5s8Lc9ZIHbW5OLoOS1vCtgtaQcoJOyi
Rs4pCVqe+QpktnO6lEZ2Libys+maTQEiwNibBxu9AoGAUG1V5aKMoXa7pmGeuFR6
ES+1NDiCt6yDq9BsLZ+e2uqvWTkvTGLLwvH6xf9a0pnnILd0AUTKAAaoUdZS6++E
cGob7fxMwEE+UETp0QBgLtfjtExMOFwr2avw8PV4CYEUkPUAm2OFB2Twh+d/PNfr
FAxF1rN47SBPNbFI8N4TFsg=
-----END PRIVATE KEY-----
1
@vates/node-vsphere-soap/.npmignore
Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
22
@vates/node-vsphere-soap/LICENSE
Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 reedog117

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
127
@vates/node-vsphere-soap/README.md
Normal file
@@ -0,0 +1,127 @@
forked from https://github.com/reedog117/node-vsphere-soap

# node-vsphere-soap

[Join the chat](https://gitter.im/reedog117/node-vsphere-soap?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

This is a Node.js module to connect to VMware vCenter servers and/or ESXi hosts and perform operations using the [vSphere Web Services API]. If you're feeling really adventurous, you can use this module to port vSphere operations from other languages (such as the Perl, Python, and Go libraries that exist) and have fully native Node.js code controlling your VMware virtual infrastructure!

This is very much in alpha.

## Authors

- Patrick C - [@reedog117]

## Version

0.0.2-5

## Installation

```sh
$ npm install node-vsphere-soap --save
```

## Sample Code

### To connect to a vCenter server:

```js
var nvs = require('node-vsphere-soap')
var vc = new nvs.Client(host, user, password, sslVerify)
vc.once('ready', function () {
  // perform work here
})
vc.once('error', function (err) {
  // handle error here
})
```

#### Arguments

- host = hostname or IP of vCenter/ESX/ESXi server
- user = username
- password = password
- sslVerify = true|false - set to false if you have self-signed/unverified certificates

#### Events

- ready = emits when the session is authenticated with the server
- error = emits when there's an error
  - _err_ contains the error

#### Client instance variables

- serviceContent - ServiceContent object retrieved by the RetrieveServiceContent API call
- userName - username of authenticated user
- fullName - full name of authenticated user

### To run a command:

```js
var vcCmd = vc.runCommand(commandToRun, arguments)
vcCmd.once('result', function (result, raw, soapHeader) {
  // handle results
})
vcCmd.once('error', function (err) {
  // handle errors
})
```

#### Arguments

- commandToRun = Method from the vSphere API
- arguments = JSON document containing arguments to send

#### Events

- result = emits when the server returns results for the command
  - _result_ contains the JSON-formatted result from the server
  - _raw_ contains the raw SOAP XML response from the server
  - _soapHeader_ contains any soapHeaders from the server
- error = emits when there's an error
  - _err_ contains the error

Make sure you check out **test/vsphere-soap.test.js** for examples on how to create commands to run

## Development

node-vsphere-soap uses a number of open source projects to work properly:

- [node.js] - evented I/O for the backend
- [node-soap] - SOAP client for Node.js
- [soap-cookie] - cookie authentication for the node-soap module
- [lodash] - for quickly manipulating JSON
- [lab] - testing engine
- [code] - assertion engine used with lab

Want to contribute? Great!

### Todo's

- Write More Tests
- Create Travis CI test harness with a fake vCenter Instance
- Add Code Comments

### Testing

I have been testing on a Mac with node v0.10.36 and both ESXi and vCenter 5.5.

To edit tests, edit the file **test/vsphere-soap.test.js**

To point the module at your own vCenter/ESXi host, edit **config-test.stub.js** and save it as **config-test.js**

To run test scripts:

```sh
$ npm test
```

## License

MIT

[vSphere Web Services API]: http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/right-pane.html
[node-soap]: https://github.com/vpulim/node-soap
[node.js]: http://nodejs.org/
[soap-cookie]: https://github.com/shanestillwell/soap-cookie
[code]: https://github.com/hapijs/code
[lab]: https://github.com/hapijs/lab
[lodash]: https://lodash.com/
[@reedog117]: http://www.twitter.com/reedog117
230
@vates/node-vsphere-soap/lib/client.mjs
Normal file
@@ -0,0 +1,230 @@
/*

  node-vsphere-soap

  client.mjs

  This file creates the Client class

  - when the class is instantiated, a connection will be made to the ESXi/vCenter server to verify that the creds are good
  - upon a bad login, the connection will be terminated

*/

import { EventEmitter } from 'events'
import axios from 'axios'
import https from 'node:https'
import util from 'util'
import soap from 'soap'
import Cookie from 'soap-cookie' // required for session persistence

// Client class
// inherits from EventEmitter
// possible events: connect, error, ready

export function Client(vCenterHostname, username, password, sslVerify) {
  this.status = 'disconnected'
  this.reconnectCount = 0

  sslVerify = typeof sslVerify !== 'undefined' ? sslVerify : false

  EventEmitter.call(this)

  // sslVerify argument handling
  if (sslVerify) {
    this.clientopts = {}
  } else {
    this.clientopts = {
      request: axios.create({
        httpsAgent: new https.Agent({
          rejectUnauthorized: false,
        }),
      }),
    }
  }

  this.connectionInfo = {
    host: vCenterHostname,
    user: username,
    password,
    sslVerify,
  }

  this._loginArgs = {
    userName: this.connectionInfo.user,
    password: this.connectionInfo.password,
  }

  this._vcUrl = 'https://' + this.connectionInfo.host + '/sdk/vimService.wsdl'

  // connect to the vCenter / ESXi host
  this.on('connect', this._connect)
  this.emit('connect')

  // close session
  this.on('close', this._close)

  return this
}

util.inherits(Client, EventEmitter)

Client.prototype.runCommand = function (command, args) {
  const self = this
  let cmdargs
  if (!args || args === null) {
    cmdargs = {}
  } else {
    cmdargs = args
  }

  const emitter = new EventEmitter()

  // check if client has successfully connected
  if (self.status === 'ready' || self.status === 'connecting') {
    self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
      if (err) {
        _soapErrorHandler(self, emitter, command, cmdargs, err)
      }
      if (command === 'Logout') {
        self.status = 'disconnected'
        process.removeAllListeners('beforeExit')
      }
      emitter.emit('result', result, raw, soapHeader)
    })
  } else {
    // if the connection is not ready or connecting, reconnect to the instance
    if (self.status === 'disconnected') {
      self.emit('connect')
    }
    self.once('ready', function () {
      self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
        if (err) {
          _soapErrorHandler(self, emitter, command, cmdargs, err)
        }
        if (command === 'Logout') {
          self.status = 'disconnected'
          process.removeAllListeners('beforeExit')
        }
        emitter.emit('result', result, raw, soapHeader)
      })
    })
  }

  return emitter
}

Client.prototype.close = function () {
  const self = this

  self.emit('close')
}

Client.prototype._connect = function () {
  const self = this

  if (self.status !== 'disconnected') {
    return
  }

  self.status = 'connecting'

  soap.createClient(
    self._vcUrl,
    self.clientopts,
    function (err, client) {
      if (err) {
        self.emit('error', err)
        throw err
      }

      self.client = client // save client for later use

      self
        .runCommand('RetrieveServiceContent', { _this: 'ServiceInstance' })
        .once('result', function (result, raw, soapHeader) {
          if (!result.returnval) {
            self.status = 'disconnected'
            self.emit('error', raw)
            return
          }

          self.serviceContent = result.returnval
          self.sessionManager = result.returnval.sessionManager
          const loginArgs = { _this: self.sessionManager, ...self._loginArgs }

          self
            .runCommand('Login', loginArgs)
            .once('result', function (result, raw, soapHeader) {
              self.authCookie = new Cookie(client.lastResponseHeaders)
              self.client.setSecurity(self.authCookie) // needed since vSphere SOAP WS uses cookies

              self.userName = result.returnval.userName
              self.fullName = result.returnval.fullName
              self.reconnectCount = 0

              self.status = 'ready'
              self.emit('ready')
              process.once('beforeExit', self._close)
            })
            .once('error', function (err) {
              self.status = 'disconnected'
              self.emit('error', err)
            })
        })
        .once('error', function (err) {
          self.status = 'disconnected'
          self.emit('error', err)
        })
    },
    self._vcUrl
  )
}

Client.prototype._close = function () {
  const self = this

  if (self.status === 'ready') {
    self
      .runCommand('Logout', { _this: self.sessionManager })
      .once('result', function () {
        self.status = 'disconnected'
      })
      .once('error', function () {
        /* don't care about errors during disconnection */
        self.status = 'disconnected'
      })
  } else {
    self.status = 'disconnected'
  }
}

function _soapErrorHandler(self, emitter, command, args, err) {
  err = err || { body: 'general error' }

  if (err.body.match(/session is not authenticated/)) {
    self.status = 'disconnected'
    process.removeAllListeners('beforeExit')

    if (self.reconnectCount < 10) {
      self.reconnectCount += 1
      self
        .runCommand(command, args)
        .once('result', function (result, raw, soapHeader) {
          emitter.emit('result', result, raw, soapHeader)
        })
        .once('error', function (err) {
          emitter.emit('error', err.body)
          throw err
        })
    } else {
      emitter.emit('error', err.body)
      throw err
    }
  } else {
    emitter.emit('error', err.body)
    throw err
  }
}

// end
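Since `Client` exposes an emitter-based API, callers typically wrap `runCommand` in a promise. A minimal sketch (not part of the diff; `runCommandAsync` is a hypothetical helper, the credentials are placeholders):

```js
import { Client } from './lib/client.mjs'

// wrap the emitter-based runCommand API in a promise
function runCommandAsync(client, command, args) {
  return new Promise((resolve, reject) => {
    client
      .runCommand(command, args)
      .once('result', (result, raw, soapHeader) => resolve({ result, raw, soapHeader }))
      .once('error', reject)
  })
}

const vc = new Client('vcenter.example.org', 'user', 'password', false)
vc.once('ready', async () => {
  const { result } = await runCommandAsync(vc, 'CurrentTime', { _this: 'ServiceInstance' })
  console.log(result.returnval)
  vc.close()
})
```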
38
@vates/node-vsphere-soap/package.json
Normal file
@@ -0,0 +1,38 @@
{
  "name": "@vates/node-vsphere-soap",
  "version": "2.0.0",
  "description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
  "main": "lib/client.mjs",
  "author": "reedog117",
  "repository": {
    "directory": "@vates/node-vsphere-soap",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "axios": "^1.4.0",
    "soap": "^1.0.0",
    "soap-cookie": "^0.10.1"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "keywords": [
    "vsphere",
    "vcenter",
    "api",
    "soap",
    "wsdl"
  ],
  "preferGlobal": false,
  "license": "MIT",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
  "engines": {
    "node": ">=14"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
11
@vates/node-vsphere-soap/test/config-test.stub.mjs
Normal file
@@ -0,0 +1,11 @@
// place your own credentials here for a vCenter or ESXi server
// this information will be used for connecting to a vCenter instance
// for module testing
// name the file config-test.mjs

export const vCenterTestCreds = {
  vCenterIP: 'vcsa',
  vCenterUser: 'vcuser',
  vCenterPassword: 'vcpw',
  vCenter: true,
}
138
@vates/node-vsphere-soap/test/vsphere-soap.test.mjs
Normal file
@@ -0,0 +1,138 @@
/*
  vsphere-soap.test.js

  tests for the vCenterConnectionInstance class
*/

import assert from 'assert'
import { describe, it } from 'test'

import * as vc from '../lib/client.mjs'

// eslint-disable-next-line n/no-missing-import
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'

const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)

describe('Client object initialization:', function () {
  it('provides a successful login', { timeout: 5000 }, function (t, done) {
    VItest.once('ready', function () {
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    }).once('error', function (err) {
      console.error(err)
      // this should fail if there's a problem
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    })
  })
})

describe('Client reconnection test:', function () {
  it('can successfully reconnect', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('Logout', { _this: VItest.serviceContent.sessionManager })
      .once('result', function (result) {
        // now we're logged out, so let's try running a command to test automatic re-login
        VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' })
          .once('result', function (result) {
            assert(result.returnval instanceof Date)
            done()
          })
          .once('error', function (err) {
            console.error(err)
          })
      })
      .once('error', function (err) {
        console.error(err)
      })
  })
})

// these tests don't work yet
describe('Client tests - query commands:', function () {
  it('retrieves current time', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('retrieves current time 2 (check for event clobbering)', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('can obtain the names of all Virtual Machines in the inventory', { timeout: 20000 }, function (t, done) {
    // get property collector
    const propertyCollector = VItest.serviceContent.propertyCollector
    // get view manager
    const viewManager = VItest.serviceContent.viewManager
    // get root folder
    const rootFolder = VItest.serviceContent.rootFolder

    let containerView, objectSpec, traversalSpec, propertySpec, propertyFilterSpec
    // this is the equivalent to
    VItest.runCommand('CreateContainerView', {
      _this: viewManager,
      container: rootFolder,
      type: ['VirtualMachine'],
      recursive: true,
    }).once('result', function (result) {
      // build all the data structures needed to query all the vm names
      containerView = result.returnval

      objectSpec = {
        attributes: { 'xsi:type': 'ObjectSpec' }, // setting attributes xsi:type is important or else the server may mis-recognize types!
        obj: containerView,
        skip: true,
      }

      traversalSpec = {
        attributes: { 'xsi:type': 'TraversalSpec' },
        name: 'traverseEntities',
        type: 'ContainerView',
        path: 'view',
        skip: false,
      }

      objectSpec = { ...objectSpec, selectSet: [traversalSpec] }

      propertySpec = {
        attributes: { 'xsi:type': 'PropertySpec' },
        type: 'VirtualMachine',
        pathSet: ['name'],
      }

      propertyFilterSpec = {
        attributes: { 'xsi:type': 'PropertyFilterSpec' },
        propSet: [propertySpec],
        objectSet: [objectSpec],
      }
      // TODO: research why it fails if propSet is declared after objectSet

      VItest.runCommand('RetrievePropertiesEx', {
        _this: propertyCollector,
        specSet: [propertyFilterSpec],
        options: { attributes: { type: 'RetrieveOptions' } },
      })
        .once('result', function (result, raw) {
          assert.notEqual(result.returnval.objects, null)
          if (Array.isArray(result.returnval.objects)) {
            assert.strictEqual(result.returnval.objects[0].obj.attributes.type, 'VirtualMachine')
          } else {
            assert.strictEqual(result.returnval.objects.obj.attributes.type, 'VirtualMachine')
          }
          done()
        })
        .once('error', function (err) {
          console.error('\n\nlast request : ' + VItest.client.lastRequest, err)
        })
    })
  })
})
@@ -1,5 +1,5 @@
 import { strict as assert } from 'node:assert'
-import { describe, it } from 'tap/mocha'
+import test from 'test'

 import {
   generateHotp,
@@ -11,6 +11,8 @@ import {
   verifyTotp,
 } from './index.mjs'

+const { describe, it } = test
+
 describe('generateSecret', function () {
   it('generates a string of 32 chars', async function () {
     const secret = generateSecret()
@@ -31,9 +31,9 @@
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test": "tap"
+    "test": "node--test"
   },
   "devDependencies": {
-    "tap": "^16.3.0"
+    "test": "^3.3.0"
   }
 }
@@ -1,7 +1,7 @@
 'use strict'

 const assert = require('assert/strict')
-const { describe, it } = require('tap').mocha
+const { describe, it } = require('test')

 const { every, not, some } = require('./')

@@ -32,9 +32,9 @@
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test": "tap"
+    "test": "node--test"
   },
   "devDependencies": {
-    "tap": "^16.0.1"
+    "test": "^3.3.0"
   }
 }
@@ -1,6 +1,7 @@
 'use strict'

 const assert = require('assert')
+const isUtf8 = require('isutf8')

 /**
  * Read a chunk of data from a stream.
@@ -21,41 +22,41 @@ const readChunk = (stream, size) =>
(this hunk only re-indents the existing code; the resulting expression is shown once)
   stream.errored != null
     ? Promise.reject(stream.errored)
     : stream.closed || stream.readableEnded
     ? Promise.resolve(null)
     : new Promise((resolve, reject) => {
         if (size !== undefined) {
           assert(size > 0)

           // per Node documentation:
           // > The size argument must be less than or equal to 1 GiB.
           assert(size < 1073741824)
         }

         function onEnd() {
           resolve(null)
           removeListeners()
         }
         function onError(error) {
           reject(error)
           removeListeners()
         }
         function onReadable() {
           const data = stream.read(size)
           if (data !== null) {
             resolve(data)
             removeListeners()
           }
         }
         function removeListeners() {
           stream.removeListener('end', onEnd)
           stream.removeListener('error', onError)
           stream.removeListener('readable', onReadable)
         }
         stream.on('end', onEnd)
         stream.on('error', onError)
         stream.on('readable', onReadable)
         onReadable()
       })
 exports.readChunk = readChunk

 /**
@@ -81,6 +82,13 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {

   if (size !== undefined && chunk.length !== size) {
     const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
+
+    // Buffer.isUtf8 is too recent for now
+    // TODO: replace the external package with Buffer.isUtf8 once the minimum supported Node version reaches 18
+    if (chunk.length < 1024 && isUtf8(chunk)) {
+      error.text = chunk.toString('utf8')
+    }
     Object.defineProperties(error, {
       chunk: {
         value: chunk,
@@ -103,42 +111,42 @@ async function skip(stream, size) {
(this hunk only re-indents the existing code; the resulting function body is shown once)
   return stream.errored != null
     ? Promise.reject(stream.errored)
     : size === 0 || stream.closed || stream.readableEnded
     ? Promise.resolve(0)
     : new Promise((resolve, reject) => {
         let left = size
         function onEnd() {
           resolve(size - left)
           removeListeners()
         }
         function onError(error) {
           reject(error)
           removeListeners()
         }
         function onReadable() {
           const data = stream.read()
           left -= data === null ? 0 : data.length
           if (left > 0) {
             // continue to read
           } else {
             // if more than wanted has been read, push back the rest
             if (left < 0) {
               stream.unshift(data.slice(left))
             }

             resolve(size)
             removeListeners()
           }
         }
         function removeListeners() {
           stream.removeListener('end', onEnd)
           stream.removeListener('error', onError)
           stream.removeListener('readable', onReadable)
         }
         stream.on('end', onEnd)
         stream.on('error', onError)
         stream.on('readable', onReadable)
         onReadable()
       })
 }
 exports.skip = skip
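A minimal usage sketch of the exported helpers (not part of the diff; the file name and sizes are placeholders):

```js
const { createReadStream } = require('node:fs')
const { readChunkStrict, skip } = require('@vates/read-chunk')

async function main() {
  // 'data.bin' is a placeholder path
  const stream = createReadStream('data.bin')
  await skip(stream, 512) // skip a 512-byte header
  const record = await readChunkStrict(stream, 1024) // rejects if fewer than 1024 bytes remain
  console.log(record.length)
}
main().catch(console.error)
```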
@@ -102,12 +102,37 @@ describe('readChunkStrict', function () {
     assert.strictEqual(error.chunk, undefined)
   })

-  it('throws if stream ends with not enough data', async () => {
+  it('throws if stream ends with not enough data, utf8', async () => {
     const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
     assert(error instanceof Error)
     assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
+    assert.strictEqual(error.text, 'foobar')
+    assert.deepEqual(error.chunk, Buffer.from('foobar'))
+  })
+
+  it('throws if stream ends with not enough data, non utf8', async () => {
+    const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
+    const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
+    assert(error instanceof Error)
+    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
+    assert.strictEqual(error.text, undefined)
+    assert.deepEqual(error.chunk, Buffer.concat(source))
+  })
+
+  it('throws if stream ends with not enough data, utf8, long data', async () => {
+    const source = Buffer.from('a'.repeat(1500))
+    const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
+    assert(error instanceof Error)
+    assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
+    assert.strictEqual(error.text, undefined)
+    assert.deepEqual(error.chunk, source)
+  })
+
+  it('succeeds', async () => {
+    const source = Buffer.from('a'.repeat(20))
+    const chunk = await readChunkStrict(makeStream([source]), 10)
+    assert.deepEqual(source.subarray(10), chunk)
   })
 })

 describe('skip', function () {
@@ -134,6 +159,16 @@ describe('skip', function () {
   it('returns less size if stream ends', async () => {
     assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
   })
+
+  it('puts back if it read too much', async () => {
+    let source = makeStream(['foo', 'bar'])
+    await skip(source, 1) // read part of data chunk
+    const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
+    assert.strictEqual(chunk, 'oo')
+
+    source = makeStream(['foo', 'bar'])
+    assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
+  })
 })

 describe('skipStrict', function () {
@@ -144,4 +179,9 @@ describe('skipStrict', function () {
     assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
     assert.deepEqual(error.bytesSkipped, 7)
   })
+  it('succeeds', async () => {
+    const source = makeStream(['foo', 'bar', 'baz'])
+    const res = await skipStrict(source, 4)
+    assert.strictEqual(res, undefined)
+  })
 })
@@ -19,7 +19,7 @@
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
-  "version": "1.1.1",
+  "version": "1.2.0",
   "engines": {
     "node": ">=8.10"
   },
@@ -33,5 +33,8 @@
   },
   "devDependencies": {
     "test": "^3.2.1"
+  },
+  "dependencies": {
+    "isutf8": "^4.0.0"
   }
 }
@@ -27,7 +27,7 @@
   "license": "ISC",
   "version": "0.1.0",
   "engines": {
-    "node": ">=10"
+    "node": ">=12.3"
   },
   "scripts": {
     "postversion": "npm publish --access public",
@@ -2,10 +2,8 @@
 import { Task } from '@vates/task'

 const task = new Task({
-  // data in this object will be sent along the *start* event
-  //
-  // property names should be chosen as not to clash with properties used by `Task` or `combineEvents`
-  data: {
+  // this object will be sent in the *start* event
+  properties: {
     name: 'my task',
   },

@@ -16,13 +14,15 @@ const task = new Task({
   // this function is called each time this task or one of its subtasks changes state
   const { id, timestamp, type } = event
   if (type === 'start') {
-    const { name, parentId } = event
+    const { name, parentId, properties } = event
   } else if (type === 'end') {
     const { result, status } = event
   } else if (type === 'info' || type === 'warning') {
     const { data, message } = event
+  } else if (type === 'property') {
+    const { name, value } = event
   } else if (type === 'abortionRequested') {
     const { reason } = event
   }
 },
 })
@@ -36,7 +36,6 @@ task.id
 // - pending
 // - success
 // - failure
-// - aborted
 task.status

 // Triggers the abort signal associated to the task.
@@ -89,6 +88,30 @@ const onProgress = makeOnProgress({
   onRootTaskStart(taskLog) {
     // `taskLog` is an object reflecting the state of this task and all its subtasks,
     // and will be mutated in real-time to reflect the changes of the task.
+
+    // timestamp at which the task started
+    taskLog.start
+
+    // current status of the task as described in the previous section
+    taskLog.status
+
+    // undefined or a dictionary of properties attached to the task
+    taskLog.properties
+
+    // timestamp at which the abortion was requested, undefined otherwise
+    taskLog.abortionRequestedAt
+
+    // undefined or an array of infos emitted on the task
+    taskLog.infos
+
+    // undefined or an array of warnings emitted on the task
+    taskLog.warnings
+
+    // timestamp at which the task ended, undefined otherwise
+    taskLog.end
+
+    // undefined or the result value of the task
+    taskLog.result
   },

   // This function is called each time a root task ends.
@@ -100,7 +123,7 @@ const onProgress = makeOnProgress({
   onTaskUpdate(taskLog) {},
 })

-Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
+Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
 ```

 It can also be fed event logs directly:
@@ -18,10 +18,8 @@ npm install --save @vates/task
 import { Task } from '@vates/task'

 const task = new Task({
-  // data in this object will be sent along the *start* event
-  //
-  // property names should be chosen as not to clash with properties used by `Task` or `combineEvents`
-  data: {
+  // this object will be sent in the *start* event
+  properties: {
     name: 'my task',
   },

@@ -32,13 +30,15 @@ const task = new Task({
   // this function is called each time this task or one of its subtasks changes state
   const { id, timestamp, type } = event
   if (type === 'start') {
-    const { name, parentId } = event
+    const { name, parentId, properties } = event
   } else if (type === 'end') {
     const { result, status } = event
   } else if (type === 'info' || type === 'warning') {
     const { data, message } = event
+  } else if (type === 'property') {
+    const { name, value } = event
   } else if (type === 'abortionRequested') {
     const { reason } = event
   }
 },
 })
@@ -52,7 +52,6 @@ task.id
 // - pending
 // - success
 // - failure
-// - aborted
 task.status

 // Triggers the abort signal associated to the task.
@@ -105,6 +104,30 @@ const onProgress = makeOnProgress({
   onRootTaskStart(taskLog) {
     // `taskLog` is an object reflecting the state of this task and all its subtasks,
     // and will be mutated in real-time to reflect the changes of the task.
+
+    // timestamp at which the task started
+    taskLog.start
+
+    // current status of the task as described in the previous section
+    taskLog.status
+
+    // undefined or a dictionary of properties attached to the task
+    taskLog.properties
+
+    // timestamp at which the abortion was requested, undefined otherwise
+    taskLog.abortionRequestedAt
+
+    // undefined or an array of infos emitted on the task
+    taskLog.infos
+
+    // undefined or an array of warnings emitted on the task
+    taskLog.warnings
+
+    // timestamp at which the task ended, undefined otherwise
+    taskLog.end
+
+    // undefined or the result value of the task
+    taskLog.result
   },

   // This function is called each time a root task ends.
@@ -116,7 +139,7 @@ const onProgress = makeOnProgress({
   onTaskUpdate(taskLog) {},
 })

-Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
+Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
 ```

 It can also be fed event logs directly:
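A minimal sketch of that last point (not part of the diff): the `onProgress` function returned by `makeOnProgress` consumes raw event objects, so previously stored logs can be replayed through it. The import subpath and the event shapes below are inferred from the handler documented above.

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  onRootTaskEnd(taskLog) {
    console.log(taskLog.status, taskLog.result)
  },
})

// replay stored events (shapes inferred from the docs above)
const storedEvents = [
  { type: 'start', id: 't1', timestamp: Date.now(), properties: { name: 'replayed task' } },
  { type: 'end', id: 't1', timestamp: Date.now(), status: 'success', result: 42 },
]
for (const event of storedEvents) {
  onProgress(event)
}
```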
@@ -4,36 +4,18 @@ const assert = require('node:assert').strict

const noop = Function.prototype

function omit(source, keys, target = { __proto__: null }) {
  for (const key of Object.keys(source)) {
    if (!keys.has(key)) {
      target[key] = source[key]
    }
  }
  return target
}

const IGNORED_START_PROPS = new Set([
  'end',
  'infos',
  'properties',
  'result',
  'status',
  'tasks',
  'timestamp',
  'type',
  'warnings',
])

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = omit(event, IGNORED_START_PROPS)
      taskLog.start = event.timestamp
      taskLog.status = 'pending'
      taskLog = {
        id,
        properties: { __proto__: null, ...event.properties },
        start: event.timestamp,
        status: 'pending',
      }
      taskLogs.set(id, taskLog)

      const { parentId } = event
@@ -65,6 +47,8 @@ exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noo
      taskLog.end = event.timestamp
      taskLog.result = event.result
      taskLog.status = event.status
    } else if (type === 'abortionRequested') {
      taskLog.abortionRequestedAt = event.timestamp
    }

    if (type === 'end' && taskLog.$root === taskLog) {

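To make the new shape concrete, a sketch of the `taskLog` built for a start event (values illustrative):

```js
// given this incoming event:
const event = { id: 'xyz', type: 'start', timestamp: 1700000000000, properties: { name: 'my task' } }

// the branch above now produces:
const taskLog = {
  id: 'xyz',
  properties: { __proto__: null, name: 'my task' },
  start: 1700000000000,
  status: 'pending',
}
```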
@@ -11,7 +11,7 @@ describe('makeOnProgress()', function () {
    const events = []
    let log
    const task = new Task({
      data: { name: 'task' },
      properties: { name: 'task' },
      onProgress: makeOnProgress({
        onRootTaskStart(log_) {
          assert.equal(log, undefined)
@@ -32,36 +32,50 @@ describe('makeOnProgress()', function () {

    assert.equal(events.length, 0)

    let i = 0

    await task.run(async () => {
      assert.equal(events[0], 'onRootTaskStart')
      assert.equal(events[1], 'onTaskUpdate')
      assert.equal(log.name, 'task')
      assert.equal(events[i++], 'onRootTaskStart')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.id, task.id)
      assert.equal(log.properties.name, 'task')
      assert(Math.abs(log.start - Date.now()) < 10)

      Task.set('name', 'new name')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.name, 'new name')

      Task.set('progress', 0)
      assert.equal(events[2], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 0)

      Task.info('foo', {})
      assert.equal(events[3], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])

      await Task.run({ data: { name: 'subtask' } }, () => {
        assert.equal(events[4], 'onTaskUpdate')
        assert.equal(log.tasks[0].name, 'subtask')
      const subtask = new Task({ properties: { name: 'subtask' } })
      await subtask.run(() => {
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(log.tasks[0].properties.name, 'subtask')

        Task.warning('bar', {})
        assert.equal(events[5], 'onTaskUpdate')
        assert.equal(events[i++], 'onTaskUpdate')
        assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])

        subtask.abort()
        assert.equal(events[i++], 'onTaskUpdate')
        assert(Math.abs(log.tasks[0].abortionRequestedAt - Date.now()) < 10)
      })
      assert.equal(events[6], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.tasks[0].status, 'success')

      Task.set('progress', 100)
      assert.equal(events[7], 'onTaskUpdate')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 100)
    })
    assert.equal(events[8], 'onRootTaskEnd')
    assert.equal(events[9], 'onTaskUpdate')
    assert.equal(events[i++], 'onRootTaskEnd')
    assert.equal(events[i++], 'onTaskUpdate')
    assert(Math.abs(log.end - Date.now()) < 10)
    assert.equal(log.status, 'success')
  })
})

@@ -10,11 +10,10 @@ function define(object, property, value) {

const noop = Function.prototype

const ABORTED = 'aborted'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { ABORTED, FAILURE, PENDING, SUCCESS }
exports.STATUS = { FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
@@ -83,8 +82,8 @@ exports.Task = class Task {
    return this.#status
  }

  constructor({ data = {}, onProgress } = {}) {
    this.#startData = data
  constructor({ properties, onProgress } = {}) {
    this.#startData = { properties }

    if (onProgress !== undefined) {
      this.#onProgress = onProgress
@@ -105,12 +104,16 @@ exports.Task = class Task {

    const { signal } = this.#abortController
    signal.addEventListener('abort', () => {
      if (this.status === PENDING && !this.#running) {
      if (this.status === PENDING) {
        this.#maybeStart()

        const status = ABORTED
        this.#status = status
        this.#emit('end', { result: signal.reason, status })
        this.#emit('abortionRequested', { reason: signal.reason })

        if (!this.#running) {
          const status = FAILURE
          this.#status = status
          this.#emit('end', { result: signal.reason, status })
        }
      }
    })
  }
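With this listener, aborting a pending task that is not running yields three events instead of two; a sketch of the observable sequence (ids and timestamps elided):

```js
const task = new Task()
task.abort(new Error('stop'))

// emitted events, in order:
// { type: 'start' }                                          // from #maybeStart()
// { type: 'abortionRequested', reason: Error('stop') }
// { type: 'end', status: 'failure', result: Error('stop') }  // only because the task was not running
```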
@@ -156,9 +159,7 @@ exports.Task = class Task {
      this.#running = false
      return result
    } catch (result) {
      const { signal } = this.#abortController
      const aborted = signal.aborted && result === signal.reason
      const status = aborted ? ABORTED : FAILURE
      const status = FAILURE

      this.#status = status
      this.#emit('end', { status, result })

@@ -15,7 +15,7 @@ function assertEvent(task, expected, eventIndex = -1) {
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const keys of Object.keys(expected)) {
    assert.equal(actual[keys], expected[keys])
    assert.deepEqual(actual[keys], expected[keys])
  }
}

@@ -30,10 +30,10 @@ function createTask(opts) {
describe('Task', function () {
  describe('constructor', function () {
    it('data properties are passed to the start event', async function () {
      const data = { foo: 0, bar: 1 }
      const task = createTask({ data })
      const properties = { foo: 0, bar: 1 }
      const task = createTask({ properties })
      await task.run(noop)
      assertEvent(task, { ...data, type: 'start' }, 0)
      assertEvent(task, { type: 'start', properties }, 0)
    })
  })

@@ -79,20 +79,22 @@ describe('Task', function () {
        })
        .catch(noop)

      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const reason = {}
      const result = new Error()

      await task
        .run(() => {
          task.abort({})
          task.abort(reason)

          throw result
        })
@@ -100,18 +102,20 @@ describe('Task', function () {

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'failure', result }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result }, 2)
    })

    it('does not abort if the task succeeds', async function () {
      const task = createTask()
      const reason = {}
      const result = {}

      await task
        .run(() => {
          task.abort({})
          task.abort(reason)

          return result
        })
@@ -119,9 +123,10 @@ describe('Task', function () {

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'success', result }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'success', result }, 2)
    })

    it('aborts before task is running', function () {
@@ -130,11 +135,12 @@ describe('Task', function () {

      task.abort(reason)

      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 2)
      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'end', status: 'aborted', result: reason }, 1)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })
  })

@@ -243,7 +249,7 @@ describe('Task', function () {
      assert.equal(task.status, 'failure')
    })

    it('changes to aborted after run is complete', async function () {
    it('changes to failure if aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
@@ -252,13 +258,13 @@ describe('Task', function () {
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')
    })

    it('changes to aborted if aborted when not running', async function () {
    it('changes to failure if aborted when not running', function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'aborted')
      assert.equal(task.status, 'failure')
    })
  })

@@ -13,7 +13,7 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.2",
  "version": "0.2.0",
  "engines": {
    "node": ">=14"
  },

@@ -35,7 +35,7 @@
    "test": "node--test"
  },
  "devDependencies": {
    "sinon": "^15.0.1",
    "sinon": "^17.0.1",
    "test": "^3.2.1"
  }
}

@@ -1,7 +1,7 @@
'use strict'

const assert = require('assert/strict')
const { afterEach, describe, it } = require('tap').mocha
const { afterEach, describe, it } = require('test')

const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')

@@ -13,10 +13,10 @@
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "tap --lines 67 --functions 92 --branches 52 --statements 67"
    "test": "node--test"
  },
  "dependencies": {
    "@vates/decorate-with": "^2.0.0",
    "@vates/decorate-with": "^2.1.0",
    "@xen-orchestra/log": "^0.6.0",
    "golike-defer": "^0.5.1",
    "object-hash": "^2.0.1"
@@ -28,6 +28,6 @@
    "url": "https://vates.fr"
  },
  "devDependencies": {
    "tap": "^16.0.1"
    "test": "^3.3.0"
  }
}

@@ -1,5 +1,5 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'

@@ -7,9 +7,9 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.36.1",
    "@xen-orchestra/fs": "^3.3.4",
    "filenamify": "^4.1.0",
    "@xen-orchestra/backups": "^0.44.6",
    "@xen-orchestra/fs": "^4.1.4",
    "filenamify": "^6.0.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox": "^0.21.0"
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "1.0.6",
  "version": "1.0.14",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",

@@ -1,307 +0,0 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const createStreamThrottle = require('./_createStreamThrottle.js')

const noop = Function.prototype

const getAdaptersByRemote = adapters => {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}

const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

const DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

const DEFAULT_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  validateVhdStreams: false,
  vmTimeout: 0,
}

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}

exports.Backup = class Backup {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const { type } = job
    const baseSettings = { ...DEFAULT_SETTINGS }
    if (type === 'backup') {
      Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
      this.run = this._runVmBackup
    } else if (type === 'metadataBackup') {
      Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
      this.run = this._runMetadataBackup
    } else {
      throw new Error(`No runner for the backup type ${type}`)
    }
    Object.assign(baseSettings, job.settings[''])

    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }

    const { getRemoteTimeout } = this._settings
    this._getAdapter = async function (remoteId) {
      try {
        const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))

        return new Disposable(() => disposable.dispose(), {
          adapter: disposable.value,
          remoteId,
        })
      } catch (error) {
        // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
        runTask(
          {
            name: 'get remote adapter',
            data: { type: 'remote', id: remoteId },
          },
          () => Promise.reject(error)
        )
      }
    }
  }

  async _runMetadataBackup() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(remoteIds.map(id => this._getAdapter(id))),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }

  async _runVmBackup() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          return this._getRecord('VM', vmUuid).then(
            disposableVm =>
              Disposable.use(disposableVm, vm => {
                taskStart.data.name_label = vm.name_label
                return runTask(taskStart, () =>
                  new VmBackup({
                    baseSettings,
                    config,
                    getSnapshotNameLabel,
                    healthCheckSr,
                    job,
                    remoteAdapters,
                    schedule,
                    settings: { ...settings, ...allSettings[vm.uuid] },
                    srs,
                    throttleStream,
                    vm,
                  }).run()
                )
              }),
            error =>
              runTask(taskStart, () => {
                throw error
              })
          )
        }
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@xen-orchestra/backups/Backup.mjs (new file)
@@ -0,0 +1,17 @@
import { Metadata } from './_runners/Metadata.mjs'
import { VmsRemote } from './_runners/VmsRemote.mjs'
import { VmsXapi } from './_runners/VmsXapi.mjs'

export function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'mirrorBackup':
      return new VmsRemote(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
}
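A hedged usage sketch; the options mirror what the former `Backup` constructor received and are assumed to come from the caller's context:

```js
import { createRunner } from '@xen-orchestra/backups/Backup.mjs'

// `job.type` selects the runner: 'backup', 'mirrorBackup' or 'metadataBackup'
const runner = createRunner({ config, getAdapter, getConnectedRecord, job, schedule })
await runner.run()
```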
@@ -1,8 +1,6 @@
'use strict'
import { asyncMap } from '@xen-orchestra/async-map'

const { asyncMap } = require('@xen-orchestra/async-map')

exports.DurablePartition = class DurablePartition {
export class DurablePartition {
  // private resource API is used exceptionally to be able to separate resource creation and release
  #partitionDisposers = {}

@@ -1,8 +1,6 @@
'use strict'
import { Task } from './Task.mjs'

const { Task } = require('./Task')

exports.HealthCheckVmBackup = class HealthCheckVmBackup {
export class HealthCheckVmBackup {
  #restoredVm
  #timeout
  #xapi
@@ -1,73 +0,0 @@
'use strict'

const assert = require('assert')

const { formatFilenameDate } = require('./_filenameDate.js')
const { importDeltaVm } = require('./_deltaVm.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

exports.ImportVmBackup = class ImportVmBackup {
  constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
    this._adapter = adapter
    this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
  }

  async run() {
    const adapter = this._adapter
    const metadata = this._metadata
    const isFull = metadata.mode === 'full'

    const sizeContainer = { size: 0 }

    let backup
    if (isFull) {
      backup = await adapter.readFullVmBackup(metadata)
      watchStreamSize(backup, sizeContainer)
    } else {
      assert.strictEqual(metadata.mode, 'delta')

      const ignoredVdis = new Set(
        Object.entries(this._importDeltaVmSettings.mapVdisSrs)
          .filter(([_, srUuid]) => srUuid === null)
          .map(([vdiUuid]) => vdiUuid)
      )
      backup = await adapter.readDeltaVmBackup(metadata, ignoredVdis)
      Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
    }

    return Task.run(
      {
        name: 'transfer',
      },
      async () => {
        const xapi = this._xapi
        const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)

        const vmRef = isFull
          ? await xapi.VM_import(backup, srRef)
          : await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
              ...this._importDeltaVmSettings,
              detectBase: false,
            })

        await Promise.all([
          xapi.call('VM.add_tags', vmRef, 'restored from backup'),
          xapi.call(
            'VM.set_name_label',
            vmRef,
            `${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
          ),
        ])

        return {
          size: sizeContainer.size,
          id: await xapi.getField('VM', vmRef, 'uuid'),
        }
      }
    )
  }
}
@xen-orchestra/backups/ImportVmBackup.mjs (new file)
@@ -0,0 +1,280 @@
import assert from 'node:assert'

import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'
import { VhdNegative, VhdSynthetic } from 'vhd-lib'
import { decorateClass } from '@vates/decorate-with'
import { createLogger } from '@xen-orchestra/log'
import { dirname, join } from 'node:path'
import pickBy from 'lodash/pickBy.js'
import { defer } from 'golike-defer'

const { debug, info, warn } = createLogger('xo:backups:importVmBackup')
async function resolveUuid(xapi, cache, uuid, type) {
  if (uuid == null) {
    return uuid
  }
  const ref = cache.get(uuid)
  if (ref === undefined) {
    cache.set(uuid, xapi.call(`${type}.get_by_uuid`, uuid))
  }
  return cache.get(uuid)
}
export class ImportVmBackup {
  constructor({
    adapter,
    metadata,
    srUuid,
    xapi,
    settings: { additionnalVmTag, newMacAddresses, mapVdisSrs = {}, useDifferentialRestore = false } = {},
  }) {
    this._adapter = adapter
    this._importIncrementalVmSettings = { additionnalVmTag, newMacAddresses, mapVdisSrs, useDifferentialRestore }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
  }

  async #getPathOfVdiSnapshot(snapshotUuid) {
    const metadata = this._metadata
    if (this._pathToVdis === undefined) {
      const backups = await this._adapter.listVmBackups(
        this._metadata.vm.uuid,
        ({ mode, timestamp }) => mode === 'delta' && timestamp >= metadata.timestamp
      )
      const map = new Map()
      for (const backup of backups) {
        for (const [vdiRef, vdi] of Object.entries(backup.vdis)) {
          map.set(vdi.uuid, backup.vhds[vdiRef])
        }
      }
      this._pathToVdis = map
    }
    return this._pathToVdis.get(snapshotUuid)
  }

  async _reuseNearestSnapshot($defer, ignoredVdis) {
    const metadata = this._metadata
    const { mapVdisSrs } = this._importIncrementalVmSettings
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const streams = {}
    const metdataDir = dirname(metadata._filename)
    const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

    for (const [vdiRef, vdi] of Object.entries(vdis)) {
      const vhdPath = join(metdataDir, vhds[vdiRef])

      let xapiDisk
      try {
        xapiDisk = await this._xapi.getRecordByUuid('VDI', vdi.$snapshot_of$uuid)
      } catch (err) {
        // if this disk is not present anymore, fall back to default restore
        warn(err)
      }

      let snapshotCandidate, backupCandidate
      if (xapiDisk !== undefined) {
        debug('found disk, will search its snapshots', { snapshots: xapiDisk.snapshots })
        for (const snapshotRef of xapiDisk.snapshots) {
          const snapshot = await this._xapi.getRecord('VDI', snapshotRef)
          debug('handling snapshot', { snapshot })

          // take only the first snapshot
          if (snapshotCandidate && snapshotCandidate.snapshot_time < snapshot.snapshot_time) {
            debug('already got a better candidate')
            continue
          }

          // is there a corresponding backup more recent than the metadata?
          const pathToSnapshotData = await this.#getPathOfVdiSnapshot(snapshot.uuid)
          if (pathToSnapshotData === undefined) {
            debug('no backup linked to this snapshot')
            continue
          }
          if (snapshot.$SR.uuid !== (mapVdisSrs[vdi.$snapshot_of$uuid] ?? this._srUuid)) {
            debug('not restored on the same SR', { snapshotSr: snapshot.$SR.uuid, mapVdisSrs, srUuid: this._srUuid })
            continue
          }

          debug('got a candidate', pathToSnapshotData)

          snapshotCandidate = snapshot
          backupCandidate = pathToSnapshotData
        }
      }

      let stream
      const backupWithSnapshotPath = join(metdataDir, backupCandidate ?? '')
      if (vhdPath === backupWithSnapshotPath) {
        // all the data are already on the host
        debug('direct reuse of a snapshot')
        stream = null
        vdis[vdiRef].baseVdi = snapshotCandidate
        // go to the next disk, we won't use this stream
        continue
      }

      let disposableDescendants

      const disposableSynthetic = await VhdSynthetic.fromVhdChain(this._adapter._handler, vhdPath)

      // this will also clean up if another disk of this VM backup fails
      // if the user really only needs to restore the non-failing disks, they can retry with ignoredVdis
      let disposed = false
      const disposeOnce = async () => {
        if (!disposed) {
          disposed = true
          try {
            await disposableDescendants?.dispose()
            await disposableSynthetic?.dispose()
          } catch (error) {
            warn('openVhd: failed to dispose VHDs', { error })
          }
        }
      }
      $defer.onFailure(() => disposeOnce())

      const parentVhd = disposableSynthetic.value
      await parentVhd.readBlockAllocationTable()
      debug('got vhd synthetic of parents', parentVhd.length)

      if (snapshotCandidate !== undefined) {
        try {
          debug('will try to use differential restore', {
            backupWithSnapshotPath,
            vhdPath,
            vdiRef,
          })

          disposableDescendants = await VhdSynthetic.fromVhdChain(this._adapter._handler, backupWithSnapshotPath, {
            until: vhdPath,
          })
          const descendantsVhd = disposableDescendants.value
          await descendantsVhd.readBlockAllocationTable()
          debug('got vhd synthetic of descendants')
          const negativeVhd = new VhdNegative(parentVhd, descendantsVhd)
          debug('got vhd negative')

          // update the stream with the negative vhd stream
          stream = await negativeVhd.stream()
          vdis[vdiRef].baseVdi = snapshotCandidate
        } catch (error) {
          // can be a broken VHD chain, a VHD chain with a key backup, ...
          // not an irrecoverable error, don't dispose parentVhd, and fall back to full restore
          warn(`can't use differential restore`, { error })
          disposableDescendants?.dispose()
        }
      }
      // didn't make a negative stream: fall back to the classic stream
      if (stream === undefined) {
        debug('use legacy restore')
        stream = await parentVhd.stream()
      }

      stream.on('end', disposeOnce)
      stream.on('close', disposeOnce)
      stream.on('error', disposeOnce)
      info('everything is ready, will transfer', stream.length)
      streams[`${vdiRef}.vhd`] = stream
    }
    return {
      streams,
      vbds,
      vdis,
      version: '1.0.0',
      vifs,
      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
    }
  }

  async #decorateIncrementalVmMetadata() {
    const { additionnalVmTag, mapVdisSrs, useDifferentialRestore } = this._importIncrementalVmSettings

    const ignoredVdis = new Set(
      Object.entries(mapVdisSrs)
        .filter(([_, srUuid]) => srUuid === null)
        .map(([vdiUuid]) => vdiUuid)
    )
    let backup
    if (useDifferentialRestore) {
      backup = await this._reuseNearestSnapshot(ignoredVdis)
    } else {
      backup = await this._adapter.readIncrementalVmBackup(this._metadata, ignoredVdis)
    }
    const xapi = this._xapi

    const cache = new Map()
    const mapVdisSrRefs = {}
    if (additionnalVmTag !== undefined) {
      backup.vm.tags.push(additionnalVmTag)
    }
    for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
      mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
    }
    const srRef = await resolveUuid(xapi, cache, this._srUuid, 'SR')
    Object.values(backup.vdis).forEach(vdi => {
      vdi.SR = mapVdisSrRefs[vdi.uuid] ?? srRef
    })
    return backup
  }

  async run() {
    const adapter = this._adapter
    const metadata = this._metadata
    const isFull = metadata.mode === 'full'

    const sizeContainer = { size: 0 }
    const { newMacAddresses } = this._importIncrementalVmSettings
    let backup
    if (isFull) {
      backup = await adapter.readFullVmBackup(metadata)
      watchStreamSize(backup, sizeContainer)
    } else {
      assert.strictEqual(metadata.mode, 'delta')

      backup = await this.#decorateIncrementalVmMetadata()
      Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
    }

    return Task.run(
      {
        name: 'transfer',
      },
      async () => {
        const xapi = this._xapi
        const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)

        const vmRef = isFull
          ? await xapi.VM_import(backup, srRef)
          : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
              newMacAddresses,
            })

        await Promise.all([
          xapi.call('VM.add_tags', vmRef, 'restored from backup'),
          xapi.call(
            'VM.set_name_label',
            vmRef,
            `${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
          ),
          xapi.call(
            'VM.set_name_description',
            vmRef,
            `Restored on ${formatFilenameDate(+new Date())} from ${adapter._handler._remote.name} -
${metadata.vm.name_description}
`
          ),
        ])

        return {
          size: sizeContainer.size,
          id: await xapi.getField('VM', vmRef, 'uuid'),
        }
      }
    )
  }
}

decorateClass(ImportVmBackup, { _reuseNearestSnapshot: defer })
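A usage sketch for the new differential restore path; `adapter`, `metadata`, `srUuid` and `xapi` are assumed to come from the caller's context:

```js
const importer = new ImportVmBackup({
  adapter,
  metadata,
  srUuid,
  xapi,
  // only applies to delta backups; full backups always go through VM_import
  settings: { useDifferentialRestore: true },
})

// resolves with the restored VM's uuid and the number of bytes transferred
const { id, size } = await importer.run()
```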
@@ -1,43 +1,41 @@
'use strict'
import { asyncEach } from '@vates/async-each'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
import { execFile } from 'child_process'
import { mount } from '@vates/fuse-vhd'
import { readdir, lstat } from 'node:fs/promises'
import { synchronized } from 'decorator-synchronized'
import { v4 as uuidv4 } from 'uuid'
import { ZipFile } from 'yazl'
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import groupBy from 'lodash/groupBy.js'
import pDefer from 'promise-toolbox/defer'
import pickBy from 'lodash/pickBy.js'
import tar from 'tar'
import zlib from 'zlib'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
import { BACKUP_DIR } from './_getVmBackupDir.mjs'
import { cleanVm } from './_cleanVm.mjs'
import { formatFilenameDate } from './_filenameDate.mjs'
import { getTmpDir } from './_getTmpDir.mjs'
import { isMetadataFile } from './_backupType.mjs'
import { isValidXva } from './_isValidXva.mjs'
import { listPartitions, LVM_PARTITION_TYPE } from './_listPartitions.mjs'
import { lvs, pvs } from './_lvm.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'

const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { watchStreamSize } = require('./_watchStreamSize')
// @todo: this import is marked extraneous, should be fixed when the lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'

const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'

const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const IMMUTABILTY_METADATA_FILENAME = '/immutability.json'

const { debug, warn } = createLogger('xo:backups:RemoteAdapter')

@@ -46,20 +44,23 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const noop = Function.prototype

const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))

const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
  for (const relativePath of relativePaths) {
    const realPath = join(realBasePath, relativePath)
    const virtualPath = join(virtualBasePath, relativePath)

async function addDirectory(files, realPath, metadataPath) {
  const stats = await lstat(realPath)
  if (stats.isDirectory()) {
    await asyncMap(await readdir(realPath), file =>
      addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
    )
  } else if (stats.isFile()) {
    files.push({
      realPath,
      metadataPath,
    })
    const stats = await lstat(realPath)
    const { mode, mtime } = stats
    const opts = { mode, mtime }
    if (stats.isDirectory()) {
      zip.addEmptyDirectory(virtualPath, opts)
      await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
    } else if (stats.isFile()) {
      zip.addFile(realPath, virtualPath, opts)
    }
  }
}

@@ -76,7 +77,7 @@ const debounceResourceFactory = factory =>
    return this._debounceResource(factory.apply(this, arguments))
  }

class RemoteAdapter {
export class RemoteAdapter {
  constructor(
    handler,
    { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
@@ -187,17 +188,6 @@ class RemoteAdapter {
    })
  }

  async *_usePartitionFiles(diskId, partitionId, paths) {
    const path = yield this.getPartition(diskId, partitionId)

    const files = []
    await asyncMap(paths, file =>
      addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
    )

    return files
  }

  // check if we will be allowed to merge a vhd created in this adapter
  // with the vhd at path `path`
  async isMergeableParent(packedParentUid, path) {
@@ -214,15 +204,24 @@ class RemoteAdapter {
    })
  }

  fetchPartitionFiles(diskId, partitionId, paths) {
  fetchPartitionFiles(diskId, partitionId, paths, format) {
    const { promise, reject, resolve } = pDefer()
    Disposable.use(
      async function* () {
        const files = yield this._usePartitionFiles(diskId, partitionId, paths)
        const zip = new ZipFile()
        files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
        zip.end()
        const { outputStream } = zip
        const path = yield this.getPartition(diskId, partitionId)
        let outputStream

        if (format === 'tgz') {
          outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
        } else if (format === 'zip') {
          const zip = new ZipFile()
          await addZipEntries(zip, path, '', paths.map(makeRelative))
          zip.end()
          ;({ outputStream } = zip)
        } else {
          throw new Error('unsupported format ' + format)
        }

        resolve(outputStream)
        await fromEvent(outputStream, 'end')
      }.bind(this)
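A usage sketch for the new `format` parameter; `adapter`, `diskId` and `partitionId` are assumed from the caller's context:

```js
import { createWriteStream } from 'node:fs'

// 'tgz' streams a gzipped tarball built with `tar.c`; 'zip' goes through yazl;
// any other value rejects with "unsupported format"
const stream = await adapter.fetchPartitionFiles(diskId, partitionId, ['/etc', '/var/log'], 'tgz')
stream.pipe(createWriteStream('./partition-files.tgz'))
```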
@@ -333,7 +332,7 @@ class RemoteAdapter {
    const RE_VHDI = /^vhdi(\d+)$/
    const handler = this._handler

    const diskPath = handler._getFilePath('/' + diskId)
    const diskPath = handler.getFilePath('/' + diskId)
    const mountDir = yield getTmpDir()
    await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
    try {
@@ -404,20 +403,27 @@ class RemoteAdapter {
    return `${baseName}.vhd`
  }

  async listAllVmBackups() {
  async listAllVms() {
    const handler = this._handler

    const backups = { __proto__: null }
    await asyncMap(await handler.list(BACKUP_DIR), async entry => {
    const vmsUuids = []
    await asyncEach(await handler.list(BACKUP_DIR), async entry => {
      // ignore hidden and lock files
      if (entry[0] !== '.' && !entry.endsWith('.lock')) {
        const vmBackups = await this.listVmBackups(entry)
        if (vmBackups.length !== 0) {
          backups[entry] = vmBackups
        }
        vmsUuids.push(entry)
      }
    })
    return vmsUuids
  }

  async listAllVmBackups() {
    const vmsUuids = await this.listAllVms()
    const backups = { __proto__: null }
    await asyncEach(vmsUuids, async vmUuid => {
      const vmBackups = await this.listVmBackups(vmUuid)
      if (vmBackups.length !== 0) {
        backups[vmUuid] = vmBackups
      }
    })
    return backups
  }

@@ -677,11 +683,13 @@ class RemoteAdapter {
    }
  }

  async outputStream(path, input, { checksum = true, validator = noop } = {}) {
  async outputStream(path, input, { checksum = true, maxStreamLength, streamLength, validator = noop } = {}) {
    const container = watchStreamSize(input)
    await this._handler.outputStream(path, input, {
      checksum,
      dirMode: this._dirMode,
      maxStreamLength,
      streamLength,
      async validator() {
        await input.task
        return validator.apply(this, arguments)
@@ -691,8 +699,8 @@ class RemoteAdapter {
  }

  // open the hierarchy of ancestors until we find a full one
  async _createSyntheticStream(handler, path) {
    const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
  async _createVhdStream(handler, path, { useChain }) {
    const disposableSynthetic = useChain ? await VhdSynthetic.fromVhdChain(handler, path) : await openVhd(handler, path)
    // I don't want the vhds to be disposed on return
    // but only when the stream is done (or failed)

@@ -717,7 +725,7 @@ class RemoteAdapter {
    return stream
  }

  async readDeltaVmBackup(metadata, ignoredVdis) {
  async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true } = {}) {
    const handler = this._handler
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const dir = dirname(metadata._filename)
@@ -725,7 +733,7 @@ class RemoteAdapter {

    const streams = {}
    await asyncMapSettled(Object.keys(vdis), async ref => {
      streams[`${ref}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[ref]))
      streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain })
    })

    return {
@@ -743,10 +751,37 @@ class RemoteAdapter {
  }

  async readVmBackupMetadata(path) {
    let json
    let isImmutable = false
    let remoteIsImmutable = false
    // if the remote is immutable, check whether this metadata is also immutable
    try {
      // this file is not encrypted
      await this._handler._readFile(IMMUTABILTY_METADATA_FILENAME)
      remoteIsImmutable = true
    } catch (error) {
      if (error.code !== 'ENOENT') {
        throw error
      }
    }

    try {
      // this will trigger an EPERM error if the file is immutable
      json = await this._handler.readFile(path, { flag: 'r+' })
      // the s3 handler doesn't respect flags
    } catch (err) {
      // retry without triggering the immutability check, only on an immutable remote
      if (err.code === 'EPERM' && remoteIsImmutable) {
        isImmutable = true
        json = await this._handler.readFile(path, { flag: 'r' })
      } else {
        throw err
      }
    }
    // _filename is a private field used to compute the backup id
    //
    // it's enumerable to make it cacheable
    const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
    const metadata = { ...JSON.parse(json), _filename: path, isImmutable }

    // backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
    if (typeof metadata.vm.is_a_template === 'number') {
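A sketch of how a caller can use the new `isImmutable` flag; `adapter` and `path` are assumed from context:

```js
const metadata = await adapter.readVmBackupMetadata(path)
if (metadata.isImmutable) {
  // the remote advertises immutability and this metadata file is still
  // write-protected, so the backup can be treated as tamper-resistant
}
```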
@@ -822,11 +857,7 @@ decorateMethodsWith(RemoteAdapter, {
    debounceResourceFactory,
  ]),

  _usePartitionFiles: Disposable.factory,

  getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),

  getPartition: Disposable.factory,
})

exports.RemoteAdapter = RemoteAdapter
@@ -1,26 +0,0 @@
'use strict'

const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')

exports.RestoreMetadataBackup = class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      return String(await handler.readFile(`${backupId}/data.json`))
    }
  }
}
@xen-orchestra/backups/RestoreMetadataBackup.mjs (new file)
@@ -0,0 +1,32 @@
import { join, resolve } from 'node:path/posix'

import { DIR_XO_POOL_METADATA_BACKUPS } from './RemoteAdapter.mjs'
import { PATH_DB_DUMP } from './_runners/_PoolMetadataBackup.mjs'

export class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
      const dataFileName = resolve('/', backupId, metadata.data ?? 'data.json').slice(1)
      const data = await handler.readFile(dataFileName)

      // if data is JSON, send it as a plain string; otherwise, consider the data binary and encode it
      const isJson = dataFileName.endsWith('.json')
      return isJson ? data.toString() : { encoding: 'base64', data: data.toString('base64') }
    }
  }
}
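A sketch of handling the two return shapes of `run()` for XO config backups (names assumed from context):

```js
const result = await new RestoreMetadataBackup({ backupId, handler, xapi }).run()

if (typeof result === 'string') {
  // JSON payload returned as plain text
  const restored = JSON.parse(result)
} else {
  // binary payload: { encoding: 'base64', data: '<base64 string>' }
  const buffer = Buffer.from(result.data, 'base64')
}
```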
@@ -1,7 +1,5 @@
'use strict'

const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
import CancelToken from 'promise-toolbox/CancelToken'
import Zone from 'node-zone'

const logAfterEnd = log => {
  const error = new Error('task has already ended')
@@ -30,7 +28,7 @@ const serializeError = error =>

const $$task = Symbol('@xen-orchestra/backups/Task')

class Task {
export class Task {
  static get cancelToken() {
    const task = Zone.current.data[$$task]
    return task !== undefined ? task.#cancelToken : CancelToken.none
@@ -151,7 +149,6 @@ class Task {
    })
  }
}
exports.Task = Task

for (const method of ['info', 'warning']) {
  Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
@@ -1,515 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const findLast = require('lodash/findLast.js')
|
||||
const groupBy = require('lodash/groupBy.js')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const keyBy = require('lodash/keyBy.js')
|
||||
const mapValues = require('lodash/mapValues.js')
|
||||
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
||||
const { defer } = require('golike-defer')
|
||||
const { formatDateTime } = require('@xen-orchestra/xapi')
|
||||
const { pipeline } = require('node:stream')
|
||||
|
||||
const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
|
||||
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
|
||||
const { exportDeltaVm } = require('./_deltaVm.js')
|
||||
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
|
||||
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
|
||||
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
|
||||
const { getOldEntries } = require('./_getOldEntries.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
||||
|
||||
const { debug, warn } = createLogger('xo:backups:VmBackup')
|
||||
|
||||
class AggregateError extends Error {
|
||||
constructor(errors, message) {
|
||||
super(message)
|
||||
this.errors = errors
|
||||
}
|
||||
}
|
||||
|
||||
const asyncEach = async (iterable, fn, thisArg = iterable) => {
|
||||
for (const item of iterable) {
|
||||
await fn.call(thisArg, item)
|
||||
}
|
||||
}
|
||||
|
||||
const forkDeltaExport = deltaExport =>
|
||||
Object.create(deltaExport, {
|
||||
streams: {
|
||||
value: mapValues(deltaExport.streams, forkStreamUnpipe),
|
||||
},
|
||||
})

const noop = Function.prototype

class VmBackup {
  constructor({
    config,
    getSnapshotNameLabel,
    healthCheckSr,
    job,
    remoteAdapters,
    remotes,
    schedule,
    settings,
    srs,
    throttleStream,
    vm,
  }) {
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
      // don't match replicated VMs created by this very job otherwise they
      // will be replicated again and again
      throw new Error('cannot backup a VM created by this very job')
    }

    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm

    // VM (snapshot) that is really exported
    this.exportedVm = undefined

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isDelta = job.mode === 'delta'
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._throttleStream = throttleStream
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._isDelta
        ? [DeltaBackupWriter, DeltaReplicationWriter]
        : [FullBackupWriter, FullReplicationWriter]

      const allSettings = job.settings
      Object.keys(remoteAdapters).forEach(remoteId => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
        }
      })
    }
  }

  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
    const n = writers.size
    if (n === 0) {
      return
    }

    async function callWriter(writer) {
      const { name } = writer.constructor
      try {
        debug('writer step starting', { step, writer: name })
        await fn(writer)
        debug('writer step succeeded', { step, writer: name })
      } catch (error) {
        writers.delete(writer)

        warn('writer step failed', { error, step, writer: name })

        // these two steps are the only ones that are not already in their own subtasks
        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }

        throw error
      }
    }
    if (n === 1) {
      const [writer] = writers
      return callWriter(writer)
    }

    const errors = []
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await callWriter(writer)
      } catch (error) {
        errors.push(error)
      }
    })
    if (writers.size === 0) {
      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
    }
  }
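The net effect is fail-soft semantics: a failing writer is removed from the set and its error is only fatal once no writer remains. A hedged usage sketch (hypothetical payload):

try {
  await this._callWriters(writer => writer.transfer(payload), 'writer.transfer()')
  // reached as long as at least one writer succeeded
} catch (error) {
  // only reached when every writer failed the step
  error.message // 'all targets have failed, step: writer.transfer()'
  error.errors // one underlying error per failed writer
}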

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const { vm } = this
    const xapi = this._xapi

    const settings = this._settings

    const doSnapshot =
      settings.unconditionalSnapshot ||
      this._isDelta ||
      (!settings.offlineBackup && vm.power_state === 'Running') ||
      settings.snapshotRetention !== 0
    if (doSnapshot) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          ignoreNobakVdis: true,
          name_label: this._getSnapshotNameLabel(vm),
          unplugVusbs: true,
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this.timestamp = Date.now()
    }
  }
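So a snapshot is skipped only for a full backup of a VM that is halted (or explicitly backed up offline) with no snapshot retention. The predicate, extracted for readability (illustration only, not part of the patch):

const shouldSnapshot = ({ settings, isDelta, powerState }) =>
  settings.unconditionalSnapshot ||
  isDelta ||
  (!settings.offlineBackup && powerState === 'Running') ||
  settings.snapshotRetention !== 0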

  async _copyDelta() {
    const { exportedVm } = this
    const baseVm = this._baseVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    // since NBD is network based, if one disk uses NBD, all the disks use it,
    // except the suspended VDI
    if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
      Task.info('Transfer data using NBD')
    }
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    if (this._settings.validateVhdStreams) {
      deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
    }

    deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          sizeContainers,
          timestamp,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }
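Since `duration` is in milliseconds and `size` in bytes, the logged `speed` is in MiB/s. Worked example (hypothetical values):

const size = 2 * 1024 ** 3 // 2 GiB transferred
const duration = 100e3 // 100 seconds, expressed in milliseconds
const speed = (size * 1e3) / 1024 / 1024 / duration // 20.48 MiB/s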

  async _copyFull() {
    const { compression } = this.job
    const stream = this._throttleStream(
      await this._xapi.VM_export(this.exportedVm.$ref, {
        compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
        useSnapshot: false,
      })
    )
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    const allSettings = this.job.settings
    const baseSettings = this._baseSettings
    const baseVmRef = this._baseVm?.$ref

    const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
    const xapi = this._xapi
    await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
      const settings = {
        ...baseSettings,
        ...allSettings[scheduleId],
        ...allSettings[this.vm.uuid],
      }
      return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
        if ($ref !== baseVmRef) {
          return xapi.VM_destroy($ref)
        }
      })
    })
  }
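Note the spread order when resolving the effective retention: per-VM settings override per-schedule ones, which override the base settings. A sketch with hypothetical values:

const settings = {
  ...{ snapshotRetention: 1 }, // baseSettings
  ...{ snapshotRetention: 3 }, // allSettings[scheduleId]
  ...{ snapshotRetention: 0 }, // allSettings[this.vm.uuid] — wins
}
settings.snapshotRetention // 0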

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      debug('no base VM found')
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      debug('not using base VM because fullInterval reached')
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const [baseUuid, snapshotOf] = await Promise.all([
        xapi.getField('VDI', baseRef, 'uuid'),
        xapi.getField('VDI', baseRef, 'snapshot_of'),
      ])
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(baseUuid, srcVdi)
      } else {
        debug('ignore snapshot VDI because no longer present on VM', {
          vdi: baseUuid,
        })
      }
    })

    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    await this._callWriters(
      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
      'writer.checkBaseVdis()',
      false
    )

    if (presentBaseVdis.size === 0) {
      debug('no base VM found')
      return
    }

    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (presentBaseVdis.has(baseUuid)) {
        debug('found base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
      } else {
        debug('missing base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }
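The base VM is reused only while the prospective chain length, including the new delta, stays strictly below `fullInterval` (0 meaning no limit). For instance:

// assuming fullInterval = 7 and 6 deltas already chained on the base:
const deltaChainLength = 6 + 1 // 7
!(7 === 0 || 7 > 7) // true → the base is discarded and a full export is forced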

  async _healthCheck() {
    const settings = this._settings

    if (this._healthCheckSr === undefined) {
      return
    }

    // check if current VM has tags
    const { tags } = this.vm
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      return
    }

    await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
  }

  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    if (this._isDelta) {
      await this._selectBaseVm()
    }

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { vm } = this
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await (this._isDelta ? this._copyDelta() : this._copyFull())
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
    await this._healthCheck()
  }
}
exports.VmBackup = VmBackup

decorateMethodsWith(VmBackup, {
  run: defer,
})
@@ -1,6 +0,0 @@
'use strict'

exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@xen-orchestra/backups/_backupType.mjs (new file, 4 lines)
@@ -0,0 +1,4 @@
export const isMetadataFile = filename => filename.endsWith('.json')
export const isVhdFile = filename => filename.endsWith('.vhd')
export const isXvaFile = filename => filename.endsWith('.xva')
export const isXvaSumFile = filename => filename.endsWith('.xva.checksum')
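These predicates classify the files found on a backup remote by extension; for instance (hypothetical file names):

isMetadataFile('20240131T083000Z.json') // true
isVhdFile('vdis/job/vdi/20240131T083000Z.vhd') // true
isXvaFile('20240131T083000Z.xva') // true
isXvaSumFile('20240131T083000Z.xva.checksum') // true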
@@ -1,25 +1,25 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'

const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { compose } from '@vates/compose'
import { createCachedLookup } from '@vates/cached-dns.lookup'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
import { createRunner } from './Backup.mjs'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { getHandler } from '@xen-orchestra/fs'
import { parseDuration } from '@vates/parse-duration'
import { Xapi } from '@xen-orchestra/xapi'

require('@xen-orchestra/log/configure').catchGlobalErrors(logger)
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { Task } from './Task.mjs'

require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()

const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
const { Xapi } = require('@xen-orchestra/xapi')

const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')
createCachedLookup().patchGlobal()

const logger = createLogger('xo:backups:worker')
catchGlobalErrors(logger)
const { debug } = logger

class BackupWorker {
@@ -48,7 +48,7 @@ class BackupWorker {
  }

  run() {
    return new Backup({
    return createRunner({
      config: this.#config,
      getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
      getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
@@ -1,13 +1,11 @@
'use strict'

const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')
import cancelable from 'promise-toolbox/cancelable'
import CancelToken from 'promise-toolbox/CancelToken'

// Similar to `Promise.all` + `map` but passes a cancel token to the callback
//
// If any of the executions fails, the cancel token will be triggered and the
// result will be rejected with the first reason.
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
export const cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
  const { cancel, token } = CancelToken.source([$cancelToken])
  try {
    return await Promise.all(
@@ -1,19 +1,19 @@
'use strict'
import test from 'test'
import { strict as assert } from 'node:assert'

const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict
import tmp from 'tmp'
import fs from 'fs-extra'
import * as uuid from 'uuid'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { VHDFOOTER, VHDHEADER } from './tests.fixtures.mjs'
import { VhdFile, Constants, VhdDirectory, VhdAbstract } from 'vhd-lib'
import { checkAliases } from './_cleanVm.mjs'
import { dirname, basename } from 'node:path'
import { rimraf } from 'rimraf'

const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
const { beforeEach, afterEach, describe } = test

let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
@@ -67,6 +67,11 @@ async function generateVhd(path, opts = {}) {
    await VhdAbstract.createAlias(handler, path + '.alias.vhd', dataPath)
  }

  if (opts.blocks) {
    for (const blockId of opts.blocks) {
      await vhd.writeEntireBlock({ id: blockId, buffer: Buffer.alloc(2 * 1024 * 1024 + 512, blockId) })
    }
  }
  await vhd.writeBlockAllocationTable()
  await vhd.writeHeader()
  await vhd.writeFooter()
@@ -230,7 +235,7 @@ test('it merges delta of non destroyed chain', async () => {

  const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
  // size should be the size of children + grand children after the merge
  assert.equal(metadata.size, 209920)
  assert.equal(metadata.size, 104960)

  // merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
  // only check deletion
@@ -320,6 +325,7 @@ describe('tests multiple combination ', () => {
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
  useAlias,
  mode: vhdMode,
  blocks: [1, 3],
})
const child = await generateVhd(`${basePath}/child.vhd`, {
  useAlias,
@@ -328,6 +334,7 @@ describe('tests multiple combination ', () => {
    parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
    parentUuid: ancestor.footer.uuid,
  },
  blocks: [1, 2],
})
// a grand child vhd in metadata
await generateVhd(`${basePath}/grandchild.vhd`, {
@@ -337,6 +344,7 @@ describe('tests multiple combination ', () => {
    parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
    parentUuid: child.footer.uuid,
  },
  blocks: [2, 3],
})

// an older parent that was merging in clean
@@ -395,7 +403,7 @@ describe('tests multiple combination ', () => {

const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children + clean after the merge
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
assert.deepEqual(metadata.size, vhdMode === 'file' ? 6502400 : 6501888)

// broken VHDs, non-referenced or abandoned ones should be deleted (alias and data)
// ancestor and child should be merged
@@ -1,19 +1,18 @@
'use strict'
import * as UUID from 'uuid'
import sum from 'lodash/sum.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { Constants, openVhd, VhdAbstract, VhdFile } from 'vhd-lib'
import { isVhdAlias, resolveVhdAlias } from 'vhd-lib/aliases.js'
import { dirname, resolve } from 'node:path'
import { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } from './_backupType.mjs'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { mergeVhdChain } from 'vhd-lib/merge.js'

import { Task } from './Task.mjs'
import { Disposable } from 'promise-toolbox'
import handlerPath from '@xen-orchestra/fs/path'

const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')

const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')

// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
@@ -37,34 +36,32 @@ const computeVhdsSize = (handler, vhdPaths) =>
  )

// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
  if (merge) {
    logInfo(`merging VHD chain`, { chain })

    let done, total
    const handle = setInterval(() => {
      if (done !== undefined) {
        logInfo('merge in progress', {
          done,
          parent: chain[0],
          progress: Math.round((100 * done) / total),
          total,
        })
      }
    }, 10e3)
    try {
      return await mergeVhdChain(handler, chain, {
        logInfo,
        mergeBlockConcurrency,
        onProgress({ done: d, total: t }) {
          done = d
          total = t
        },
        removeUnused: remove,
      })
    } finally {
      clearInterval(handle)
    }
  }
}
async function _mergeVhdChain(handler, chain, { logInfo, remove, mergeBlockConcurrency }) {
  logInfo(`merging VHD chain`, { chain })

  let done, total
  const handle = setInterval(() => {
    if (done !== undefined) {
      logInfo('merge in progress', {
        done,
        parent: chain[0],
        progress: Math.round((100 * done) / total),
        total,
      })
    }
  }, 10e3)
  try {
    return await mergeVhdChain(handler, chain, {
      logInfo,
      mergeBlockConcurrency,
      onProgress({ done: d, total: t }) {
        done = d
        total = t
      },
      removeUnused: remove,
    })
  } finally {
    clearInterval(handle)
  }
}

@@ -117,7 +114,7 @@ const listVhds = async (handler, vmDir, logWarn) => {
  return { vhds, interruptedVhds, aliases }
}

async function checkAliases(
export async function checkAliases(
  aliasPaths,
  targetDataRepository,
  { handler, logInfo = noop, logWarn = console.warn, remove = false }
@@ -176,11 +173,9 @@ async function checkAliases(
  })
}

exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

exports.cleanVm = async function cleanVm(
export async function cleanVm(
  vmDir,
  {
    fixMetadata,
@@ -474,23 +469,20 @@ exports.cleanVm = async function cleanVm(
  const metadataWithMergedVhd = {}
  const doMerge = async () => {
    await asyncMap(toMerge, async chain => {
      const merged = await limitedMergeVhdChain(handler, chain, {
      const { finalVhdSize } = await limitedMergeVhdChain(handler, chain, {
        logInfo,
        logWarn,
        remove,
        merge,
        mergeBlockConcurrency,
      })
      if (merged !== undefined) {
        const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
        metadataWithMergedVhd[metadataPath] = true
      }
      const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
      metadataWithMergedVhd[metadataPath] = (metadataWithMergedVhd[metadataPath] ?? 0) + finalVhdSize
    })
  }

  await Promise.all([
    ...unusedVhdsDeletion,
    toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
    toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : () => Promise.resolve()),
    asyncMap(unusedXvas, path => {
      logWarn('unused XVA', { path })
      if (remove) {
@@ -512,12 +504,11 @@ exports.cleanVm = async function cleanVm(

  // update size for delta metadata with merged VHDs
  // for the others, check that the size matches the real file size

  await asyncMap(jsons, async metadataPath => {
    const metadata = backups.get(metadataPath)

    let fileSystemSize
    const merged = metadataWithMergedVhd[metadataPath] !== undefined
    const mergedSize = metadataWithMergedVhd[metadataPath]

    const { mode, size, vhds, xva } = metadata

@@ -527,26 +518,29 @@ exports.cleanVm = async function cleanVm(
      const linkedXva = resolve('/', vmDir, xva)
      try {
        fileSystemSize = await handler.getSize(linkedXva)
        if (fileSystemSize !== size && fileSystemSize !== undefined) {
          logWarn('cleanVm: incorrect backup size in metadata', {
            path: metadataPath,
            actual: size ?? 'none',
            expected: fileSystemSize,
          })
        }
      } catch (error) {
        // can fail with encrypted remote
      }
    } else if (mode === 'delta') {
      const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
      fileSystemSize = await computeVhdsSize(handler, linkedVhds)

      // the size is not computed in some cases (e.g. VhdDirectory)
      if (fileSystemSize === undefined) {
        return
      }

      // don't warn if the size has changed after a merge
      if (!merged && fileSystemSize !== size) {
        // FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
        console.warn('cleanVm: incorrect backup size in metadata', {
          path: metadataPath,
          actual: size ?? 'none',
          expected: fileSystemSize,
        })
      if (mergedSize === undefined) {
        const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
        fileSystemSize = await computeVhdsSize(handler, linkedVhds)
        // the size is not computed in some cases (e.g. VhdDirectory)
        if (fileSystemSize !== undefined && fileSystemSize !== size) {
          logWarn('cleanVm: incorrect backup size in metadata', {
            path: metadataPath,
            actual: size ?? 'none',
            expected: fileSystemSize,
          })
        }
      }
    }
  } catch (error) {
@@ -554,9 +548,19 @@ exports.cleanVm = async function cleanVm(
    return
  }

  // systematically update size after a merge
  if ((merged || fixMetadata) && size !== fileSystemSize) {
    metadata.size = fileSystemSize
  // systematically update size and differentials after a merge

  // @todo: after 2024-04-01, remove the fixMetadata option since the size computation is fixed
  if (mergedSize || (fixMetadata && fileSystemSize !== size)) {
    metadata.size = mergedSize ?? fileSystemSize ?? size

    if (mergedSize) {
      // all disks are now key disks
      metadata.isVhdDifferencing = {}
      for (const id of Object.values(metadata.vdis ?? {})) {
        metadata.isVhdDifferencing[`${id}.vhd`] = false
      }
    }
    mustRegenerateCache = true
    try {
      await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
@@ -1,8 +0,0 @@
'use strict'

const { utcFormat, utcParse } = require('d3-time-format')

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
exports.formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
exports.parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
@xen-orchestra/backups/_filenameDate.mjs (new file, 6 lines)
@@ -0,0 +1,6 @@
import { utcFormat, utcParse } from 'd3-time-format'

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
export const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
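With the `%Y%m%dT%H%M%SZ` specifier the result contains no colons, hence the Windows-safe claim. Round-trip sketch (hypothetical date):

const date = new Date(Date.UTC(2024, 0, 31, 8, 30, 0))
formatFilenameDate(date) // '20240131T083000Z'
+parseFilenameDate('20240131T083000Z') === +date // true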
@@ -1,6 +1,4 @@
'use strict'

// returns all entries except the last `retention` ones
exports.getOldEntries = function getOldEntries(retention, entries) {
export function getOldEntries(retention, entries) {
  return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
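So with a retention of 2 everything but the two most recent entries is returned, and a retention of 0 marks every entry as old:

getOldEntries(2, ['a', 'b', 'c', 'd']) // ['a', 'b'] — the 2 newest are kept
getOldEntries(0, ['a', 'b']) // ['a', 'b'] — nothing is retained
getOldEntries(3, undefined) // []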
@@ -1,13 +1,11 @@
'use strict'

const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')
import Disposable from 'promise-toolbox/Disposable'
import { join } from 'node:path'
import { mkdir, rmdir } from 'node:fs/promises'
import { tmpdir } from 'os'

const MAX_ATTEMPTS = 3

exports.getTmpDir = async function getTmpDir() {
export async function getTmpDir() {
  for (let i = 0; true; ++i) {
    const path = join(tmpdir(), Math.random().toString(36).slice(2))
    try {
@@ -1,8 +0,0 @@
'use strict'

const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

exports.getVmBackupDir = function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
@xen-orchestra/backups/_getVmBackupDir.mjs (new file, 5 lines)
@@ -0,0 +1,5 @@
export const BACKUP_DIR = 'xo-vm-backups'

export function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
@@ -1,39 +1,30 @@
'use strict'
import groupBy from 'lodash/groupBy.js'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import omit from 'lodash/omit.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { CancelToken } from 'promise-toolbox'
import { compareVersions } from 'compare-versions'
import { createVhdStreamWithLength } from 'vhd-lib'
import { defer } from 'golike-defer'

const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')
import { cancelableMap } from './_cancelableMap.mjs'
import { Task } from './Task.mjs'
import pick from 'lodash/pick.js'

const { cancelableMap } = require('./_cancelableMap.js')
const { Task } = require('./Task.js')
const pick = require('lodash/pick.js')
// in `other_config` of an incrementally replicated VM, contains the UUID of the source VM
export const TAG_BASE_DELTA = 'xo:base_delta'

const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
// in `other_config` of an incrementally replicated VM, contains the UUID of the target SR used for replication
//
// added after the complete replication
export const TAG_BACKUP_SR = 'xo:backup:sr'

const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC
// in other_config of VDIs of an incrementally replicated VM, contains the UUID of the source VDI
export const TAG_COPY_SRC = 'xo:copy_of'

const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
const resolveUuid = async (xapi, cache, uuid, type) => {
  if (uuid == null) {
    return uuid
  }
  let ref = cache.get(uuid)
  if (ref === undefined) {
    ref = await xapi.call(`${type}.get_by_uuid`, uuid)
    cache.set(uuid, ref)
  }
  return ref
}

exports.exportDeltaVm = async function exportDeltaVm(
export async function exportIncrementalVm(
  vm,
  baseVm,
  {
@@ -43,6 +34,8 @@ exports.exportDeltaVm = async function exportDeltaVm(
    fullVdisRequired = new Set(),

    disableBaseTags = false,
    nbdConcurrency = 1,
    preferNbd,
  } = {}
) {
  // refs of VM's VDIs → base's VDIs.
@@ -90,6 +83,8 @@ exports.exportDeltaVm = async function exportDeltaVm(
      baseRef: baseVdi?.$ref,
      cancelToken,
      format: 'vhd',
      nbdConcurrency,
      preferNbd,
    })
  })

@@ -143,47 +138,21 @@ exports.exportDeltaVm = async function exportDeltaVm(
  )
}

exports.importDeltaVm = defer(async function importDeltaVm(
export const importIncrementalVm = defer(async function importIncrementalVm(
  $defer,
  deltaVm,
  incrementalVm,
  sr,
  { cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
  { cancelToken = CancelToken.none, newMacAddresses = false } = {}
) {
  const { version } = deltaVm
  const { version } = incrementalVm
  if (compareVersions(version, '1.0.0') < 0) {
    throw new Error(`Unsupported delta backup version: ${version}`)
  }

  const vmRecord = deltaVm.vm
  const vmRecord = incrementalVm.vm
  const xapi = sr.$xapi

  let baseVm
  if (detectBase) {
    const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
    if (remoteBaseVmUuid) {
      baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)

      if (!baseVm) {
        throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
      }
    }
  }

  const cache = new Map()
  const mapVdisSrRefs = {}
  for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
    mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
  }

  const baseVdis = {}
  baseVm &&
    baseVm.$VBDs.forEach(vbd => {
      const vdi = vbd.$VDI
      if (vdi !== undefined) {
        baseVdis[vbd.VDI] = vbd.$VDI
      }
    })
  const vdiRecords = deltaVm.vdis
  const vdiRecords = incrementalVm.vdis

  // 0. Create suspend_VDI
  let suspendVdi
@@ -194,18 +163,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
        vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
      })
    } else {
      suspendVdi = await xapi.getRecord(
        'VDI',
        await xapi.VDI_create({
          ...vdi,
          other_config: {
            ...vdi.other_config,
            [TAG_BASE_DELTA]: undefined,
            [TAG_COPY_SRC]: vdi.uuid,
          },
          sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
        })
      )
      suspendVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
      $defer.onFailure(() => suspendVdi.$destroy())
    }
  }
@@ -223,10 +181,6 @@ exports.importDeltaVm = defer(async function importDeltaVm(
      ha_always_run: false,
      is_a_template: false,
      name_label: '[Importing…] ' + vmRecord.name_label,
      other_config: {
        ...vmRecord.other_config,
        [TAG_COPY_SRC]: vmRecord.uuid,
      },
    },
    {
      bios_strings: vmRecord.bios_strings,
@@ -240,21 +194,15 @@ exports.importDeltaVm = defer(async function importDeltaVm(
  await asyncMap(await xapi.getField('VM', vmRef, 'VBDs'), ref => ignoreErrors.call(xapi.call('VBD.destroy', ref)))

  // 3. Create VDIs & VBDs.
  const vbdRecords = deltaVm.vbds
  const vbdRecords = incrementalVm.vbds
  const vbds = groupBy(vbdRecords, 'VDI')
  const newVdis = {}
  await asyncMap(Object.keys(vdiRecords), async vdiRef => {
    const vdi = vdiRecords[vdiRef]
    let newVdi

    const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
    if (remoteBaseVdiUuid) {
      const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
      if (!baseVdi) {
        throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
      }

      newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
    if (vdi.baseVdi !== undefined) {
      newVdi = await xapi.getRecord('VDI', await vdi.baseVdi.$clone())
      $defer.onFailure(() => newVdi.$destroy())

      await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
@@ -265,18 +213,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
      // the suspend VDI has already been created
      newVdi = suspendVdi
    } else {
      newVdi = await xapi.getRecord(
        'VDI',
        await xapi.VDI_create({
          ...vdi,
          other_config: {
            ...vdi.other_config,
            [TAG_BASE_DELTA]: undefined,
            [TAG_COPY_SRC]: vdi.uuid,
          },
          SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
        })
      )
      newVdi = await xapi.getRecord('VDI', await xapi.VDI_create(vdi))
      $defer.onFailure(() => newVdi.$destroy())
    }

@@ -309,24 +246,30 @@ exports.importDeltaVm = defer(async function importDeltaVm(
    }
  })

  const { streams } = deltaVm
  const { streams } = incrementalVm

  await Promise.all([
    // Import VDI contents.
    cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
      for (let stream of ensureArray(streams[`${id}.vhd`])) {
        if (stream === null) {
          // we are restoring a backup and completely reusing a local snapshot
          continue
        }
        if (typeof stream === 'function') {
          stream = await stream()
        }
        if (stream.length === undefined) {
          stream = await createVhdStreamWithLength(stream)
        }
        await xapi.setField('VDI', vdi.$ref, 'name_label', `[Importing] ${vdiRecords[id].name_label}`)
        await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
        await xapi.setField('VDI', vdi.$ref, 'name_label', vdiRecords[id].name_label)
      }
    }),

    // Create VIFs.
    asyncMap(Object.values(deltaVm.vifs), vif => {
    asyncMap(Object.values(incrementalVm.vifs), vif => {
      let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)

      if (network === undefined) {
@@ -358,8 +301,8 @@ exports.importDeltaVm = defer(async function importDeltaVm(
  ])

  await Promise.all([
    deltaVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
    xapi.setField('VM', vmRef, 'name_label', deltaVm.vm.name_label),
    incrementalVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
    xapi.setField('VM', vmRef, 'name_label', incrementalVm.vm.name_label),
  ])

  return vmRef
@@ -1,6 +1,4 @@
'use strict'

const assert = require('assert')
import assert from 'node:assert'

const COMPRESSED_MAGIC_NUMBERS = [
  // https://tools.ietf.org/html/rfc1952.html#page-5
@@ -47,7 +45,7 @@ const isValidTar = async (handler, size, fd) => {
}

// TODO: find a heuristic for compressed files
async function isValidXva(path) {
export async function isValidXva(path) {
  const handler = this._handler

  // size is longer when encrypted + reading part of an encrypted file is not implemented
@@ -74,6 +72,5 @@ async function isValidXva(path) {
    return true
  }
}
exports.isValidXva = isValidXva

const noop = Function.prototype
@@ -1,9 +1,7 @@
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')
import fromCallback from 'promise-toolbox/fromCallback'
import { createLogger } from '@xen-orchestra/log'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'

const { debug } = createLogger('xo:backups:listPartitions')

@@ -24,8 +22,7 @@ const IGNORED_PARTITION_TYPES = {
  0x82: true, // swap
}

const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE
export const LVM_PARTITION_TYPE = 0x8e

const parsePartxLine = createParser({
  keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
@@ -33,7 +30,7 @@ const parsePartxLine = createParser({
})

// returns an empty array in case of a non-partitioned disk
exports.listPartitions = async function listPartitions(devicePath) {
export async function listPartitions(devicePath) {
  const parts = await fromCallback(execFile, 'partx', [
    '--bytes',
    '--output=NR,START,SIZE,NAME,UUID,TYPE',
@@ -1,8 +1,6 @@
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')
import fromCallback from 'promise-toolbox/fromCallback'
import { createParser } from 'parse-pairs'
import { execFile } from 'child_process'

// ===================================================================

@@ -29,5 +27,5 @@ const makeFunction =
    .map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}

exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')
export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')
@xen-orchestra/backups/_runners/Metadata.mjs (new file, 132 lines)
@@ -0,0 +1,132 @@
import { asyncMap } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { PoolMetadataBackup } from './_PoolMetadataBackup.mjs'
import { XoMetadataBackup } from './_XoMetadataBackup.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

export const Metadata = class MetadataBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(remoteIds.map(id => this._getAdapter(id))),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }
}
@xen-orchestra/backups/_runners/VmsRemote.mjs (new file, 96 lines)
@@ -0,0 +1,96 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { FullRemote } from './_vmRunners/FullRemote.mjs'
import { IncrementalRemote } from './_vmRunners/IncrementalRemote.mjs'

const DEFAULT_REMOTE_VM_SETTINGS = {
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  timeout: 0,
  validateVhdStreams: false,
  vmTimeout: 0,
}

export const VmsRemote = class RemoteVmsBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      () => this._getAdapter(job.sourceRemote),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id => id !== job.sourceRemote && this._getAdapter(id))
      ),
      async ({ adapter: sourceRemoteAdapter }, healthCheckSr, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => !!_)
        if (remoteAdapters.length === 0) {
          return
        }

        const vmsUuids = await sourceRemoteAdapter.listAllVms()

        Task.info('vms', { vms: vmsUuids })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)
        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          const opts = {
            baseSettings,
            config,
            job,
            healthCheckSr,
            remoteAdapters,
            schedule,
            settings: { ...settings, ...allSettings[vmUuid] },
            sourceRemoteAdapter,
            throttleStream,
            vmUuid,
          }
          let vmBackup
          if (job.mode === 'delta') {
            vmBackup = new IncrementalRemote(opts)
          } else if (job.mode === 'full') {
            vmBackup = new FullRemote(opts)
          } else {
            throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
          }

          return runTask(taskStart, () => vmBackup.run())
        }
        const { concurrency } = settings
        await asyncMapSettled(vmsUuids, !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@xen-orchestra/backups/_runners/VmsXapi.mjs (new file, 137 lines)
@@ -0,0 +1,137 @@
import { asyncMapSettled } from '@xen-orchestra/async-map'
import Disposable from 'promise-toolbox/Disposable'
import { limitConcurrency } from 'limit-concurrency-decorator'

import { extractIdsFromSimplePattern } from '../extractIdsFromSimplePattern.mjs'
import { Task } from '../Task.mjs'
import createStreamThrottle from './_createStreamThrottle.mjs'
import { DEFAULT_SETTINGS, Abstract } from './_Abstract.mjs'
import { runTask } from './_runTask.mjs'
import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'
import { IncrementalXapi } from './_vmRunners/IncrementalXapi.mjs'
import { FullXapi } from './_vmRunners/FullXapi.mjs'

const DEFAULT_XAPI_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  diskPerVmConcurrency: 0, // not limited by default
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  validateVhdStreams: false,
  vmTimeout: 0,
}

export const VmsXapi = class VmsXapiBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          return this._getRecord('VM', vmUuid).then(
            disposableVm =>
              Disposable.use(disposableVm, vm => {
                taskStart.data.name_label = vm.name_label
                return runTask(taskStart, () => {
                  const opts = {
                    baseSettings,
                    config,
                    getSnapshotNameLabel,
                    healthCheckSr,
                    job,
                    remoteAdapters,
                    schedule,
                    settings: { ...settings, ...allSettings[vm.uuid] },
                    srs,
                    throttleStream,
                    vm,
                  }
                  let vmBackup
                  if (job.mode === 'delta') {
                    vmBackup = new IncrementalXapi(opts)
                  } else {
                    if (job.mode === 'full') {
                      vmBackup = new FullXapi(opts)
                    } else {
                      throw new Error(`Job mode ${job.mode} not implemented`)
                    }
                  }
                  return vmBackup.run()
                })
              }),
            error =>
              runTask(taskStart, () => {
                throw error
              })
          )
        }
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@xen-orchestra/backups/_runners/_Abstract.mjs (new file, 49 lines)
@@ -0,0 +1,49 @@
import Disposable from 'promise-toolbox/Disposable'
import pTimeout from 'promise-toolbox/timeout'
import { compileTemplate } from '@xen-orchestra/template'
import { runTask } from './_runTask.mjs'
import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'

export const DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

export const Abstract = class AbstractRunner {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const baseSettings = this._computeBaseSettings(config, job)
    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }

    const { getRemoteTimeout } = this._settings
    this._getAdapter = async function (remoteId) {
      try {
        const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))

        return new Disposable(() => disposable.dispose(), {
          adapter: disposable.value,
          remoteId,
        })
      } catch (error) {
        // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
        runTask(
          {
            name: 'get remote adapter',
            data: { type: 'remote', id: remoteId },
          },
          () => Promise.reject(error)
        )
      }
    }
  }
}
|
||||
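The constructor above layers settings in a fixed order: defaults, then job-wide settings (job.settings[''], merged by _computeBaseSettings), then per-schedule settings (job.settings[schedule.id]); the VM runner later spreads per-VM settings on top. A hedged illustration with invented values — only the merge order is taken from the code:

// Invented sample data; only the precedence chain comes from the runners above.
const DEFAULT_SETTINGS = { getRemoteTimeout: 300e3, reportWhen: 'failure' }
const job = {
  settings: {
    '': { reportWhen: 'always' }, // job-wide
    'schedule-id': { getRemoteTimeout: 600e3 }, // per schedule
    'vm-uuid': { snapshotRetention: 3 }, // per VM (applied by the VM runner)
  },
}

const baseSettings = { ...DEFAULT_SETTINGS, ...job.settings[''] }
const settings = { ...baseSettings, ...job.settings['schedule-id'] }
const vmSettings = { ...settings, ...job.settings['vm-uuid'] }
console.log(vmSettings)
// => { getRemoteTimeout: 600e3, reportWhen: 'always', snapshotRetention: 3 }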
@@ -1,16 +1,13 @@
-'use strict'
-
-const { asyncMap } = require('@xen-orchestra/async-map')
+import { asyncMap } from '@xen-orchestra/async-map'
 
-const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
-const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
-const { formatFilenameDate } = require('./_filenameDate.js')
-const { Task } = require('./Task.js')
+import { DIR_XO_POOL_METADATA_BACKUPS } from '../RemoteAdapter.mjs'
+import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'
+import { formatFilenameDate } from '../_filenameDate.mjs'
+import { Task } from '../Task.mjs'
 
-const PATH_DB_DUMP = '/pool/xmldbdump'
-exports.PATH_DB_DUMP = PATH_DB_DUMP
+export const PATH_DB_DUMP = '/pool/xmldbdump'
 
-exports.PoolMetadataBackup = class PoolMetadataBackup {
+export class PoolMetadataBackup {
   constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
     this._config = config
     this._job = job
@xen-orchestra/backups/_runners/_RemoteTimeoutError.mjs (new file, 6 lines)
@@ -0,0 +1,6 @@
export class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}
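A hedged usage sketch: since _getAdapter passes this error to pTimeout.call, a caller could tell a slow remote apart from other failures. The getAdapter function below is assumed, not shown in this diff:

import { RemoteTimeoutError } from './_RemoteTimeoutError.mjs'

async function tryGetAdapter(getAdapter, remoteId) {
  try {
    return await getAdapter(remoteId)
  } catch (error) {
    if (error instanceof RemoteTimeoutError) {
      // the remote did not answer within getRemoteTimeout
      console.warn('remote timed out:', error.remoteId)
      return undefined
    }
    throw error
  }
}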
@@ -1,12 +1,11 @@
-'use strict'
-
-const { asyncMap } = require('@xen-orchestra/async-map')
+import { asyncMap } from '@xen-orchestra/async-map'
+import { join } from '@xen-orchestra/fs/path'
 
-const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')
-const { formatFilenameDate } = require('./_filenameDate.js')
-const { Task } = require('./Task.js')
+import { DIR_XO_CONFIG_BACKUPS } from '../RemoteAdapter.mjs'
+import { formatFilenameDate } from '../_filenameDate.mjs'
+import { Task } from '../Task.mjs'
 
-exports.XoMetadataBackup = class XoMetadataBackup {
+export class XoMetadataBackup {
   constructor({ config, job, remoteAdapters, schedule, settings }) {
     this._config = config
     this._job = job
@@ -23,10 +22,17 @@ exports.XoMetadataBackup = class XoMetadataBackup {
     const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
 
     const data = job.xoMetadata
-    const fileName = `${dir}/data.json`
+    let dataBaseName = './data'
+
+    // JSON data is sent as a plain string, binary data is sent as an object with `data` and `encoding` properties
+    const isJson = typeof data === 'string'
+    if (isJson) {
+      dataBaseName += '.json'
+    }
 
     const metadata = JSON.stringify(
       {
+        data: dataBaseName,
         jobId: job.id,
         jobName: job.name,
         scheduleId: schedule.id,
@@ -36,6 +42,8 @@ exports.XoMetadataBackup = class XoMetadataBackup {
       null,
       2
     )
 
+    const dataFileName = join(dir, dataBaseName)
     const metaDataFileName = `${dir}/metadata.json`
+
     await asyncMap(
@@ -52,7 +60,7 @@ exports.XoMetadataBackup = class XoMetadataBackup {
       async () => {
         const handler = adapter.handler
         const dirMode = this._config.dirMode
-        await handler.outputFile(fileName, data, { dirMode })
+        await handler.outputFile(dataFileName, isJson ? data : Buffer.from(data.data, data.encoding), { dirMode })
         await handler.outputFile(metaDataFileName, metadata, {
           dirMode,
         })
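The two payload shapes the hunks above distinguish, with invented sample values: a JSON string is written verbatim to ./data.json, while binary metadata arrives as { data, encoding } and is decoded into a Buffer before being written to ./data:

// Invented examples of the two shapes job.xoMetadata can take.
const jsonPayload = '{"users": []}' // typeof === 'string' => ./data.json
const binaryPayload = { data: 'aGVsbG8gd29ybGQ=', encoding: 'base64' } // => ./data

for (const data of [jsonPayload, binaryPayload]) {
  const isJson = typeof data === 'string'
  const body = isJson ? data : Buffer.from(data.data, data.encoding)
  console.log(isJson ? './data.json' : './data', Buffer.byteLength(body), 'bytes')
}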
@@ -1,12 +1,10 @@
-'use strict'
-
-const { pipeline } = require('node:stream')
-const { ThrottleGroup } = require('@kldzj/stream-throttle')
-const identity = require('lodash/identity.js')
+import { pipeline } from 'node:stream'
+import { ThrottleGroup } from '@kldzj/stream-throttle'
+import identity from 'lodash/identity.js'
 
 const noop = Function.prototype
 
-module.exports = function createStreamThrottle(rate) {
+export default function createStreamThrottle(rate) {
   if (rate === 0) {
     return identity
   }
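The hunk stops at the early return. For context, a plausible remainder — hedged, since the rest of the function is not shown in this diff — would create a single ThrottleGroup so that every stream passed through the returned function shares one global rate cap:

import { pipeline } from 'node:stream'
import { ThrottleGroup } from '@kldzj/stream-throttle'
import identity from 'lodash/identity.js'

const noop = Function.prototype

export default function createStreamThrottle(rate) {
  if (rate === 0) {
    return identity
  }
  // one shared group: the rate limit applies to all throttled streams together
  const group = new ThrottleGroup({ rate })
  return stream => pipeline(stream, group.createThrottle(), noop)
}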
@@ -1,14 +1,13 @@
-'use strict'
-
-const { finished, PassThrough } = require('node:stream')
+import { createLogger } from '@xen-orchestra/log'
+import { finished, PassThrough } from 'node:stream'
 
-const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
+const { debug } = createLogger('xo:backups:forkStreamUnpipe')
 
 // create a new readable stream from an existing one which may be piped later
 //
 // in case of error in the new readable stream, it will simply be unpiped
 // from the original one
-exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
+export function forkStreamUnpipe(source) {
   const { forks = 0 } = source
   source.forks = forks + 1
 
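A hedged usage sketch of the function above: each call yields an independent readable carrying the same bytes, and per the comment, an error on one fork only unpipes that fork while the others keep receiving data. File names below are invented:

import { createReadStream, createWriteStream } from 'node:fs'
import { forkStreamUnpipe } from './_forkStreamUnpipe.mjs'

const source = createReadStream('export.vhd')

// two independent consumers of the same export stream
forkStreamUnpipe(source).pipe(createWriteStream('copy-a.vhd'))
forkStreamUnpipe(source).pipe(createWriteStream('copy-b.vhd'))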
@xen-orchestra/backups/_runners/_getAdaptersByRemote.mjs (new file, 7 lines)
@@ -0,0 +1,7 @@
export function getAdaptersByRemote(adapters) {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}
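Input and output, with invented identifiers — the helper simply re-keys the { adapter, remoteId } pairs produced by _getAdapter into a lookup object:

import { getAdaptersByRemote } from './_getAdaptersByRemote.mjs'

const adapters = [
  { adapter: 'nfs-adapter', remoteId: 'remote-nfs' }, // invented values
  { adapter: 's3-adapter', remoteId: 'remote-s3' },
]
console.log(getAdaptersByRemote(adapters))
// => { 'remote-nfs': 'nfs-adapter', 'remote-s3': 's3-adapter' }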
@xen-orchestra/backups/_runners/_runTask.mjs (new file, 5 lines)
@@ -0,0 +1,5 @@
import { Task } from '../Task.mjs'

const noop = Function.prototype

export const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
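Because of the final .catch(noop), runTask never rejects: the failure is recorded by Task.run's own logging and the caller can fire and forget. A hedged illustration (the task payload mirrors the 'get SR record' call in the runner above):

import { runTask } from './_runTask.mjs'

// resolves to undefined instead of throwing; the error only reaches the task log
await runTask({ name: 'get SR record', data: { type: 'SR', id: 'sr-1' } }, () =>
  Promise.reject(new Error('SR unreachable'))
)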
Some files were not shown because too many files have changed in this diff.