Compare commits
926 Commits
feat_block...updateChan
| Author | SHA1 | Date |
| --- | --- | --- |
|  | 969b64d575 |  |
|  | fa56e8453a |  |
|  | eb64937bc6 |  |
|  | ⋮ |  |
|  | 2c456e4c89 |  |
|  | 1460e63449 |  |
`.commitlintrc.json` (new file)

```diff
@@ -0,0 +1 @@
+{ "extends": ["@commitlint/config-conventional"] }
```
```diff
@@ -28,7 +28,7 @@ module.exports = {
     },
   },
   {
-    files: ['*.{spec,test}.{,c,m}js'],
+    files: ['*.{integ,spec,test}.{,c,m}js'],
     rules: {
       'n/no-unpublished-require': 'off',
       'n/no-unpublished-import': 'off',
```
`.github/workflows/ci.yml` (new file)

```diff
@@ -0,0 +1,32 @@
+name: Continous Integration
+on: push
+jobs:
+  CI:
+    runs-on: ubuntu-latest
+    steps:
+      # https://github.com/actions/checkout
+      - uses: actions/checkout@v3
+      - name: Install packages
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y curl qemu-utils python3-vmdkstream git libxml2-utils libfuse2 nbdkit
+      - name: Cache Turbo
+        # https://github.com/actions/cache
+        uses: actions/cache@v3
+        with:
+          path: '**/node_modules/.cache/turbo'
+          key: ${{ runner.os }}-turbo-cache
+      - name: Setup Node environment
+        # https://github.com/actions/setup-node
+        uses: actions/setup-node@v3
+        with:
+          node-version: '18'
+          cache: 'yarn'
+      - name: Install project dependencies
+        run: yarn
+      - name: Build the project
+        run: yarn build
+      - name: Lint tests
+        run: yarn test-lint
+      - name: Integration tests
+        run: sudo yarn test-integration
```
`.github/workflows/push.yml` (deleted file)

```diff
@@ -1,12 +0,0 @@
-name: CI
-on: push
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Build docker image
-        run: docker-compose -f docker/docker-compose.dev.yml build
-      - name: Create the container and start the tests
-        run: docker-compose -f docker/docker-compose.dev.yml up --exit-code-from xo
```
`.gitignore`

```diff
@@ -34,3 +34,4 @@ yarn-error.log.*
 # code coverage
 .nyc_output/
 coverage/
+.turbo/
```
`.husky/commit-msg` (new executable file)

```diff
@@ -0,0 +1,11 @@
+#!/usr/bin/env sh
+. "$(dirname -- "$0")/_/husky.sh"
+
+# Only check commit message if commit on master or first commit on another
+# branch to avoid bothering fix commits after reviews
+#
+# FIXME: does not properly run with git commit --amend
+if [ "$(git rev-parse --abbrev-ref HEAD)" = master ] || [ "$(git rev-list --count master..)" -eq 0 ]
+then
+  npx --no -- commitlint --edit "$1"
+fi
```
`.husky/pre-commit` (new executable file)

```diff
@@ -0,0 +1,4 @@
+#!/usr/bin/env sh
+. "$(dirname -- "$0")/_/husky.sh"
+
+npx lint-staged
```
```diff
@@ -1,8 +1,11 @@
 'use strict'
 
 module.exports = {
   arrowParens: 'avoid',
   jsxSingleQuote: true,
   semi: false,
   singleQuote: true,
   trailingComma: 'es5',
 
   // 2020-11-24: Requested by nraynaud and approved by the rest of the team
   //
```
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/async-each):
 
-```
-> npm install --save @vates/async-each
+```sh
+npm install --save @vates/async-each
 ```
 
 ## Usage
````
```diff
@@ -33,7 +33,7 @@
     "test": "node--test"
   },
   "devDependencies": {
-    "sinon": "^14.0.1",
+    "sinon": "^16.0.0",
     "tap": "^16.3.0",
     "test": "^3.2.1"
   }
```
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/cached-dns.lookup):
 
-```
-> npm install --save @vates/cached-dns.lookup
+```sh
+npm install --save @vates/cached-dns.lookup
 ```
 
 ## Usage
````
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/coalesce-calls):
 
-```
-> npm install --save @vates/coalesce-calls
+```sh
+npm install --save @vates/coalesce-calls
 ```
 
 ## Usage
````
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/compose):
 
-```
-> npm install --save @vates/compose
+```sh
+npm install --save @vates/compose
 ```
 
 ## Usage
````
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/decorate-with):
 
-```
-> npm install --save @vates/decorate-with
+```sh
+npm install --save @vates/decorate-with
 ```
 
 ## Usage
````
```diff
@@ -13,12 +13,15 @@ describe('decorateWith', () => {
     const expectedFn = Function.prototype
     const newFn = () => {}
 
-    const decorator = decorateWith(function wrapper(fn, ...args) {
-      assert.deepStrictEqual(fn, expectedFn)
-      assert.deepStrictEqual(args, expectedArgs)
+    const decorator = decorateWith(
+      function wrapper(fn, ...args) {
+        assert.deepStrictEqual(fn, expectedFn)
+        assert.deepStrictEqual(args, expectedArgs)
 
-      return newFn
-    }, ...expectedArgs)
+        return newFn
+      },
+      ...expectedArgs
+    )
 
     const descriptor = {
       configurable: true,
```
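The refactor above only changes formatting; the behavior it pins down is that `decorateWith(wrapper, ...args)` builds a decorator, and `wrapper` receives the decorated function followed by the extra arguments and returns its replacement. A minimal sketch of that contract, assuming the legacy `(target, name, descriptor)` decorator signature the test drives (the `Calculator` example is illustrative, not from the repository):

```js
'use strict'

const { decorateWith } = require('@vates/decorate-with')

// wrapper receives the original method plus the arguments passed to
// decorateWith, and must return the function that replaces the method
const logCalls = decorateWith(function wrapper(fn, label) {
  return function (...args) {
    console.log(label, 'called with', args)
    return fn.apply(this, args)
  }
}, 'add')

class Calculator {
  add(a, b) {
    return a + b
  }
}

// applied through a property descriptor, as in the test above
const descriptor = Object.getOwnPropertyDescriptor(Calculator.prototype, 'add')
Object.defineProperty(Calculator.prototype, 'add', logCalls(Calculator.prototype, 'add', descriptor))

new Calculator().add(1, 2) // logs: add called with [ 1, 2 ]
```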
`@vates/diff/.USAGE.md` (new file)

````diff
@@ -0,0 +1,32 @@
+```js
+import diff from '@vates/diff'
+
+diff('foo bar baz', 'Foo qux')
+// → [ 0, 'F', 4, 'qux', 7, '' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains `F`
+// - at position 4, it contains `qux`
+// - at position 7, it ends
+
+diff('Foo qux', 'foo bar baz')
+// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains `f`
+// - at position 4, it contains `bar`
+// - at position 7, it contains ` baz`
+
+// works with all collections that support
+// - `.length`
+// - `collection[index]`
+// - `.slice(start, end)`
+//
+// which includes:
+// - arrays
+// - strings
+// - `Buffer`
+// - `TypedArray`
+diff([0, 1, 2], [3, 4])
+// → [ 0, [ 3, 4 ], 2, [] ]
+```
````
`@vates/diff/.npmignore` (new symbolic link)

```diff
@@ -0,0 +1 @@
+../../scripts/npmignore
```
`@vates/diff/README.md` (new file)

````diff
@@ -0,0 +1,65 @@
+<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
+
+# @vates/diff
+
+[](https://npmjs.org/package/@vates/diff)  [](https://bundlephobia.com/result?p=@vates/diff) [](https://npmjs.org/package/@vates/diff)
+
+> Computes differences between two arrays, buffers or strings
+
+## Install
+
+Installation of the [npm package](https://npmjs.org/package/@vates/diff):
+
+```sh
+npm install --save @vates/diff
+```
+
+## Usage
+
+```js
+import diff from '@vates/diff'
+
+diff('foo bar baz', 'Foo qux')
+// → [ 0, 'F', 4, 'qux', 7, '' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains `F`
+// - at position 4, it contains `qux`
+// - at position 7, it ends
+
+diff('Foo qux', 'foo bar baz')
+// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains `f`
+// - at position 4, it contains `bar`
+// - at position 7, it contains ` baz`
+
+// works with all collections that support
+// - `.length`
+// - `collection[index]`
+// - `.slice(start, end)`
+//
+// which includes:
+// - arrays
+// - strings
+// - `Buffer`
+// - `TypedArray`
+diff([0, 1, 2], [3, 4])
+// → [ 0, [ 3, 4 ], 2, [] ]
+```
+
+## Contributions
+
+Contributions are _very_ welcomed, either on the documentation or on
+the code.
+
+You may:
+
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
+  you've encountered;
+- fork and create a pull request.
+
+## License
+
+[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
````
`@vates/diff/index.js` (new file)

```diff
@@ -0,0 +1,37 @@
+'use strict'
+
+/**
+ * Compare two data arrays, buffers or strings and invoke the provided callback function for each difference.
+ *
+ * @template {Array|Buffer|string} T
+ * @param {Array|Buffer|string} data1 - The first data array or buffer to compare.
+ * @param {T} data2 - The second data array or buffer to compare.
+ * @param {(index: number, diff: T) => void} [cb] - The callback function to invoke for each difference. If not provided, an array of differences will be returned.
+ * @returns {Array<number|T>|undefined} - An array of differences if no callback is provided, otherwise undefined.
+ */
+module.exports = function diff(data1, data2, cb) {
+  let result
+  if (cb === undefined) {
+    result = []
+    cb = result.push.bind(result)
+  }
+
+  const n1 = data1.length
+  const n2 = data2.length
+  const n = Math.min(n1, n2)
+  for (let i = 0; i < n; ++i) {
+    if (data1[i] !== data2[i]) {
+      let j = i + 1
+      while (j < n && data1[j] !== data2[j]) {
+        ++j
+      }
+      cb(i, data2.slice(i, j))
+      i = j
+    }
+  }
+  if (n1 !== n2) {
+    cb(n, n1 < n2 ? data2.slice(n) : data2.slice(0, 0))
+  }
+
+  return result
+}
```
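The `[index, chunk, …]` pairs produced by `diff` carry enough information to rebuild `data2` from `data1`: each chunk overwrites the same-length span at its index, and the final pair emitted when lengths differ either appends the tail or, as an empty chunk, marks where `data2` ends. A small sketch for the string case (`applyDiff` is an illustrative helper, not part of the package):

```js
'use strict'

const diff = require('@vates/diff')

// illustrative helper: rebuilds data2 from data1 and the pairs emitted by diff()
function applyDiff(data1, patches) {
  let result = data1
  for (let k = 0; k < patches.length; k += 2) {
    const index = patches[k]
    const chunk = patches[k + 1]
    result =
      chunk.length === 0
        ? result.slice(0, index) // empty trailing chunk: data2 stops here
        : result.slice(0, index) + chunk + result.slice(index + chunk.length)
  }
  return result
}

const patches = diff('foo bar baz', 'Foo qux') // [ 0, 'F', 4, 'qux', 7, '' ]
console.log(applyDiff('foo bar baz', patches)) // → 'Foo qux'
```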
`@vates/diff/index.test.js` (new file)

```diff
@@ -0,0 +1,51 @@
+'use strict'
+
+const assert = require('node:assert/strict')
+const test = require('test')
+
+const diff = require('./index.js')
+
+test('data of equal length', function () {
+  const data1 = 'foo bar baz'
+  const data2 = 'baz bar foo'
+  assert.deepEqual(diff(data1, data2), [0, 'baz', 8, 'foo'])
+})
+
+test('data1 is longer', function () {
+  const data1 = 'foo bar'
+  const data2 = 'foo'
+  assert.deepEqual(diff(data1, data2), [3, ''])
+})
+
+test('data2 is longer', function () {
+  const data1 = 'foo'
+  const data2 = 'foo bar'
+  assert.deepEqual(diff(data1, data2), [3, ' bar'])
+})
+
+test('with arrays', function () {
+  const data1 = 'foo bar baz'.split('')
+  const data2 = 'baz bar foo'.split('')
+  assert.deepEqual(diff(data1, data2), [0, 'baz'.split(''), 8, 'foo'.split('')])
+})
+
+test('with buffers', function () {
+  const data1 = Buffer.from('foo bar baz')
+  const data2 = Buffer.from('baz bar foo')
+  assert.deepEqual(diff(data1, data2), [0, Buffer.from('baz'), 8, Buffer.from('foo')])
+})
+
+test('cb param', function () {
+  const data1 = 'foo bar baz'
+  const data2 = 'baz bar foo'
+
+  const calls = []
+  const cb = (...args) => calls.push(args)
+
+  diff(data1, data2, cb)
+
+  assert.deepEqual(calls, [
+    [0, 'baz'],
+    [8, 'foo'],
+  ])
+})
```
`@vates/diff/package.json` (new file)

```diff
@@ -0,0 +1,36 @@
+{
+  "private": false,
+  "name": "@vates/diff",
+  "description": "Computes differences between two arrays, buffers or strings",
+  "keywords": [
+    "array",
+    "binary",
+    "buffer",
+    "diff",
+    "differences",
+    "string"
+  ],
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/diff",
+  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
+  "repository": {
+    "directory": "@vates/diff",
+    "type": "git",
+    "url": "https://github.com/vatesfr/xen-orchestra.git"
+  },
+  "author": {
+    "name": "Vates SAS",
+    "url": "https://vates.fr"
+  },
+  "license": "ISC",
+  "version": "0.1.0",
+  "engines": {
+    "node": ">=8.10"
+  },
+  "scripts": {
+    "postversion": "npm publish --access public",
+    "test": "node--test"
+  },
+  "devDependencies": {
+    "test": "^3.3.0"
+  }
+}
```
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/disposable):
 
-```
-> npm install --save @vates/disposable
+```sh
+npm install --save @vates/disposable
 ```
 
 ## Usage
````
```diff
@@ -14,7 +14,7 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "0.1.2",
+  "version": "0.1.4",
   "engines": {
     "node": ">=8.10"
   },
@@ -25,11 +25,11 @@
   "dependencies": {
     "@vates/multi-key-map": "^0.1.0",
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/log": "^0.4.0",
+    "@xen-orchestra/log": "^0.6.0",
     "ensure-array": "^1.0.0"
   },
   "devDependencies": {
-    "sinon": "^14.0.1",
+    "sinon": "^16.0.0",
     "test": "^3.2.1"
   }
 }
```
````diff
@@ -8,8 +8,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/event-listeners-manager):
 
-```
-> npm install --save @vates/event-listeners-manager
+```sh
+npm install --save @vates/event-listeners-manager
 ```
 
 ## Usage
````
```diff
@@ -1,9 +1,7 @@
-'use strict'
-
-const LRU = require('lru-cache')
-const Fuse = require('fuse-native')
-const { VhdSynthetic } = require('vhd-lib')
-const { Disposable, fromCallback } = require('promise-toolbox')
+import LRU from 'lru-cache'
+import Fuse from 'fuse-native'
+import { VhdSynthetic } from 'vhd-lib'
+import { Disposable, fromCallback } from 'promise-toolbox'
 
 // build a s stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
 const stat = st => ({
@@ -16,7 +14,7 @@ const stat = st => ({
   gid: st.gid !== undefined ? st.gid : process.getgid(),
 })
 
-exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
+export const mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
   const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
 
   const cache = new LRU({
```
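Since `mount` is built with `Disposable.factory`, a consumer would typically run it through `promise-toolbox`'s `Disposable.use`, which disposes of the mount once the callback settles. A hedged usage sketch (the `handler`, `diskPath` and `mountDir` values are placeholders for a remote handler and paths supplied by the caller):

```js
import { Disposable } from 'promise-toolbox'
import { mount } from '@vates/fuse-vhd'

// runs fn while the VHD chain is exposed under mountDir, then disposes
// of the FUSE mount, even if fn throws
async function withMountedVhd(handler, diskPath, mountDir, fn) {
  return Disposable.use(mount(handler, diskPath, mountDir), fn)
}
```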
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@vates/fuse-vhd",
-  "version": "1.0.0",
+  "version": "2.0.0",
   "license": "ISC",
   "private": false,
   "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
@@ -15,13 +15,14 @@
     "url": "https://vates.fr"
   },
   "engines": {
-    "node": ">=10.0"
+    "node": ">=14"
   },
+  "main": "./index.mjs",
   "dependencies": {
     "fuse-native": "^2.2.6",
     "lru-cache": "^7.14.0",
     "promise-toolbox": "^0.21.0",
-    "vhd-lib": "^4.1.1"
+    "vhd-lib": "^4.6.1"
   },
   "scripts": {
     "postversion": "npm publish --access public"
```
````diff
@@ -10,8 +10,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/multi-key-map):
 
-```
-> npm install --save @vates/multi-key-map
+```sh
+npm install --save @vates/multi-key-map
 ```
 
 ## Usage
````
````diff
@@ -8,8 +8,8 @@
 
 Installation of the [npm package](https://npmjs.org/package/@vates/nbd-client):
 
-```
-> npm install --save @vates/nbd-client
+```sh
+npm install --save @vates/nbd-client
 ```
 
 ## Usage
````
`@vates/nbd-client/constants.js` (deleted file)

```diff
@@ -1,42 +0,0 @@
-'use strict'
-exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
-exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
-exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
-exports.NBD_OPT_EXPORT_NAME = 1
-exports.NBD_OPT_ABORT = 2
-exports.NBD_OPT_LIST = 3
-exports.NBD_OPT_STARTTLS = 5
-exports.NBD_OPT_INFO = 6
-exports.NBD_OPT_GO = 7
-
-exports.NBD_FLAG_HAS_FLAGS = 1 << 0
-exports.NBD_FLAG_READ_ONLY = 1 << 1
-exports.NBD_FLAG_SEND_FLUSH = 1 << 2
-exports.NBD_FLAG_SEND_FUA = 1 << 3
-exports.NBD_FLAG_ROTATIONAL = 1 << 4
-exports.NBD_FLAG_SEND_TRIM = 1 << 5
-
-exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
-
-exports.NBD_CMD_FLAG_FUA = 1 << 0
-exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
-exports.NBD_CMD_FLAG_DF = 1 << 2
-exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
-exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
-
-exports.NBD_CMD_READ = 0
-exports.NBD_CMD_WRITE = 1
-exports.NBD_CMD_DISC = 2
-exports.NBD_CMD_FLUSH = 3
-exports.NBD_CMD_TRIM = 4
-exports.NBD_CMD_CACHE = 5
-exports.NBD_CMD_WRITE_ZEROES = 6
-exports.NBD_CMD_BLOCK_STATUS = 7
-exports.NBD_CMD_RESIZE = 8
-
-exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
-exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
-exports.NBD_REPLY_ACK = 1
-
-exports.NBD_DEFAULT_PORT = 10809
-exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
```
`@vates/nbd-client/constants.mjs` (new file)

```diff
@@ -0,0 +1,41 @@
+export const INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
+export const OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
+export const NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
+export const NBD_OPT_EXPORT_NAME = 1
+export const NBD_OPT_ABORT = 2
+export const NBD_OPT_LIST = 3
+export const NBD_OPT_STARTTLS = 5
+export const NBD_OPT_INFO = 6
+export const NBD_OPT_GO = 7
+
+export const NBD_FLAG_HAS_FLAGS = 1 << 0
+export const NBD_FLAG_READ_ONLY = 1 << 1
+export const NBD_FLAG_SEND_FLUSH = 1 << 2
+export const NBD_FLAG_SEND_FUA = 1 << 3
+export const NBD_FLAG_ROTATIONAL = 1 << 4
+export const NBD_FLAG_SEND_TRIM = 1 << 5
+
+export const NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
+
+export const NBD_CMD_FLAG_FUA = 1 << 0
+export const NBD_CMD_FLAG_NO_HOLE = 1 << 1
+export const NBD_CMD_FLAG_DF = 1 << 2
+export const NBD_CMD_FLAG_REQ_ONE = 1 << 3
+export const NBD_CMD_FLAG_FAST_ZERO = 1 << 4
+
+export const NBD_CMD_READ = 0
+export const NBD_CMD_WRITE = 1
+export const NBD_CMD_DISC = 2
+export const NBD_CMD_FLUSH = 3
+export const NBD_CMD_TRIM = 4
+export const NBD_CMD_CACHE = 5
+export const NBD_CMD_WRITE_ZEROES = 6
+export const NBD_CMD_BLOCK_STATUS = 7
+export const NBD_CMD_RESIZE = 8
+
+export const NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
+export const NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
+export const NBD_REPLY_ACK = 1
+
+export const NBD_DEFAULT_PORT = 10809
+export const NBD_DEFAULT_BLOCK_SIZE = 64 * 1024
```
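These values are single-bit masks, so a client checks a server's advertised capabilities with bitwise AND on the flags read during negotiation. A small sketch (the `handshakeFlags` value is a stand-in for whatever the server actually sent):

```js
import { NBD_FLAG_HAS_FLAGS, NBD_FLAG_READ_ONLY, NBD_FLAG_SEND_FLUSH } from './constants.mjs'

// stand-in for the 16-bit transmission flags received from the server
const handshakeFlags = NBD_FLAG_HAS_FLAGS | NBD_FLAG_READ_ONLY

const hasFlags = (handshakeFlags & NBD_FLAG_HAS_FLAGS) !== 0
const readOnly = hasFlags && (handshakeFlags & NBD_FLAG_READ_ONLY) !== 0
const canFlush = hasFlags && (handshakeFlags & NBD_FLAG_SEND_FLUSH) !== 0

console.log({ readOnly, canFlush }) // → { readOnly: true, canFlush: false }
```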
```diff
@@ -1,8 +1,11 @@
-'use strict'
-const assert = require('node:assert')
-const { Socket } = require('node:net')
-const { connect } = require('node:tls')
-const {
+import assert from 'node:assert'
+import { Socket } from 'node:net'
+import { connect } from 'node:tls'
+import { fromCallback, pRetry, pDelay, pTimeout } from 'promise-toolbox'
+import { readChunkStrict } from '@vates/read-chunk'
+import { createLogger } from '@xen-orchestra/log'
+
+import {
   INIT_PASSWD,
   NBD_CMD_READ,
   NBD_DEFAULT_BLOCK_SIZE,
@@ -16,13 +19,14 @@
   NBD_REPLY_MAGIC,
   NBD_REQUEST_MAGIC,
   OPTS_MAGIC,
-} = require('./constants.js')
-const { fromCallback } = require('promise-toolbox')
-const { readChunkStrict } = require('@vates/read-chunk')
+  NBD_CMD_DISC,
+} from './constants.mjs'
+
+const { warn } = createLogger('vates:nbd-client')
 
 // documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
 
-module.exports = class NbdClient {
+export default class NbdClient {
   #serverAddress
   #serverCert
   #serverPort
@@ -31,18 +35,34 @@
   #exportName
   #exportSize
 
+  #waitBeforeReconnect
+  #readAhead
+  #readBlockRetries
+  #reconnectRetry
+  #connectTimeout
+
   // AFAIK, there is no guaranty the server answers in the same order as the queries
   // so we handle a backlog of command waiting for response and handle concurrency manually
 
   #waitingForResponse // there is already a listenner waiting for a response
   #nextCommandQueryId = BigInt(0)
   #commandQueryBacklog // map of command waiting for an response queryId => { size/*in byte*/, resolve, reject}
   #connected = false
 
-  constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
+  #reconnectingPromise
+  constructor(
+    { address, port = NBD_DEFAULT_PORT, exportname, cert },
+    { connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
+  ) {
     this.#serverAddress = address
     this.#serverPort = port
    this.#exportName = exportname
     this.#serverCert = cert
+    this.#waitBeforeReconnect = waitBeforeReconnect
+    this.#readAhead = readAhead
+    this.#readBlockRetries = readBlockRetries
+    this.#reconnectRetry = reconnectRetry
+    this.#connectTimeout = connectTimeout
   }
 
   get exportSize() {
@@ -77,19 +97,55 @@
     })
   }
 
-  async connect() {
-    // first we connect to the serve without tls, and then we upgrade the connection
+  async #connect() {
+    // first we connect to the server without tls, and then we upgrade the connection
     // to tls during the handshake
     await this.#unsecureConnect()
     await this.#handshake()
+
+    this.#connected = true
+    // reset internal state if we reconnected a nbd client
+    this.#commandQueryBacklog = new Map()
+    this.#waitingForResponse = false
+  }
+
+  async connect() {
+    return pTimeout.call(this.#connect(), this.#connectTimeout)
   }
 
   async disconnect() {
+    if (!this.#connected) {
+      return
+    }
+
+    const buffer = Buffer.alloc(28)
+    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
+    buffer.writeInt16BE(0, 4) // no command flags for a disconnect
+    buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
+    await this.#write(buffer)
+
     await this.#serverSocket.destroy()
     this.#serverSocket = undefined
     this.#connected = false
   }
+
+  #clearReconnectPromise = () => {
+    this.#reconnectingPromise = undefined
+  }
+
+  async #reconnect() {
+    await this.disconnect().catch(() => {})
+    await pDelay(this.#waitBeforeReconnect) // need to let the xapi clean things on its side
+    await this.connect()
+  }
+
+  async reconnect() {
+    // we need to ensure reconnections do not occur in parallel
+    if (this.#reconnectingPromise === undefined) {
+      this.#reconnectingPromise = pRetry(() => this.#reconnect(), {
+        tries: this.#reconnectRetry,
+      })
+      this.#reconnectingPromise.then(this.#clearReconnectPromise, this.#clearReconnectPromise)
+    }
+
+    return this.#reconnectingPromise
+  }
 
   // we can use individual read/write from the socket here since there is no concurrency
@@ -167,7 +223,6 @@
     this.#commandQueryBacklog.forEach(({ reject }) => {
       reject(error)
     })
-    await this.disconnect()
   }
 
   async #readBlockResponse() {
@@ -175,7 +230,6 @@
     if (this.#waitingForResponse) {
       return
     }
-
     try {
       this.#waitingForResponse = true
       const magic = await this.#readInt32()
@@ -200,7 +254,8 @@
       query.resolve(data)
       this.#waitingForResponse = false
       if (this.#commandQueryBacklog.size > 0) {
-        await this.#readBlockResponse()
+        // it doesn't throw directly but will throw all relevant promise on failure
+        this.#readBlockResponse()
       }
     } catch (error) {
       // reject all the promises
@@ -211,6 +266,11 @@
   }
 
   async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
+    // we don't want to add anything in backlog while reconnecting
+    if (this.#reconnectingPromise) {
+      await this.#reconnectingPromise
+    }
+
    const queryId = this.#nextCommandQueryId
    this.#nextCommandQueryId++
 
@@ -225,19 +285,67 @@
     buffer.writeInt32BE(size, 24)
 
     return new Promise((resolve, reject) => {
+      function decoratedReject(error) {
+        error.index = index
+        error.size = size
+        reject(error)
+      }
+
       // this will handle one block response, but it can be another block
       // since server does not guaranty to handle query in order
       this.#commandQueryBacklog.set(queryId, {
         size,
         resolve,
-        reject,
+        reject: decoratedReject,
       })
       // really send the command to the server
-      this.#write(buffer).catch(reject)
+      this.#write(buffer).catch(decoratedReject)
+
+      // #readBlockResponse never throws directly
+      // but if it fails it will reject all the promises in the backlog
       this.#readBlockResponse()
     })
   }
 
+  async *readBlocks(indexGenerator) {
+    // default : read all blocks
+    if (indexGenerator === undefined) {
+      const exportSize = this.#exportSize
+      const chunkSize = 2 * 1024 * 1024
+      indexGenerator = function* () {
+        const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
+        for (let index = 0; BigInt(index) < nbBlocks; index++) {
+          yield { index, size: chunkSize }
+        }
+      }
+    }
+    const readAhead = []
+    const readAheadMaxLength = this.#readAhead
+    const makeReadBlockPromise = (index, size) => {
+      const promise = pRetry(() => this.readBlock(index, size), {
+        tries: this.#readBlockRetries,
+        onRetry: async err => {
+          warn('will retry reading block ', index, err)
+          await this.reconnect()
+        },
+      })
+      // error is handled during unshift
+      promise.catch(() => {})
+      return promise
+    }
+
+    // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
+    for (const { index, size } of indexGenerator()) {
+      // stack readAheadMaxLength promises before starting to handle the results
+      if (readAhead.length === readAheadMaxLength) {
+        // any error will stop reading blocks
+        yield readAhead.shift()
+      }
+
+      readAhead.push(makeReadBlockPromise(index, size))
+    }
+    while (readAhead.length > 0) {
+      yield readAhead.shift()
+    }
+  }
 }
```
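With the read-ahead logic above, `readBlocks` keeps up to `readAhead` reads in flight while still yielding blocks in order, and each block is retried `readBlockRetries` times (with a `reconnect()` between attempts) before the iteration fails. A minimal consumption sketch (the address and export name are placeholders):

```js
import NbdClient from '@vates/nbd-client'

const client = new NbdClient(
  { address: 'localhost', exportname: 'MY_SECRET_EXPORT' }, // placeholder connection info
  { readAhead: 10, readBlockRetries: 5 }
)

await client.connect()
try {
  // blocks arrive in order even though up to readAhead reads run concurrently
  for await (const block of client.readBlocks()) {
    process.stdout.write(block)
  }
} finally {
  await client.disconnect()
}
```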
```diff
@@ -1,76 +0,0 @@
-'use strict'
-const NbdClient = require('./index.js')
-const { spawn } = require('node:child_process')
-const fs = require('node:fs/promises')
-const { test } = require('tap')
-const tmp = require('tmp')
-const { pFromCallback } = require('promise-toolbox')
-const { asyncEach } = require('@vates/async-each')
-
-const FILE_SIZE = 2 * 1024 * 1024
-
-async function createTempFile(size) {
-  const tmpPath = await pFromCallback(cb => tmp.file(cb))
-  const data = Buffer.alloc(size, 0)
-  for (let i = 0; i < size; i += 4) {
-    data.writeUInt32BE(i, i)
-  }
-  await fs.writeFile(tmpPath, data)
-
-  return tmpPath
-}
-
-test('it works with unsecured network', async tap => {
-  const path = await createTempFile(FILE_SIZE)
-
-  const nbdServer = spawn(
-    'nbdkit',
-    [
-      'file',
-      path,
-      '--newstyle', //
-      '--exit-with-parent',
-      '--read-only',
-      '--export-name=MY_SECRET_EXPORT',
-    ],
-    {
-      stdio: ['inherit', 'inherit', 'inherit'],
-    }
-  )
-
-  const client = new NbdClient({
-    address: 'localhost',
-    exportname: 'MY_SECRET_EXPORT',
-    secure: false,
-  })
-
-  await client.connect()
-  tap.equal(client.exportSize, BigInt(FILE_SIZE))
-  const CHUNK_SIZE = 128 * 1024 // non default size
-  const indexes = []
-  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
-    indexes.push(i)
-  }
-  // read mutiple blocks in parallel
-  await asyncEach(
-    indexes,
-    async i => {
-      const block = await client.readBlock(i, CHUNK_SIZE)
-      let blockOk = true
-      let firstFail
-      for (let j = 0; j < CHUNK_SIZE; j += 4) {
-        const wanted = i * CHUNK_SIZE + j
-        const found = block.readUInt32BE(j)
-        blockOk = blockOk && found === wanted
-        if (!blockOk && firstFail === undefined) {
-          firstFail = j
-        }
-      }
-      tap.ok(blockOk, `check block ${i} content`)
-    },
-    { concurrency: 8 }
-  )
-  await client.disconnect()
-  nbdServer.kill()
-  await fs.unlink(path)
-})
```
```diff
@@ -13,16 +13,18 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "1.0.0",
+  "version": "2.0.0",
   "engines": {
     "node": ">=14.0"
   },
+  "main": "./index.mjs",
   "dependencies": {
     "@vates/async-each": "^1.0.0",
-    "@vates/read-chunk": "^1.0.1",
+    "@vates/read-chunk": "^1.2.0",
     "@xen-orchestra/async-map": "^0.1.2",
+    "@xen-orchestra/log": "^0.6.0",
     "promise-toolbox": "^0.21.0",
-    "xen-api": "^1.2.2"
+    "xen-api": "^1.3.6"
   },
   "devDependencies": {
     "tap": "^16.3.0",
@@ -30,6 +32,6 @@
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test-integration": "tap *.spec.js"
+    "test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.mjs"
   }
 }
```
182
@vates/nbd-client/tests/ca-cert.pem
Normal file
182
@vates/nbd-client/tests/ca-cert.pem
Normal file
@@ -0,0 +1,182 @@
|
||||
Public Key Info:
|
||||
Public Key Algorithm: RSA
|
||||
Key Security Level: High (3072 bits)
|
||||
|
||||
modulus:
|
||||
00:be:92:be:df:de:0a:ab:38:fc:1a:c0:1a:58:4d:86
|
||||
b8:1f:25:10:7d:19:05:17:bf:02:3d:e9:ef:f8:c0:04
|
||||
5d:6f:98:de:5c:dd:c3:0f:e2:61:61:e4:b5:9c:42:ac
|
||||
3e:af:fd:30:10:e1:54:32:66:75:f6:80:90:85:05:a0
|
||||
6a:14:a2:6f:a7:2e:f0:f3:52:94:2a:f2:34:fc:0d:b4
|
||||
fb:28:5d:1c:11:5c:59:6e:63:34:ba:b3:fd:73:b1:48
|
||||
35:00:84:53:da:6a:9b:84:ab:64:b1:a1:2b:3a:d1:5a
|
||||
d7:13:7c:12:2a:4e:72:e9:96:d6:30:74:c5:71:05:14
|
||||
4b:2d:01:94:23:67:4e:37:3c:1e:c1:a0:bc:34:04:25
|
||||
21:11:fb:4b:6b:53:74:8f:90:93:57:af:7f:3b:78:d6
|
||||
a4:87:fe:7d:ed:20:11:8b:70:54:67:b8:c9:f5:c0:6b
|
||||
de:4e:e7:a5:79:ff:f7:ad:cf:10:57:f5:51:70:7b:54
|
||||
68:28:9e:b9:c2:10:7b:ab:aa:11:47:9f:ec:e6:2f:09
|
||||
44:4a:88:5b:dd:8c:10:b4:c4:03:25:06:d9:e0:9f:a0
|
||||
0d:cf:94:4b:3b:fa:a5:17:2c:e4:67:c4:17:6a:ab:d8
|
||||
c8:7a:16:41:b9:91:b7:9c:ae:8c:94:be:26:61:51:71
|
||||
c1:a6:39:39:97:75:28:a9:0e:21:ea:f0:bd:71:4a:8c
|
||||
e1:f8:1d:a9:22:2f:10:a8:1b:e5:a4:9a:fd:0f:fa:c6
|
||||
20:bc:96:99:79:c6:ba:a4:1f:3e:d4:91:c5:af:bb:71
|
||||
0a:5a:ef:69:9c:64:69:ce:5a:fe:3f:c2:24:f4:26:d4
|
||||
3d:ab:ab:9a:f0:f6:f1:b1:64:a9:f4:e2:34:6a:ab:2e
|
||||
95:47:b9:07:5a:39:c6:95:9c:a9:e8:ed:71:dd:c1:21
|
||||
16:c8:2d:4c:2c:af:06:9d:c6:fa:fe:c5:2a:6c:b4:c3
|
||||
d5:96:fc:5e:fd:ec:1c:30:b4:9d:cb:29:ef:a8:50:1c
|
||||
21:
|
||||
|
||||
public exponent:
|
||||
01:00:01:
|
||||
|
||||
private exponent:
|
||||
25:37:c5:7d:35:01:02:65:73:9e:c9:cb:9b:59:30:a9
|
||||
3e:b3:df:5f:7f:06:66:97:d0:19:45:59:af:4b:d8:ce
|
||||
62:a0:09:35:3b:bd:ff:99:27:89:95:bf:fe:0f:6b:52
|
||||
26:ce:9c:97:7f:5a:11:29:bf:79:ef:ab:c9:be:ca:90
|
||||
4d:0d:58:1e:df:65:01:30:2c:6d:a2:b5:c4:4f:ec:fb
|
||||
6b:eb:9b:32:ac:c5:6e:70:83:78:be:f4:0d:a7:1e:c1
|
||||
f3:22:e4:b9:70:3e:85:0f:6f:ef:dc:d8:f3:78:b5:73
|
||||
f1:83:36:8c:fa:9b:28:91:63:ad:3c:f0:de:5c:ae:94
|
||||
eb:ea:36:03:20:06:bf:74:c7:50:eb:52:36:1a:65:21
|
||||
eb:40:17:7f:93:61:dd:33:d0:02:bc:ec:6d:31:f1:41
|
||||
5a:a9:d1:f0:00:66:4c:c4:18:47:d5:67:e3:cd:bb:83
|
||||
44:07:ab:62:83:21:dc:d8:e6:89:37:08:bb:9d:ea:62
|
||||
c2:5d:ce:85:c2:dc:48:27:0c:a4:23:61:b7:30:e7:26
|
||||
44:dc:1e:5c:2e:16:35:2b:2e:a6:e6:a4:ce:1f:9b:e9
|
||||
fe:96:fa:49:1d:fb:2a:df:bc:bf:46:da:52:f8:37:8a
|
||||
84:ab:e4:73:e6:46:56:b5:b4:3d:e1:63:eb:02:8e:d7
|
||||
67:96:c4:dc:28:6d:6b:b6:0c:a3:0b:db:87:29:ad:f9
|
||||
ec:73:b6:55:a3:40:32:13:84:c7:2f:33:74:04:dc:42
|
||||
00:11:9c:fb:fc:62:35:b3:82:c3:3c:28:80:e8:09:a8
|
||||
97:c7:c1:2e:3d:27:fa:4f:9b:fc:c2:34:58:41:5c:a1
|
||||
e2:70:2e:2f:82:ad:bd:bd:8e:dd:23:12:25:de:89:70
|
||||
60:75:48:90:80:ac:55:74:51:6f:49:9e:7f:63:41:8b
|
||||
3c:b1:f5:c3:6b:4b:5a:50:a6:4d:38:e8:82:c2:04:c8
|
||||
30:fd:06:9b:c1:04:27:b6:63:3a:5e:f5:4d:00:c3:d1
|
||||
|
||||
|
||||
prime1:
|
||||
00:f6:00:2e:7d:89:61:24:16:5e:87:ca:18:6c:03:b8
|
||||
    b4:33:df:4a:a7:7f:db:ed:39:15:41:12:61:4f:4e:b4
    de:ab:29:d9:0c:6c:01:7e:53:2e:ee:e7:5f:a2:e4:6d
    c6:4b:07:4e:d8:a3:ae:45:06:97:bd:18:a3:e9:dd:29
    54:64:6d:f0:af:08:95:ae:ae:3e:71:63:76:2a:a1:18
    c4:b1:fc:bc:3d:42:15:74:b3:c5:38:1f:5d:92:f1:b2
    c6:3f:10:fe:35:1a:c6:b1:ce:70:38:ff:08:5c:de:61
    79:c7:50:91:22:4d:e9:c8:18:49:e2:5c:91:84:86:e2
    4d:0f:6e:9b:0d:81:df:aa:f3:59:75:56:e9:33:18:dd
    ab:39:da:e2:25:01:05:a1:6e:23:59:15:2c:89:35:c7
    ae:9c:c7:ea:88:9a:1a:f3:48:07:11:82:59:79:8c:62
    53:06:37:30:14:b3:82:b1:50:fc:ae:b8:f7:1c:57:44
    7d:
prime2:
    00:c6:51:cc:dc:88:2e:cf:98:90:10:19:e0:d3:a4:d1
    3f:dc:b0:29:d3:bb:26:ee:eb:00:17:17:d1:d1:bb:9b
    34:b1:4e:af:b5:6c:1c:54:53:b4:bb:55:da:f7:78:cd
    38:b4:2e:3a:8c:63:80:3b:64:9c:b4:2b:cd:dd:50:0b
    05:d2:00:7a:df:8e:c3:e6:29:e0:9c:d8:40:b7:11:09
    f4:38:df:f6:ed:93:1e:18:d4:93:fa:8d:ee:82:9c:0f
    c1:88:26:84:9d:4f:ae:8a:17:d5:55:54:4c:c6:0a:ac
    4d:ec:33:51:68:0f:4b:92:2e:04:57:fe:15:f5:00:46
    5c:8e:ad:09:2c:e7:df:d5:36:7a:4e:bd:da:21:22:d7
    58:b4:72:93:94:af:34:cc:e2:b8:d0:4f:0b:5d:97:08
    12:19:17:34:c5:15:49:00:48:56:13:b8:45:4e:3b:f8
    bc:d5:ab:d9:6d:c2:4a:cc:01:1a:53:4d:46:50:49:3b
    75:
coefficient:
    63:67:50:29:10:6a:85:a3:dc:51:90:20:76:86:8c:83
    8e:d5:ff:aa:75:fd:b5:f8:31:b0:96:6c:18:1d:5b:ed
    a4:2e:47:8d:9c:c2:1e:2c:a8:6d:4b:10:a5:c2:53:46
    8a:9a:84:91:d7:fc:f5:cc:03:ce:b9:3d:5c:01:d2:27
    99:7b:79:89:4f:a1:12:e3:05:5d:ee:10:f6:8c:e6:ce
    5e:da:32:56:6d:6f:eb:32:b4:75:7b:94:49:d8:2d:9e
    4d:19:59:2e:e4:0b:bc:95:df:df:65:67:a1:dd:c6:2b
    99:f4:76:e8:9f:fa:57:1d:ca:f9:58:a9:ce:9b:30:5c
    42:8a:ba:05:e7:e2:15:45:25:bc:e9:68:c1:8b:1a:37
    cc:e1:aa:45:2e:94:f5:81:47:1e:64:7f:c0:c1:b7:a8
    21:58:18:a9:a0:ed:e0:27:75:bf:65:81:6b:e4:1d:5a
    b7:7e:df:d8:28:c6:36:21:19:c8:6e:da:ca:9e:da:84
exp1:
    00:ba:d7:fe:77:a9:0d:98:2c:49:56:57:c0:5e:e2:20
    ba:f6:1f:26:03:bc:d0:5d:08:9b:45:16:61:c4:ab:e2
    22:b1:dc:92:17:a6:3d:28:26:a4:22:1e:a8:7b:ff:86
    05:33:5d:74:9c:85:0d:cb:2d:ab:b8:9b:6b:7c:28:57
    c8:da:92:ca:59:17:6b:21:07:05:34:78:37:fb:3e:ea
    a2:13:12:04:23:7e:fa:ee:ed:cf:e0:c5:a9:fb:ff:0a
    2b:1b:21:9c:02:d7:b8:8c:ba:60:70:59:fc:8f:14:f4
    f2:5a:d9:ad:b2:61:7d:2c:56:8e:5f:98:b1:89:f8:2d
    10:1c:a5:84:ad:28:b4:aa:92:34:a3:34:04:e1:a3:84
    52:16:1a:52:e3:8a:38:2d:99:8a:cd:91:90:87:12:ca
    fc:ab:e6:08:14:03:00:6f:41:88:e4:da:9d:7c:fd:8c
    7c:c4:de:cb:ed:1d:3f:29:d0:7a:6b:76:df:71:ae:32
    bd:
exp2:
    4a:e9:d3:6c:ea:b4:64:0e:c9:3c:8b:c9:f5:a8:a8:b2
    6a:f6:d0:95:fe:78:32:7f:ea:c4:ce:66:9f:c7:32:55
    b1:34:7c:03:18:17:8b:73:23:2e:30:bc:4a:07:03:de
    8b:91:7a:e4:55:21:b7:4d:c6:33:f8:e8:06:d5:99:94
    55:43:81:26:b9:93:1e:7a:6b:32:54:2d:fd:f9:1d:bd
    77:4e:82:c4:33:72:87:06:a5:ef:5b:75:e1:38:7a:6b
    2c:b7:00:19:3c:64:3e:1d:ca:a4:34:f7:db:47:64:d6
    fa:86:58:15:ea:d1:2d:22:dc:d9:30:4d:b3:02:ab:91
    83:03:b2:17:98:6f:60:e6:f7:44:8f:4a:ba:81:a2:bf
    0b:4a:cc:9c:b9:a2:44:52:d0:65:3f:b6:97:5f:d9:d8
    9c:49:bb:d1:46:bd:10:b2:42:71:a8:85:e5:8b:99:e6
    1b:00:93:5d:76:ab:32:6c:a8:39:17:53:9c:38:4d:91
Public Key PIN:
    pin-sha256:ISh/UeFjUG5Gwrpx6hMUGQPvg9wOKjOkHmRbs4YjZqs=
Public Key ID:
    sha256:21287f51e163506e46c2ba71ea13141903ef83dc0e2a33a41e645bb3862366ab
    sha1:1a48455111ac45fb5807c5cdb7b20b896c52f0b6
-----BEGIN RSA PRIVATE KEY-----
MIIG4wIBAAKCAYEAvpK+394Kqzj8GsAaWE2GuB8lEH0ZBRe/Aj3p7/jABF1vmN5c
3cMP4mFh5LWcQqw+r/0wEOFUMmZ19oCQhQWgahSib6cu8PNSlCryNPwNtPsoXRwR
XFluYzS6s/1zsUg1AIRT2mqbhKtksaErOtFa1xN8EipOcumW1jB0xXEFFEstAZQj
Z043PB7BoLw0BCUhEftLa1N0j5CTV69/O3jWpIf+fe0gEYtwVGe4yfXAa95O56V5
//etzxBX9VFwe1RoKJ65whB7q6oRR5/s5i8JREqIW92MELTEAyUG2eCfoA3PlEs7
+qUXLORnxBdqq9jIehZBuZG3nK6MlL4mYVFxwaY5OZd1KKkOIerwvXFKjOH4Haki
LxCoG+Wkmv0P+sYgvJaZeca6pB8+1JHFr7txClrvaZxkac5a/j/CJPQm1D2rq5rw
9vGxZKn04jRqqy6VR7kHWjnGlZyp6O1x3cEhFsgtTCyvBp3G+v7FKmy0w9WW/F79
7BwwtJ3LKe+oUBwhAgMBAAECggGAJTfFfTUBAmVznsnLm1kwqT6z319/BmaX0BlF
Wa9L2M5ioAk1O73/mSeJlb/+D2tSJs6cl39aESm/ee+ryb7KkE0NWB7fZQEwLG2i
tcRP7Ptr65syrMVucIN4vvQNpx7B8yLkuXA+hQ9v79zY83i1c/GDNoz6myiRY608
8N5crpTr6jYDIAa/dMdQ61I2GmUh60AXf5Nh3TPQArzsbTHxQVqp0fAAZkzEGEfV
Z+PNu4NEB6tigyHc2OaJNwi7nepiwl3OhcLcSCcMpCNhtzDnJkTcHlwuFjUrLqbm
pM4fm+n+lvpJHfsq37y/RtpS+DeKhKvkc+ZGVrW0PeFj6wKO12eWxNwobWu2DKML
24cprfnsc7ZVo0AyE4THLzN0BNxCABGc+/xiNbOCwzwogOgJqJfHwS49J/pPm/zC
NFhBXKHicC4vgq29vY7dIxIl3olwYHVIkICsVXRRb0mef2NBizyx9cNrS1pQpk04
6ILCBMgw/QabwQQntmM6XvVNAMPRAoHBAPYALn2JYSQWXofKGGwDuLQz30qnf9vt
ORVBEmFPTrTeqynZDGwBflMu7udfouRtxksHTtijrkUGl70Yo+ndKVRkbfCvCJWu
rj5xY3YqoRjEsfy8PUIVdLPFOB9dkvGyxj8Q/jUaxrHOcDj/CFzeYXnHUJEiTenI
GEniXJGEhuJND26bDYHfqvNZdVbpMxjdqzna4iUBBaFuI1kVLIk1x66cx+qImhrz
SAcRgll5jGJTBjcwFLOCsVD8rrj3HFdEfQKBwQDGUczciC7PmJAQGeDTpNE/3LAp
07sm7usAFxfR0bubNLFOr7VsHFRTtLtV2vd4zTi0LjqMY4A7ZJy0K83dUAsF0gB6
347D5ingnNhAtxEJ9Djf9u2THhjUk/qN7oKcD8GIJoSdT66KF9VVVEzGCqxN7DNR
aA9Lki4EV/4V9QBGXI6tCSzn39U2ek692iEi11i0cpOUrzTM4rjQTwtdlwgSGRc0
xRVJAEhWE7hFTjv4vNWr2W3CSswBGlNNRlBJO3UCgcEAutf+d6kNmCxJVlfAXuIg
uvYfJgO80F0Im0UWYcSr4iKx3JIXpj0oJqQiHqh7/4YFM110nIUNyy2ruJtrfChX
yNqSylkXayEHBTR4N/s+6qITEgQjfvru7c/gxan7/worGyGcAte4jLpgcFn8jxT0
8lrZrbJhfSxWjl+YsYn4LRAcpYStKLSqkjSjNATho4RSFhpS44o4LZmKzZGQhxLK
/KvmCBQDAG9BiOTanXz9jHzE3svtHT8p0Hprdt9xrjK9AoHASunTbOq0ZA7JPIvJ
9aiosmr20JX+eDJ/6sTOZp/HMlWxNHwDGBeLcyMuMLxKBwPei5F65FUht03GM/jo
BtWZlFVDgSa5kx56azJULf35Hb13ToLEM3KHBqXvW3XhOHprLLcAGTxkPh3KpDT3
20dk1vqGWBXq0S0i3NkwTbMCq5GDA7IXmG9g5vdEj0q6gaK/C0rMnLmiRFLQZT+2
l1/Z2JxJu9FGvRCyQnGoheWLmeYbAJNddqsybKg5F1OcOE2RAoHAY2dQKRBqhaPc
UZAgdoaMg47V/6p1/bX4MbCWbBgdW+2kLkeNnMIeLKhtSxClwlNGipqEkdf89cwD
zrk9XAHSJ5l7eYlPoRLjBV3uEPaM5s5e2jJWbW/rMrR1e5RJ2C2eTRlZLuQLvJXf
32Vnod3GK5n0duif+lcdyvlYqc6bMFxCiroF5+IVRSW86WjBixo3zOGqRS6U9YFH
HmR/wMG3qCFYGKmg7eAndb9lgWvkHVq3ft/YKMY2IRnIbtrKntqE
-----END RSA PRIVATE KEY-----
168
@vates/nbd-client/tests/nbdclient.integ.mjs
Normal file
@@ -0,0 +1,168 @@
import NbdClient from '../index.mjs'
import { spawn, exec } from 'node:child_process'
import fs from 'node:fs/promises'
import { test } from 'tap'
import tmp from 'tmp'
import { pFromCallback } from 'promise-toolbox'
import { Socket } from 'node:net'
import { NBD_DEFAULT_PORT } from '../constants.mjs'
import assert from 'node:assert'

const FILE_SIZE = 10 * 1024 * 1024

async function createTempFile(size) {
  const tmpPath = await pFromCallback(cb => tmp.file(cb))
  const data = Buffer.alloc(size, 0)
  for (let i = 0; i < size; i += 4) {
    data.writeUInt32BE(i, i)
  }
  await fs.writeFile(tmpPath, data)

  return tmpPath
}

async function spawnNbdKit(path) {
  let tries = 5
  // wait for server to be ready

  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
      '--tls=on',
      '--tls-certificates=./tests/',
      // '--tls-verify-peer',
      // '--verbose',
      '--exit-with-parent',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  nbdServer.on('error', err => {
    console.error(err)
  })
  do {
    try {
      const socket = new Socket()
      await new Promise((resolve, reject) => {
        socket.connect(NBD_DEFAULT_PORT, 'localhost')
        socket.once('error', reject)
        socket.once('connect', resolve)
      })
      socket.destroy()
      break
    } catch (err) {
      tries--
      if (tries <= 0) {
        throw err
      } else {
        await new Promise(resolve => setTimeout(resolve, 1000))
      }
    }
  } while (true)
  return nbdServer
}

async function killNbdKit() {
  return new Promise((resolve, reject) =>
    exec('pkill -9 -f -o nbdkit', err => {
      err ? reject(err) : resolve()
    })
  )
}

test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)

  let nbdServer = await spawnNbdKit(path)
  const client = new NbdClient(
    {
      address: '127.0.0.1',
      exportname: 'MY_SECRET_EXPORT',
      cert: `-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
`,
    },
    {
      readAhead: 2,
    }
  )

  await client.connect()
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 1024 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  const nbdIterator = client.readBlocks(function* () {
    for (const index of indexes) {
      yield { index, size: CHUNK_SIZE }
    }
  })
  let i = 0
  for await (const block of nbdIterator) {
    let blockOk = true
    let firstFail
    for (let j = 0; j < CHUNK_SIZE; j += 4) {
      const wanted = i * CHUNK_SIZE + j
      const found = block.readUInt32BE(j)
      blockOk = blockOk && found === wanted
      if (!blockOk && firstFail === undefined) {
        firstFail = j
      }
    }
    tap.ok(blockOk, `check block ${i} content`)
    i++

    // flaky server is flaky
    if (i % 7 === 0) {
      // kill the older nbdkit process
      await killNbdKit()
      nbdServer = await spawnNbdKit(path)
    }
  }

  // we can reuse the connection to read other blocks
  // default iterator
  const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
  let nb = 0
  for await (const block of nbdIteratorWithDefaultBlockIterator) {
    nb++
    tap.equal(block.length, 2 * 1024 * 1024)
  }

  tap.equal(nb, 5)
  await assert.rejects(() => client.readBlock(100, CHUNK_SIZE))

  await client.disconnect()
  // double disconnection shouldn't pose any problem
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})
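Distilled from the test above, a minimal usage sketch of the client. It assumes the package is imported as `@vates/nbd-client`; the export name is a placeholder, and the option names (`address`, `exportname`, `readAhead`) and the `readBlock(index, size)` call are taken from the test (a `cert` option can be added as in the test when the server uses TLS):

```js
import NbdClient from '@vates/nbd-client'

// hypothetical export served by an NBD server on localhost
const client = new NbdClient({ address: '127.0.0.1', exportname: 'my_export' }, { readAhead: 2 })

await client.connect()
console.log(client.exportSize) // a BigInt, compared with BigInt(FILE_SIZE) in the test

// read the first block, using the 1 MiB block size exercised by the test
const block = await client.readBlock(0, 1024 * 1024)
console.log(block.length)

await client.disconnect()
```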
21
@vates/nbd-client/tests/server-cert.pem
Normal file
@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
57ABhfYpMlZ4aHjuN1bL
-----END CERTIFICATE-----
28
@vates/nbd-client/tests/server-key.pem
Normal file
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/8wLopj/iZY6i
jmpvgCJsl+zY0hQZQcIoaCs0H75u8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZol
evaSJLNT2Iolscvc2W9NCF4N1V6yzs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh
67u+uI40732AfQqD01BNCTD/uHRBlKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y
2SJVTeT4a1sSJixl6I1YPmt80FJhgq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULw
dJOGgmqGRDzgZKJS5UUpxe/ViEO459I18vIkgibaRYhENgmnP3lIzTOLlUe07tbS
ML5RGBbBAgMBAAECggEATLYiafcTHfgnZmjTOad0WoDnC4n9tVBV948WARlUooLS
duL3RQRHCLz9/ZaTuFA1XDpNcYyc/B/IZoU7aJGZR3+JSmJBjowpUphu+klVNNG4
i6lDRrzYlUI0hfdLjHsDTDBIKi91KcB0lix/VkvsrVQvDHwsiR2ZAIiVWAWQFKrR
5O3DhSTHbqyq47uR58rWr4Zf3zvZaUl841AS1yELzCiZqz7AenvyWphim0c0XA5d
I63CEShntHnEAA9OMcP8+BNf/3AmqB4welY+m8elB3aJNH+j7DKq/AWqaM5nl2PC
cS6qgpxwOyTxEOyj1xhwK5ZMRR3heW3NfutIxSOPlwKBgQDB9ZkrBeeGVtCISO7C
eCANzSLpeVrahTvaCSQLdPHsLRLDUc+5mxdpi3CaRlzYs3S1OWdAtyWX9mBryltF
qDPhCNjFDyHok4D3wLEWdS9oUVwEKUM8fOPW3tXLLiMM7p4862Qo7LqnqHzPqsnz
22iZo5yjcc7aLJ+VmFrbAowwOwKBgQD9WNCvczTd7Ymn7zEvdiAyNoS0OZ0orwEJ
zGaxtjqVguGklNfrb/UB+eKNGE80+YnMiSaFc9IQPetLntZdV0L7kWYdCI8kGDNA
DbVRCOp+z8DwAojlrb/zsYu23anQozT3WeHxVU66lNuyEQvSW2tJa8gN1htrD7uY
5KLibYrBMwKBgEM0iiHyJcrSgeb2/mO7o7+keJhVSDm3OInP6QFfQAQJihrLWiKB
rpcPjbCm+LzNUX8JqNEvpIMHB1nR/9Ye9frfSdzd5W3kzicKSVHywL5wkmWOtpFa
5Mcq5wFDtzlf5MxO86GKhRJauwRptRgdyhySKFApuva1x4XaCIEiXNjJAoGBAN82
t3c+HCBEv3o05rMYcrmLC1T3Rh6oQlPtwbVmByvfywsFEVCgrc/16MPD3VWhXuXV
GRmPuE8THxLbead30M5xhvShq+xzXgRbj5s8Lc9ZIHbW5OLoOS1vCtgtaQcoJOyi
Rs4pCVqe+QpktnO6lEZ2Libys+maTQEiwNibBxu9AoGAUG1V5aKMoXa7pmGeuFR6
ES+1NDiCt6yDq9BsLZ+e2uqvWTkvTGLLwvH6xf9a0pnnILd0AUTKAAaoUdZS6++E
cGob7fxMwEE+UETp0QBgLtfjtExMOFwr2avw8PV4CYEUkPUAm2OFB2Twh+d/PNfr
FAxF1rN47SBPNbFI8N4TFsg=
-----END PRIVATE KEY-----
1
@vates/node-vsphere-soap/.npmignore
Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
22
@vates/node-vsphere-soap/LICENSE
Normal file
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2015 reedog117

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
127
@vates/node-vsphere-soap/README.md
Normal file
@@ -0,0 +1,127 @@
forked from https://github.com/reedog117/node-vsphere-soap

# node-vsphere-soap

[](https://gitter.im/reedog117/node-vsphere-soap?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

This is a Node.js module to connect to VMware vCenter servers and/or ESXi hosts and perform operations using the [vSphere Web Services API]. If you're feeling really adventurous, you can use this module to port vSphere operations from other languages (such as the Perl, Python, and Go libraries that exist) and have fully native Node.js code controlling your VMware virtual infrastructure!

This is very much in alpha.

## Authors

- Patrick C - [@reedog117]

## Version

0.0.2-5

## Installation

```sh
$ npm install node-vsphere-soap --save
```

## Sample Code

### To connect to a vCenter server:

    var nvs = require('node-vsphere-soap');
    var vc = new nvs.Client(host, user, password, sslVerify);
    vc.once('ready', function() {
      // perform work here
    });
    vc.once('error', function(err) {
      // handle error here
    });

#### Arguments

- host = hostname or IP of vCenter/ESX/ESXi server
- user = username
- password = password
- sslVerify = true|false - set to false if you have self-signed/unverified certificates

#### Events

- ready = emits when session authenticated with server
- error = emits when there's an error
  - _err_ contains the error

#### Client instance variables

- serviceContent - ServiceContent object retrieved by RetrieveServiceContent API call
- userName - username of authenticated user
- fullName - full name of authenticated user
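For instance, once the `ready` event fires these variables can be read directly from the client. A minimal sketch; the host and credentials are placeholders:

```js
var nvs = require('node-vsphere-soap');
var vc = new nvs.Client('vcenter.example.org', 'admin', 'secret', false);

vc.once('ready', function () {
  // populated after a successful login
  console.log(vc.userName);
  console.log(vc.fullName);
  // serviceContent holds the ServiceContent object, e.g. its sessionManager reference
  console.log(vc.serviceContent.sessionManager);
});
```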
### To run a command:

    var vcCmd = vc.runCommand( commandToRun, arguments );
    vcCmd.once('result', function( result, raw, soapHeader) {
      // handle results
    });
    vcCmd.once('error', function( err) {
      // handle errors
    });

#### Arguments

- commandToRun = Method from the vSphere API
- arguments = JSON document containing arguments to send

#### Events

- result = emits when the server returns a result for the command
  - _result_ contains the JSON-formatted result from the server
  - _raw_ contains the raw SOAP XML response from the server
  - _soapHeader_ contains any soapHeaders from the server
- error = emits when there's an error
  - _err_ contains the error

Make sure you check out tests/vsphere-soap.test.js for examples on how to create commands to run.

## Development

node-vsphere-soap uses a number of open source projects to work properly:

- [node.js] - evented I/O for the backend
- [node-soap] - SOAP client for Node.js
- [soap-cookie] - cookie authentication for the node-soap module
- [lodash] - for quickly manipulating JSON
- [lab] - testing engine
- [code] - assertion engine used with lab

Want to contribute? Great!

### Todo's

- Write More Tests
- Create Travis CI test harness with a fake vCenter Instance
- Add Code Comments

### Testing

I have been testing on a Mac with node v0.10.36 and both ESXi and vCenter 5.5.

To edit tests, edit the file **test/vsphere-soap.test.js**

To point the module at your own vCenter/ESXi host, edit **config-test.stub.js** and save it as **config-test.js**

To run test scripts:

```sh
$ npm test
```

## License

MIT

[vSphere Web Services API]: http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/right-pane.html
[node-soap]: https://github.com/vpulim/node-soap
[node.js]: http://nodejs.org/
[soap-cookie]: https://github.com/shanestillwell/soap-cookie
[code]: https://github.com/hapijs/code
[lab]: https://github.com/hapijs/lab
[lodash]: https://lodash.com/
[@reedog117]: http://www.twitter.com/reedog117
230
@vates/node-vsphere-soap/lib/client.mjs
Normal file
@@ -0,0 +1,230 @@
/*

  node-vsphere-soap

  client.js

  This file creates the Client class

  - when the class is instantiated, a connection will be made to the ESXi/vCenter server to verify that the creds are good
  - upon a bad login, the connection will be terminated

*/

import { EventEmitter } from 'events'
import axios from 'axios'
import https from 'node:https'
import util from 'util'
import soap from 'soap'
import Cookie from 'soap-cookie' // required for session persistence

// Client class
// inherits from EventEmitter
// possible events: connect, error, ready

export function Client(vCenterHostname, username, password, sslVerify) {
  this.status = 'disconnected'
  this.reconnectCount = 0

  sslVerify = typeof sslVerify !== 'undefined' ? sslVerify : false

  EventEmitter.call(this)

  // sslVerify argument handling
  if (sslVerify) {
    this.clientopts = {}
  } else {
    this.clientopts = {
      request: axios.create({
        httpsAgent: new https.Agent({
          rejectUnauthorized: false,
        }),
      }),
    }
  }

  this.connectionInfo = {
    host: vCenterHostname,
    user: username,
    password,
    sslVerify,
  }

  this._loginArgs = {
    userName: this.connectionInfo.user,
    password: this.connectionInfo.password,
  }

  this._vcUrl = 'https://' + this.connectionInfo.host + '/sdk/vimService.wsdl'

  // connect to the vCenter / ESXi host
  this.on('connect', this._connect)
  this.emit('connect')

  // close session
  this.on('close', this._close)

  return this
}

util.inherits(Client, EventEmitter)

Client.prototype.runCommand = function (command, args) {
  const self = this
  let cmdargs
  if (!args || args === null) {
    cmdargs = {}
  } else {
    cmdargs = args
  }

  const emitter = new EventEmitter()

  // check if client has successfully connected
  if (self.status === 'ready' || self.status === 'connecting') {
    self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
      if (err) {
        _soapErrorHandler(self, emitter, command, cmdargs, err)
      }
      if (command === 'Logout') {
        self.status = 'disconnected'
        process.removeAllListeners('beforeExit')
      }
      emitter.emit('result', result, raw, soapHeader)
    })
  } else {
    // if connection not ready or connecting, reconnect to instance
    if (self.status === 'disconnected') {
      self.emit('connect')
    }
    self.once('ready', function () {
      self.client.VimService.VimPort[command](cmdargs, function (err, result, raw, soapHeader) {
        if (err) {
          _soapErrorHandler(self, emitter, command, cmdargs, err)
        }
        if (command === 'Logout') {
          self.status = 'disconnected'
          process.removeAllListeners('beforeExit')
        }
        emitter.emit('result', result, raw, soapHeader)
      })
    })
  }

  return emitter
}

Client.prototype.close = function () {
  const self = this

  self.emit('close')
}

Client.prototype._connect = function () {
  const self = this

  if (self.status !== 'disconnected') {
    return
  }

  self.status = 'connecting'

  soap.createClient(
    self._vcUrl,
    self.clientopts,
    function (err, client) {
      if (err) {
        self.emit('error', err)
        throw err
      }

      self.client = client // save client for later use

      self
        .runCommand('RetrieveServiceContent', { _this: 'ServiceInstance' })
        .once('result', function (result, raw, soapHeader) {
          if (!result.returnval) {
            self.status = 'disconnected'
            self.emit('error', raw)
            return
          }

          self.serviceContent = result.returnval
          self.sessionManager = result.returnval.sessionManager
          const loginArgs = { _this: self.sessionManager, ...self._loginArgs }

          self
            .runCommand('Login', loginArgs)
            .once('result', function (result, raw, soapHeader) {
              self.authCookie = new Cookie(client.lastResponseHeaders)
              self.client.setSecurity(self.authCookie) // needed since vSphere SOAP WS uses cookies

              self.userName = result.returnval.userName
              self.fullName = result.returnval.fullName
              self.reconnectCount = 0

              self.status = 'ready'
              self.emit('ready')
              process.once('beforeExit', self._close)
            })
            .once('error', function (err) {
              self.status = 'disconnected'
              self.emit('error', err)
            })
        })
        .once('error', function (err) {
          self.status = 'disconnected'
          self.emit('error', err)
        })
    },
    self._vcUrl
  )
}

Client.prototype._close = function () {
  const self = this

  if (self.status === 'ready') {
    self
      .runCommand('Logout', { _this: self.sessionManager })
      .once('result', function () {
        self.status = 'disconnected'
      })
      .once('error', function () {
        /* ignore errors during disconnection */
        self.status = 'disconnected'
      })
  } else {
    self.status = 'disconnected'
  }
}

function _soapErrorHandler(self, emitter, command, args, err) {
  err = err || { body: 'general error' }

  if (err.body.match(/session is not authenticated/)) {
    self.status = 'disconnected'
    process.removeAllListeners('beforeExit')

    if (self.reconnectCount < 10) {
      self.reconnectCount += 1
      self
        .runCommand(command, args)
        .once('result', function (result, raw, soapHeader) {
          emitter.emit('result', result, raw, soapHeader)
        })
        .once('error', function (err) {
          emitter.emit('error', err.body)
          throw err
        })
    } else {
      emitter.emit('error', err.body)
      throw err
    }
  } else {
    emitter.emit('error', err.body)
    throw err
  }
}

// end
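A minimal sketch of the class above in use, mirroring what test/vsphere-soap.test.mjs does: instantiate a `Client`, wait for `ready`, then issue a `CurrentTime` call against `ServiceInstance` through `runCommand` (hostname and credentials are placeholders):

```js
import * as nvs from './lib/client.mjs'

const vc = new nvs.Client('vcenter.example.org', 'admin', 'secret', false)

vc.once('ready', function () {
  vc.runCommand('CurrentTime', { _this: 'ServiceInstance' })
    .once('result', function (result) {
      // result.returnval is a Date (see the bundled tests)
      console.log(result.returnval)
    })
    .once('error', function (err) {
      console.error(err)
    })
})

vc.once('error', function (err) {
  console.error(err)
})
```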
38
@vates/node-vsphere-soap/package.json
Normal file
@@ -0,0 +1,38 @@
{
  "name": "@vates/node-vsphere-soap",
  "version": "2.0.0",
  "description": "interface to vSphere SOAP/WSDL from node for interfacing with vCenter or ESXi, forked from node-vsphere-soap",
  "main": "lib/client.mjs",
  "author": "reedog117",
  "repository": {
    "directory": "@vates/node-vsphere-soap",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "axios": "^1.4.0",
    "soap": "^1.0.0",
    "soap-cookie": "^0.10.1"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "keywords": [
    "vsphere",
    "vcenter",
    "api",
    "soap",
    "wsdl"
  ],
  "preferGlobal": false,
  "license": "MIT",
  "private": false,
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/node-vsphere-soap",
  "engines": {
    "node": ">=14"
  },
  "scripts": {
    "postversion": "npm publish --access public"
  }
}
11
@vates/node-vsphere-soap/test/config-test.stub.mjs
Normal file
@@ -0,0 +1,11 @@
// place your own credentials here for a vCenter or ESXi server
// this information will be used for connecting to a vCenter instance
// for module testing
// name the file config-test.mjs

export const vCenterTestCreds = {
  vCenterIP: 'vcsa',
  vCenterUser: 'vcuser',
  vCenterPassword: 'vcpw',
  vCenter: true,
}
138
@vates/node-vsphere-soap/test/vsphere-soap.test.mjs
Normal file
@@ -0,0 +1,138 @@
/*

  vsphere-soap.test.js

  tests for the vCenterConnectionInstance class

*/

import assert from 'assert'
import { describe, it } from 'test'

import * as vc from '../lib/client.mjs'

// eslint-disable-next-line n/no-missing-import
import { vCenterTestCreds as TestCreds } from '../config-test.mjs'

const VItest = new vc.Client(TestCreds.vCenterIP, TestCreds.vCenterUser, TestCreds.vCenterPassword, false)

describe('Client object initialization:', function () {
  it('provides a successful login', { timeout: 5000 }, function (t, done) {
    VItest.once('ready', function () {
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    }).once('error', function (err) {
      console.error(err)
      // this should fail if there's a problem
      assert.notEqual(VItest.userName, null)
      assert.notEqual(VItest.fullName, null)
      assert.notEqual(VItest.serviceContent, null)
      done()
    })
  })
})

describe('Client reconnection test:', function () {
  it('can successfully reconnect', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('Logout', { _this: VItest.serviceContent.sessionManager })
      .once('result', function (result) {
        // now we're logged out, so let's try running a command to test automatic re-login
        VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' })
          .once('result', function (result) {
            assert(result.returnval instanceof Date)
            done()
          })
          .once('error', function (err) {
            console.error(err)
          })
      })
      .once('error', function (err) {
        console.error(err)
      })
  })
})

// these tests don't work yet
describe('Client tests - query commands:', function () {
  it('retrieves current time', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('retrieves current time 2 (check for event clobbering)', { timeout: 5000 }, function (t, done) {
    VItest.runCommand('CurrentTime', { _this: 'ServiceInstance' }).once('result', function (result) {
      assert(result.returnval instanceof Date)
      done()
    })
  })

  it('can obtain the names of all Virtual Machines in the inventory', { timeout: 20000 }, function (t, done) {
    // get property collector
    const propertyCollector = VItest.serviceContent.propertyCollector
    // get view manager
    const viewManager = VItest.serviceContent.viewManager
    // get root folder
    const rootFolder = VItest.serviceContent.rootFolder

    let containerView, objectSpec, traversalSpec, propertySpec, propertyFilterSpec
    // this is the equivalent to
    VItest.runCommand('CreateContainerView', {
      _this: viewManager,
      container: rootFolder,
      type: ['VirtualMachine'],
      recursive: true,
    }).once('result', function (result) {
      // build all the data structures needed to query all the vm names
      containerView = result.returnval

      objectSpec = {
        attributes: { 'xsi:type': 'ObjectSpec' }, // setting attributes xsi:type is important or else the server may mis-recognize types!
        obj: containerView,
        skip: true,
      }

      traversalSpec = {
        attributes: { 'xsi:type': 'TraversalSpec' },
        name: 'traverseEntities',
        type: 'ContainerView',
        path: 'view',
        skip: false,
      }

      objectSpec = { ...objectSpec, selectSet: [traversalSpec] }

      propertySpec = {
        attributes: { 'xsi:type': 'PropertySpec' },
        type: 'VirtualMachine',
        pathSet: ['name'],
      }

      propertyFilterSpec = {
        attributes: { 'xsi:type': 'PropertyFilterSpec' },
        propSet: [propertySpec],
        objectSet: [objectSpec],
      }
      // TODO: research why it fails if propSet is declared after objectSet

      VItest.runCommand('RetrievePropertiesEx', {
        _this: propertyCollector,
        specSet: [propertyFilterSpec],
        options: { attributes: { type: 'RetrieveOptions' } },
      })
        .once('result', function (result, raw) {
          assert.notEqual(result.returnval.objects, null)
          if (Array.isArray(result.returnval.objects)) {
            assert.strictEqual(result.returnval.objects[0].obj.attributes.type, 'VirtualMachine')
          } else {
            assert.strictEqual(result.returnval.objects.obj.attributes.type, 'VirtualMachine')
          }
          done()
        })
        .once('error', function (err) {
          console.error('\n\nlast request : ' + VItest.client.lastRequest, err)
        })
    })
  })
})
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/otp):

-```
-> npm install --save @vates/otp
+```sh
+npm install --save @vates/otp
```

## Usage
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/parse-duration):

-```
-> npm install --save @vates/parse-duration
+```sh
+npm install --save @vates/parse-duration
```

## Usage
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/predicates):

-```
-> npm install --save @vates/predicates
+```sh
+npm install --save @vates/predicates
```

## Usage
@@ -24,3 +24,25 @@ import { readChunkStrict } from '@vates/read-chunk'

const chunk = await readChunkStrict(stream, 1024)
```

### `skip(stream, size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
import { skip } from '@vates/read-chunk'

const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
```

### `skipStrict(stream, size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
import { skipStrict } from '@vates/read-chunk'

await skipStrict(stream, 2 * 1024 * 1024 * 1024)
```
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):

-```
-> npm install --save @vates/read-chunk
+```sh
+npm install --save @vates/read-chunk
```

## Usage
@@ -43,6 +43,28 @@ import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```

### `skip(stream, size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
import { skip } from '@vates/read-chunk'

const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
```

### `skipStrict(stream, size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
import { skipStrict } from '@vates/read-chunk'

await skipStrict(stream, 2 * 1024 * 1024 * 1024)
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
@@ -1,11 +1,37 @@
'use strict'

const assert = require('assert')
const isUtf8 = require('isutf8')

/**
 * Read a chunk of data from a stream.
 *
 * The returned promise is rejected if there is an error while reading the stream.
 *
 * For streams in object mode, the returned promise resolves to a single object read from the stream.
 *
 * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
 *
 * If `size` bytes are not available to be read, `null` will be returned *unless* the stream has ended, in which case all of the data remaining will be returned.
 *
 * @param {Readable} stream - A readable stream to read from.
 * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
 * @returns {Promise<Buffer|string|unknown|null>} - A Promise that resolves to the read chunk if available, or null if end of stream is reached.
 */
const readChunk = (stream, size) =>
-  stream.closed || stream.readableEnded
+  stream.errored != null
+    ? Promise.reject(stream.errored)
+    : stream.closed || stream.readableEnded
      ? Promise.resolve(null)
      : size === 0
        ? Promise.resolve(Buffer.alloc(0))
        : new Promise((resolve, reject) => {
            if (size !== undefined) {
              assert(size > 0)

              // per Node documentation:
              // > The size argument must be less than or equal to 1 GiB.
              assert(size < 1073741824)
            }

            function onEnd() {
              resolve(null)
              removeListeners()
@@ -33,6 +59,21 @@ const readChunk = (stream, size) =>
  })
exports.readChunk = readChunk

/**
 * Read a chunk of data from a stream.
 *
 * The returned promise is rejected if there is an error while reading the stream.
 *
 * For streams in object mode, the returned promise resolves to a single object read from the stream.
 *
 * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
 *
 * If `size` bytes are not available to be read, the returned promise is rejected.
 *
 * @param {Readable} stream - A readable stream to read from.
 * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
 * @returns {Promise<Buffer|string|unknown>} - A Promise that resolves to the read chunk.
 */
exports.readChunkStrict = async function readChunkStrict(stream, size) {
  const chunk = await readChunk(stream, size)
  if (chunk === null) {
@@ -40,7 +81,14 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
  }

  if (size !== undefined && chunk.length !== size) {
-    const error = new Error('stream has ended with not enough data')
+    const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
+
+    // Buffer.isUtf8 is too recent for now
+    // @todo: replace the external package by Buffer.isUtf8 when the supported version of Node reaches 18
+    if (chunk.length < 1024 && isUtf8(chunk)) {
+      error.text = chunk.toString('utf8')
+    }
    Object.defineProperties(error, {
      chunk: {
        value: chunk,
@@ -51,3 +99,69 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {

  return chunk
}

/**
 * Skips a given number of bytes from a readable stream.
 *
 * @param {Readable} stream - A readable stream to skip bytes from.
 * @param {number} size - The number of bytes to skip.
 * @returns {Promise<number>} A Promise that resolves to the number of bytes actually skipped. If the end of the stream is reached before all bytes are skipped, the Promise resolves to the number of bytes that were skipped before the end of the stream was reached. The Promise is rejected if there is an error while reading from the stream.
 */
async function skip(stream, size) {
  return stream.errored != null
    ? Promise.reject(stream.errored)
    : size === 0 || stream.closed || stream.readableEnded
      ? Promise.resolve(0)
      : new Promise((resolve, reject) => {
          let left = size
          function onEnd() {
            resolve(size - left)
            removeListeners()
          }
          function onError(error) {
            reject(error)
            removeListeners()
          }
          function onReadable() {
            const data = stream.read()
            left -= data === null ? 0 : data.length
            if (left > 0) {
              // continue to read
            } else {
              // if more than wanted has been read, push back the rest
              if (left < 0) {
                stream.unshift(data.slice(left))
              }

              resolve(size)
              removeListeners()
            }
          }
          function removeListeners() {
            stream.removeListener('end', onEnd)
            stream.removeListener('error', onError)
            stream.removeListener('readable', onReadable)
          }
          stream.on('end', onEnd)
          stream.on('error', onError)
          stream.on('readable', onReadable)
          onReadable()
        })
}
exports.skip = skip

/**
 * Skips a given number of bytes from a stream.
 *
 * @param {Readable} stream - A readable stream to skip bytes from.
 * @param {number} size - The number of bytes to skip.
 * @returns {Promise<void>} - A Promise that resolves when the exact number of bytes have been skipped. The Promise is rejected if there is an error while reading from the stream or the stream ends before the exact number of bytes have been skipped.
 */
exports.skipStrict = async function skipStrict(stream, size) {
  const bytesSkipped = await skip(stream, size)
  if (bytesSkipped !== size) {
    const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
    error.bytesSkipped = bytesSkipped
    throw error
  }
}
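A minimal sketch of consuming the enriched `readChunkStrict` error shown above; `readHeader` is a hypothetical caller, and `error.text` is only present for short UTF-8 chunks, as the code shows:

```js
const { readChunkStrict } = require('@vates/read-chunk')

async function readHeader(stream) {
  try {
    return await readChunkStrict(stream, 1024)
  } catch (error) {
    // error.chunk carries the partial data when the stream ended early;
    // error.text is only set when that chunk is < 1024 bytes of valid UTF-8
    if (error.text !== undefined) {
      console.error(error.message, '-', error.text)
    } else if (error.chunk !== undefined) {
      console.error(error.message, '-', error.chunk.length, 'bytes received')
    }
    throw error
  }
}
```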
@@ -5,12 +5,58 @@ const assert = require('node:assert').strict

const { Readable } = require('stream')

-const { readChunk, readChunkStrict } = require('./')
+const { readChunk, readChunkStrict, skip, skipStrict } = require('./')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = fn => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(fn(stream, 10))
    stream.destroy(error)

    assert.strict(await pError, error)
  })

  // only supported for Node >= 18
  if (process.versions.node.split('.')[0] >= 18) {
    it('rejects if the stream has already errored', async () => {
      const error = new Error()
      const stream = makeStream([])

      await new Promise(resolve => {
        stream.once('error', resolve).destroy(error)
      })

      assert.strict(await rejectionOf(fn(stream, 10)), error)
    })
  }
}

describe('readChunk', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(readChunk(makeStream([]), 0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('rejects if size is greater than or equal to 1 GiB', async () => {
    const error = await rejectionOf(readChunk(makeStream([]), 1024 * 1024 * 1024))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  makeErrorTests(readChunk)

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await readChunk(makeStream([])), null)
  })
@@ -38,10 +84,6 @@ describe('readChunk', () => {
  it('returns less data if stream ends', async () => {
    assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
  })
-
-  it('returns an empty buffer if the specified size is 0', async () => {
-    assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
-  })
})

describe('with object stream', () => {
@@ -52,14 +94,6 @@ describe('readChunk', () => {
  })
})

-const rejectionOf = promise =>
-  promise.then(
-    value => {
-      throw value
-    },
-    error => error
-  )
-
describe('readChunkStrict', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream([])))
@@ -68,10 +102,86 @@ describe('readChunkStrict', function () {
    assert.strictEqual(error.chunk, undefined)
  })

-  it('throws if stream ends with not enough data', async () => {
+  it('throws if stream ends with not enough data, utf8', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    assert(error instanceof Error)
-    assert.strictEqual(error.message, 'stream has ended with not enough data')
+    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
+    assert.strictEqual(error.text, 'foobar')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })

  it('throws if stream ends with not enough data, non utf8', async () => {
    const source = [Buffer.alloc(10, 128), Buffer.alloc(10, 128)]
    const error = await rejectionOf(readChunkStrict(makeStream(source), 30))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 20, expected: 30)')
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, Buffer.concat(source))
  })

  it('throws if stream ends with not enough data, utf8, long data', async () => {
    const source = Buffer.from('a'.repeat(1500))
    const error = await rejectionOf(readChunkStrict(makeStream([source]), 2000))
    assert(error instanceof Error)
    assert.strictEqual(error.message, `stream has ended with not enough data (actual: 1500, expected: 2000)`)
    assert.strictEqual(error.text, undefined)
    assert.deepEqual(error.chunk, source)
  })

  it('succeeds', async () => {
    const source = Buffer.from('a'.repeat(20))
    const chunk = await readChunkStrict(makeStream([source]), 10)
    assert.deepEqual(source.subarray(0, 10), chunk)
  })
})

describe('skip', function () {
  makeErrorTests(skip)

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await skip(makeStream(['foo']), 0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const stream = makeStream([])
    await readChunk(stream)

    assert.strictEqual(await skip(stream, 10), 0)
  })

  it('skips a number of bytes', async () => {
    const stream = makeStream('foo bar')

    assert.strictEqual(await skip(stream, 4), 4)
    assert.deepEqual(await readChunk(stream, 4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
  })

  it('puts back if it read too much', async () => {
    let source = makeStream(['foo', 'bar'])
    await skip(source, 1) // read part of a data chunk
    const chunk = (await readChunkStrict(source, 2)).toString('utf-8')
    assert.strictEqual(chunk, 'oo')

    source = makeStream(['foo', 'bar'])
    assert.strictEqual(await skip(source, 3), 3) // read aligned with data chunk
  })
})

describe('skipStrict', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(skipStrict(makeStream('foo bar'), 10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })

  it('succeeds', async () => {
    const source = makeStream(['foo', 'bar', 'baz'])
    const res = await skipStrict(source, 4)
    assert.strictEqual(res, undefined)
  })
})
@@ -19,7 +19,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
-  "version": "1.0.1",
+  "version": "1.2.0",
  "engines": {
    "node": ">=8.10"
  },
@@ -33,5 +33,8 @@
  },
  "devDependencies": {
    "test": "^3.2.1"
  },
+  "dependencies": {
+    "isutf8": "^4.0.0"
+  }
}
42
@vates/stream-reader/.USAGE.md
Normal file
@@ -0,0 +1,42 @@
```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```
1
@vates/stream-reader/.npmignore
Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
75
@vates/stream-reader/README.md
Normal file
@@ -0,0 +1,75 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/stream-reader

[](https://npmjs.org/package/@vates/stream-reader)  [](https://bundlephobia.com/result?p=@vates/stream-reader) [](https://npmjs.org/package/@vates/stream-reader)

> Efficiently reads and skips chunks of a given size in a stream

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/stream-reader):

```sh
npm install --save @vates/stream-reader
```

## Usage

```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```

## Contributions

Contributions are _very_ welcomed, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
123
@vates/stream-reader/index.js
Normal file
@@ -0,0 +1,123 @@
'use strict'

const assert = require('node:assert')
const { finished, Readable } = require('node:stream')

const noop = Function.prototype

// Inspired by https://github.com/nodejs/node/blob/85705a47958c9ae5dbaa1f57456db19bdefdc494/lib/internal/streams/readable.js#L1107
class StreamReader {
  #ended = false
  #error
  #executor = resolve => {
    this.#resolve = resolve
  }
  #stream
  #resolve = noop

  constructor(stream) {
    stream = typeof stream.pipe === 'function' ? stream : Readable.from(stream)

    this.#stream = stream

    stream.on('readable', () => this.#resolve())

    finished(stream, { writable: false }, error => {
      this.#error = error
      this.#ended = true
      this.#resolve()
    })
  }

  async read(size) {
    if (size !== undefined) {
      assert(size > 0)
    }

    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return null
      }

      const value = this.#stream.read(size)
      if (value !== null) {
        return value
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async readStrict(size) {
    const chunk = await this.read(size)
    if (chunk === null) {
      throw new Error('stream has ended without data')
    }

    if (size !== undefined && chunk.length !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
      Object.defineProperties(error, {
        chunk: {
          value: chunk,
        },
      })
      throw error
    }

    return chunk
  }

  async skip(size) {
    if (size === 0) {
      return size
    }

    let toSkip = size
    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return size - toSkip
      }

      const data = this.#stream.read()
      if (data !== null) {
        toSkip -= data.length
        if (toSkip > 0) {
          // continue to read
        } else {
          // if more than wanted has been read, push back the rest
          if (toSkip < 0) {
            this.#stream.unshift(data.slice(toSkip))
          }

          return size
        }
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async skipStrict(size) {
    const bytesSkipped = await this.skip(size)
    if (bytesSkipped !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
      error.bytesSkipped = bytesSkipped
      throw error
    }
  }
}

StreamReader.prototype[Symbol.asyncIterator] = async function* asyncIterator() {
  let chunk
  while ((chunk = await this.read()) !== null) {
    yield chunk
  }
}

module.exports = StreamReader
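Because of the `Symbol.asyncIterator` implementation above, a reader can also be consumed with `for await`. A minimal sketch combining it with `skipStrict` (the 512-byte header is a placeholder):

```js
const StreamReader = require('@vates/stream-reader')

async function consume(stream) {
  const reader = new StreamReader(stream)

  // optionally skip a fixed-size header first
  await reader.skipStrict(512)

  // then iterate over the remaining chunks as they become available
  for await (const chunk of reader) {
    console.log(chunk.length)
  }
}
```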
141
@vates/stream-reader/index.test.js
Normal file
@@ -0,0 +1,141 @@
'use strict'

const { describe, it } = require('test')
const assert = require('node:assert').strict

const { Readable } = require('stream')

const StreamReader = require('./index.js')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = method => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(new StreamReader(stream)[method](10))
    stream.destroy(error)

    assert.strict(await pError, error)
  })

  it('rejects if the stream has already errored', async () => {
    const error = new Error()
    const stream = makeStream([])

    await new Promise(resolve => {
      stream.once('error', resolve).destroy(error)
    })

    assert.strict(await rejectionOf(new StreamReader(stream)[method](10)), error)
  })
}

describe('read()', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).read(0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await new StreamReader(makeStream([])).read(), null)
  })

  makeErrorTests('read')

  it('returns null if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.read(), null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(), Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(2), Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(4), Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(10), Buffer.from('foobar'))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      assert.strictEqual(await new StreamReader(makeStream.obj(chunks)).read(), chunks[0])
    })
  })
})

describe('readStrict()', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).readStrict())
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended without data')
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream(['foo', 'bar'])).readStrict(10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })
})

describe('skip()', function () {
  makeErrorTests('skip')

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await new StreamReader(makeStream(['foo'])).skip(0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.skip(10), 0)
  })

  it('skips a number of bytes', async () => {
    const reader = new StreamReader(makeStream('foo bar'))

    assert.strictEqual(await reader.skip(4), 4)
    assert.deepEqual(await reader.read(4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await new StreamReader(makeStream('foo bar')).skip(10), 7)
  })
})

describe('skipStrict()', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream('foo bar')).skipStrict(10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
})
39
@vates/stream-reader/package.json
Normal file
@@ -0,0 +1,39 @@
{
  "private": false,
  "name": "@vates/stream-reader",
  "description": "Efficiently reads and skips chunks of a given size in a stream",
  "keywords": [
    "async",
    "chunk",
    "data",
    "node",
    "promise",
    "read",
    "reader",
    "skip",
    "stream"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/stream-reader",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/stream-reader",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=12.3"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "devDependencies": {
    "test": "^3.3.0"
  }
}
137
@vates/task/.USAGE.md
Normal file
@@ -0,0 +1,137 @@
```js
import { Task } from '@vates/task'

const task = new Task({
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },

  // if defined, a new detached task is created
  //
  // if not defined and created inside an existing task, the new task is considered a subtask
  onProgress(event) {
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId, properties } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})

// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
task.status

// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)

// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise `undefined`
Task.abortSignal

// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)

// sends a warning on the current task if any, otherwise does nothing
Task.warning(message, data)

// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```

### `combineEvents`

Creates a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are root tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```
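A hedged end-to-end sketch tying the two sections above together (the task names and the progress handling are illustrative, not part of the package; top-level `await` assumes an ESM context):

```js
import { Task } from '@vates/task'
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // print the consolidated log, including subtasks, once the root task ends
  onRootTaskEnd(taskLog) {
    console.log(JSON.stringify(taskLog, null, 2))
  },
})

await Task.run({ properties: { name: 'backup' }, onProgress }, async () => {
  Task.set('progress', 0)

  // created inside a running task and without its own onProgress,
  // this new task is a subtask and reports through the same callback
  await new Task({ properties: { name: 'export disk' } }).run(() => {
    Task.info('starting export')
  })

  Task.set('progress', 100)
})
```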
1
@vates/task/.npmignore
Symbolic link
@@ -0,0 +1 @@
../../scripts/npmignore
168
@vates/task/README.md
Normal file
@@ -0,0 +1,168 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/task

[](https://npmjs.org/package/@vates/task)  [](https://bundlephobia.com/result?p=@vates/task) [](https://npmjs.org/package/@vates/task)

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/task):

```sh
npm install --save @vates/task
```

## Usage

```js
import { Task } from '@vates/task'

const task = new Task({
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },

  // if defined, a new detached task is created
  //
  // if not defined and created inside an existing task, the new task is considered a subtask
  onProgress(event) {
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId, properties } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})

// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
task.status

// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)

// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise `undefined`
Task.abortSignal

// sends an info on the current task if any, otherwise does nothing
Task.info(message, data)

// sends a warning on the current task if any, otherwise does nothing
Task.warning(message, data)

// attaches a property to the current task if any, otherwise does nothing
//
// the latest value takes precedence
//
// examples:
// - progress
Task.set(property, value)
```

### `combineEvents`

Creates a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are root tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ properties: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
61
@vates/task/combineEvents.js
Normal file
@@ -0,0 +1,61 @@
'use strict'

const assert = require('node:assert').strict

const noop = Function.prototype

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = {
        id,
        properties: { __proto__: null, ...event.properties },
        start: event.timestamp,
        status: 'pending',
      }
      taskLogs.set(id, taskLog)

      const { parentId } = event
      if (parentId === undefined) {
        Object.defineProperty(taskLog, '$root', { value: taskLog })

        // start of a root task
        onRootTaskStart(taskLog)
      } else {
        // start of a subtask
        const parent = taskLogs.get(parentId)
        assert.notEqual(parent, undefined)

        // inject a (non-enumerable) reference to the parent and the root task
        Object.defineProperties(taskLog, { $parent: { value: parent }, $root: { value: parent.$root } })
        ;(parent.tasks ?? (parent.tasks = [])).push(taskLog)
      }
    } else {
      taskLog = taskLogs.get(id)
      assert.notEqual(taskLog, undefined)

      if (type === 'info' || type === 'warning') {
        const key = type + 's'
        const { data, message } = event
        ;(taskLog[key] ?? (taskLog[key] = [])).push({ data, message })
      } else if (type === 'property') {
        ;(taskLog.properties ?? (taskLog.properties = { __proto__: null }))[event.name] = event.value
      } else if (type === 'end') {
        taskLog.end = event.timestamp
        taskLog.result = event.result
        taskLog.status = event.status
      } else if (type === 'abortionRequested') {
        taskLog.abortionRequestedAt = event.timestamp
      }

      if (type === 'end' && taskLog.$root === taskLog) {
        onRootTaskEnd(taskLog)
      }
    }

    onTaskUpdate(taskLog)
  }
}
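Since `makeOnProgress` is driven purely by event objects, stored logs can be replayed through it. A small sketch with hand-written events matching the shapes emitted by `index.js` below (the ids and values are illustrative):

```js
'use strict'

const { makeOnProgress } = require('./combineEvents.js')

const onProgress = makeOnProgress({
  onRootTaskEnd(taskLog) {
    console.log(taskLog.status, taskLog.result) // success 42
  },
})

// replay three stored events: start, info, end
;[
  { type: 'start', id: 't1', timestamp: 0, properties: { name: 'demo' } },
  { type: 'info', id: 't1', timestamp: 1, message: 'hello', data: {} },
  { type: 'end', id: 't1', timestamp: 2, status: 'success', result: 42 },
].forEach(onProgress)
```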
81
@vates/task/combineEvents.test.js
Normal file
@@ -0,0 +1,81 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { makeOnProgress } = require('./combineEvents.js')
const { Task } = require('./index.js')

describe('makeOnProgress()', function () {
  it('works', async function () {
    const events = []
    let log
    const task = new Task({
      properties: { name: 'task' },
      onProgress: makeOnProgress({
        onRootTaskStart(log_) {
          assert.equal(log, undefined)
          log = log_
          events.push('onRootTaskStart')
        },
        onRootTaskEnd(log_) {
          assert.equal(log_, log)
          events.push('onRootTaskEnd')
        },

        onTaskUpdate(log_) {
          assert.equal(log_.$root, log)
          events.push('onTaskUpdate')
        },
      }),
    })

    assert.equal(events.length, 0)

    let i = 0

    await task.run(async () => {
      assert.equal(events[i++], 'onRootTaskStart')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.id, task.id)
      assert.equal(log.properties.name, 'task')
      assert(Math.abs(log.start - Date.now()) < 10)

      Task.set('name', 'new name')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.name, 'new name')

      Task.set('progress', 0)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 0)

      Task.info('foo', {})
      assert.equal(events[i++], 'onTaskUpdate')
      assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])

      const subtask = new Task({ properties: { name: 'subtask' } })
      await subtask.run(() => {
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(log.tasks[0].properties.name, 'subtask')

        Task.warning('bar', {})
        assert.equal(events[i++], 'onTaskUpdate')
        assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])

        subtask.abort()
        assert.equal(events[i++], 'onTaskUpdate')
        assert(Math.abs(log.tasks[0].abortionRequestedAt - Date.now()) < 10)
      })
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.tasks[0].status, 'success')

      Task.set('progress', 100)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 100)
    })
    assert.equal(events[i++], 'onRootTaskEnd')
    assert.equal(events[i++], 'onTaskUpdate')
    assert(Math.abs(log.end - Date.now()) < 10)
    assert.equal(log.status, 'success')
  })
})
183
@vates/task/index.js
Normal file
@@ -0,0 +1,183 @@
'use strict'

const assert = require('node:assert').strict
const { AsyncLocalStorage } = require('node:async_hooks')

// define a read-only, non-enumerable, non-configurable property
function define(object, property, value) {
  Object.defineProperty(object, property, { value })
}

const noop = Function.prototype

const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
const asyncStorage = global[asyncStorageKey] ?? (global[asyncStorageKey] = new AsyncLocalStorage())

const getTask = () => asyncStorage.getStore()

exports.Task = class Task {
  static get abortSignal() {
    const task = getTask()
    if (task !== undefined) {
      return task.#abortController.signal
    }
  }

  static info(message, data) {
    const task = getTask()
    if (task !== undefined) {
      task.#emit('info', { data, message })
    }
  }

  static run(opts, fn) {
    return new this(opts).run(fn)
  }

  static set(name, value) {
    const task = getTask()
    if (task !== undefined) {
      task.#emit('property', { name, value })
    }
  }

  static warning(message, data) {
    const task = getTask()
    if (task !== undefined) {
      task.#emit('warning', { data, message })
    }
  }

  static wrap(opts, fn) {
    // compatibility with @decorateWith
    if (typeof fn !== 'function') {
      ;[fn, opts] = [opts, fn]
    }

    return function taskRun() {
      return Task.run(typeof opts === 'function' ? opts.apply(this, arguments) : opts, () => fn.apply(this, arguments))
    }
  }

  #abortController = new AbortController()
  #onProgress

  get id() {
    return (this.id = Math.random().toString(36).slice(2))
  }
  set id(value) {
    define(this, 'id', value)
  }

  #startData

  #status = PENDING
  get status() {
    return this.#status
  }

  constructor({ properties, onProgress } = {}) {
    this.#startData = { properties }

    if (onProgress !== undefined) {
      this.#onProgress = onProgress
    } else {
      const parent = getTask()
      if (parent !== undefined) {
        const { signal } = parent.#abortController
        signal.addEventListener('abort', () => {
          this.#abortController.abort(signal.reason)
        })

        this.#onProgress = parent.#onProgress
        this.#startData.parentId = parent.id
      } else {
        this.#onProgress = noop
      }
    }

    const { signal } = this.#abortController
    signal.addEventListener('abort', () => {
      if (this.status === PENDING) {
        this.#maybeStart()

        this.#emit('abortionRequested', { reason: signal.reason })

        if (!this.#running) {
          const status = FAILURE
          this.#status = status
          this.#emit('end', { result: signal.reason, status })
        }
      }
    })
  }

  abort(reason) {
    this.#abortController.abort(reason)
  }

  #emit(type, data) {
    data.id = this.id
    data.timestamp = Date.now()
    data.type = type
    this.#onProgress(data)
  }

  #maybeStart() {
    const startData = this.#startData
    if (startData !== undefined) {
      this.#startData = undefined
      this.#emit('start', startData)
    }
  }

  async run(fn) {
    const result = await this.runInside(fn)
    if (this.status === PENDING) {
      this.#status = SUCCESS
      this.#emit('end', { status: SUCCESS, result })
    }
    return result
  }

  #running = false
  async runInside(fn) {
    assert.equal(this.status, PENDING)
    assert.equal(this.#running, false)
    this.#running = true

    this.#maybeStart()

    try {
      const result = await asyncStorage.run(this, fn)
      this.#running = false
      return result
    } catch (result) {
      const status = FAILURE

      this.#status = status
      this.#emit('end', { status, result })
      throw result
    }
  }

  wrap(fn) {
    const task = this
    return function taskRun() {
      return task.run(() => fn.apply(this, arguments))
    }
  }

  wrapInside(fn) {
    const task = this
    return function taskRunInside() {
      return task.runInside(() => fn.apply(this, arguments))
    }
  }
}
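The `wrap` helpers above return plain functions that defer the task's execution until they are called. A short hedged sketch (note that a task runs at most once, as enforced by the asserts in `runInside`):

```js
'use strict'

const { Task } = require('@vates/task')

const task = new Task({ properties: { name: 'wrapped' }, onProgress: console.log })

// same semantics as task.run(fn), but callable later, with arguments
const double = task.wrap(async function (n) {
  return n * 2
})

double(21).then(result => console.log(result)) // 42, and the task ends as success
```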
347
@vates/task/index.test.js
Normal file
@@ -0,0 +1,347 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { Task } = require('./index.js')

const noop = Function.prototype

function assertEvent(task, expected, eventIndex = -1) {
  const logs = task.$events
  const actual = logs[eventIndex < 0 ? logs.length + eventIndex : eventIndex]

  assert.equal(typeof actual, 'object')
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const key of Object.keys(expected)) {
    assert.deepEqual(actual[key], expected[key])
  }
}

// like new Task() but with a custom onProgress which adds events to task.$events
function createTask(opts) {
  const events = []
  const task = new Task({ ...opts, onProgress: events.push.bind(events) })
  task.$events = events
  return task
}

describe('Task', function () {
  describe('constructor', function () {
    it('data properties are passed to the start event', async function () {
      const properties = { foo: 0, bar: 1 }
      const task = createTask({ properties })
      await task.run(noop)
      assertEvent(task, { type: 'start', properties }, 0)
    })
  })

  it('subtasks events are passed to root task', async function () {
    const task = createTask()
    const result = {}

    await task.run(async () => {
      await new Task().run(() => result)
    })

    assert.equal(task.$events.length, 4)
    assertEvent(task, { type: 'start', parentId: task.id }, 1)
    assertEvent(task, { type: 'end', status: 'success', result }, 2)
  })

  describe('.abortSignal', function () {
    it('is undefined when run outside a task', function () {
      assert.equal(Task.abortSignal, undefined)
    })

    it('is the current abort signal when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        const { abortSignal } = Task
        assert.equal(abortSignal.aborted, false)
        task.abort()
        assert.equal(abortSignal.aborted, true)
      })
    })
  })

  describe('.abort()', function () {
    it('fails the task with the abort reason if the task throws it', async function () {
      const task = createTask()
      const reason = {}

      await task
        .run(() => {
          task.abort(reason)

          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const reason = {}
      const result = new Error()

      await task
        .run(() => {
          task.abort(reason)

          throw result
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result }, 2)
    })

    it('does not abort if the task succeeds', async function () {
      const task = createTask()
      const reason = {}
      const result = {}

      await task
        .run(() => {
          task.abort(reason)

          return result
        })
        .catch(noop)

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'success', result }, 2)
    })

    it('aborts before task is running', function () {
      const task = createTask()
      const reason = {}

      task.abort(reason)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })
  })

  describe('.info()', function () {
    it('does nothing when run outside a task', function () {
      Task.info('foo')
    })

    it('emits an info message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.info('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'info',
        })
      })
    })
  })

  describe('.set()', function () {
    it('does nothing when run outside a task', function () {
      Task.set('progress', 10)
    })

    it('emits a property event when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.set('progress', 10)
        assertEvent(task, {
          name: 'progress',
          type: 'property',
          value: 10,
        })
      })
    })
  })

  describe('.warning()', function () {
    it('does nothing when run outside a task', function () {
      Task.warning('foo')
    })

    it('emits a warning message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.warning('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'warning',
        })
      })
    })
  })

  describe('#id', function () {
    it('can be set', function () {
      const task = createTask()
      task.id = 'foo'
      assert.equal(task.id, 'foo')
    })

    it('cannot be set more than once', function () {
      const task = createTask()
      task.id = 'foo'

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })

    it('is randomly generated if not set', function () {
      assert.notEqual(createTask().id, createTask().id)
    })

    it('cannot be set after being observed', function () {
      const task = createTask()
      noop(task.id)

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })
  })

  describe('#status', function () {
    it('starts as pending', function () {
      assert.equal(createTask().status, 'pending')
    })

    it('changes to success when the task finishes without error', async function () {
      const task = createTask()
      await task.run(noop)
      assert.equal(task.status, 'success')
    })

    it('changes to failure when the task finishes with an error', async function () {
      const task = createTask()
      await task
        .run(() => {
          throw Error()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
          task.abort()
          assert.equal(task.status, 'pending')
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted when not running', function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'failure')
    })
  })

  function makeRunTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('finishes the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'success')
      assertEvent(task, {
        status: 'success',
        result: 'foo',
        type: 'end',
      })
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.run', function () {
    makeRunTests((task, fn) => task.run(fn))
  })
  describe('.wrap', function () {
    makeRunTests((task, fn) => task.wrap(fn)())
  })

  function makeRunInsideTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('does not finish the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'pending')
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.runInside', function () {
    makeRunInsideTests((task, fn) => task.runInside(fn))
  })
  describe('.wrapInside', function () {
    makeRunInsideTests((task, fn) => task.wrapInside(fn)())
  })
})
31
@vates/task/package.json
Normal file
@@ -0,0 +1,31 @@
{
  "private": false,
  "name": "@vates/task",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/task",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/task",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.2.0",
  "engines": {
    "node": ">=14"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "exports": {
    ".": "./index.js",
    "./combineEvents": "./combineEvents.js"
  }
}
@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):

```
> npm install --save @vates/toggle-scripts
```sh
npm install --save @vates/toggle-scripts
```

## Usage

@@ -30,6 +30,7 @@ if (args.length === 0) {

${name} v${version}
`)
// eslint-disable-next-line n/no-process-exit
process.exit()
}

@@ -10,8 +10,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async-map):

```
> npm install --save @xen-orchestra/async-map
```sh
npm install --save @xen-orchestra/async-map
```

## Usage

@@ -35,7 +35,7 @@
    "test": "node--test"
  },
  "devDependencies": {
    "sinon": "^14.0.1",
    "sinon": "^16.0.0",
    "test": "^3.2.1"
  }
}

@@ -8,8 +8,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/audit-core):

```
> npm install --save @xen-orchestra/audit-core
```sh
npm install --save @xen-orchestra/audit-core
```

## Contributions

@@ -7,7 +7,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "0.2.1",
  "version": "0.2.3",
  "engines": {
    "node": ">=14"
  },
@@ -17,7 +17,7 @@
  },
  "dependencies": {
    "@vates/decorate-with": "^2.0.0",
    "@xen-orchestra/log": "^0.4.0",
    "@xen-orchestra/log": "^0.6.0",
    "golike-defer": "^0.5.1",
    "object-hash": "^2.0.1"
  },

@@ -5,7 +5,6 @@ const PRESETS_RE = /^@babel\/preset-.+$/

const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'

const configs = {
  '@babel/plugin-proposal-decorators': {
@@ -15,7 +14,7 @@ const configs = {
    proposal: 'minimal',
  },
  '@babel/preset-env': {
    debug: !__TEST__,
    debug: __PROD__,

    // disabled until https://github.com/babel/babel/issues/8323 is resolved
    // loose: true,

@@ -8,8 +8,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups-cli):

```
> npm install --global @xen-orchestra/backups-cli
```sh
npm install --global @xen-orchestra/backups-cli
```

## Usage

@@ -1,5 +1,5 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.mjs'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'
@@ -22,7 +22,6 @@ export default async function cleanVms(args) {

  await asyncMap(_, vmDir =>
    Disposable.use(getSyncedHandler({ url: pathToFileURL(dirname(vmDir)).href }), async handler => {
      console.log(handler, basename(vmDir))
      try {
        await new RemoteAdapter(handler).cleanVm(basename(vmDir), {
          fixMetadata: fix,

@@ -7,12 +7,12 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.29.0",
    "@xen-orchestra/fs": "^3.2.0",
    "filenamify": "^4.1.0",
    "@xen-orchestra/backups": "^0.43.0",
    "@xen-orchestra/fs": "^4.1.0",
    "filenamify": "^6.0.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
    "promise-toolbox":"^0.21.0"
    "promise-toolbox": "^0.21.0"
  },
  "engines": {
    "node": ">=14"
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "0.7.8",
  "version": "1.0.13",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
@@ -1,292 +0,0 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')

const noop = Function.prototype

const getAdaptersByRemote = adapters => {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}

const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

const DEFAULT_SETTINGS = {
  reportWhen: 'failure',
}

const DEFAULT_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxMergedDeltasPerRun: 2,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  vmTimeout: 0,
}

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

exports.Backup = class Backup {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getAdapter = Disposable.factory(function* (remoteId) {
      return {
        adapter: yield getAdapter(remoteId),
        remoteId,
      }
    })

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const { type } = job
    const baseSettings = { ...DEFAULT_SETTINGS }
    if (type === 'backup') {
      Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
      this.run = this._runVmBackup
    } else if (type === 'metadataBackup') {
      Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
      this.run = this._runMetadataBackup
    } else {
      throw new Error(`No runner for the backup type ${type}`)
    }
    Object.assign(baseSettings, job.settings[''])

    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }
  }

  async _runMetadataBackup() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(
        remoteIds.map(id =>
          this._getAdapter(id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }

  async _runVmBackup() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule

    const config = this._config
    const settings = this._settings
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id =>
          this._getAdapter(id).catch(error => {
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid =>
          runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
            Disposable.use(this._getRecord('VM', vmUuid), vm =>
              new VmBackup({
                baseSettings,
                config,
                getSnapshotNameLabel,
                healthCheckSr,
                job,
                remoteAdapters,
                schedule,
                settings: { ...settings, ...allSettings[vm.uuid] },
                srs,
                vm,
              }).run()
            )
          )
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
17
@xen-orchestra/backups/Backup.mjs
Normal file
@@ -0,0 +1,17 @@
import { Metadata } from './_runners/Metadata.mjs'
import { VmsRemote } from './_runners/VmsRemote.mjs'
import { VmsXapi } from './_runners/VmsXapi.mjs'

export function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'mirrorBackup':
      return new VmsRemote(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
}
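The dispatch above is driven solely by `job.type`. A tiny sketch of the error path, runnable without any of the runner options since unknown types throw before a runner is constructed:

```js
import { createRunner } from '@xen-orchestra/backups/Backup.mjs'

try {
  createRunner({ job: { type: 'unknown' } })
} catch (error) {
  console.log(error.message) // No runner for the backup type unknown
}
```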
@@ -1,8 +1,6 @@
'use strict'
import { asyncMap } from '@xen-orchestra/async-map'

const { asyncMap } = require('@xen-orchestra/async-map')

exports.DurablePartition = class DurablePartition {
export class DurablePartition {
  // private resource API is used exceptionally to be able to separate resource creation and release
  #partitionDisposers = {}
@@ -1,64 +0,0 @@
'use strict'

const { Task } = require('./Task')

exports.HealthCheckVmBackup = class HealthCheckVmBackup {
  #xapi
  #restoredVm

  constructor({ restoredVm, xapi }) {
    this.#restoredVm = restoredVm
    this.#xapi = xapi
  }

  async run() {
    return Task.run(
      {
        name: 'vmstart',
      },
      async () => {
        let restoredVm = this.#restoredVm
        const xapi = this.#xapi
        const restoredId = restoredVm.uuid

        // remove vifs
        await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))

        const start = new Date()
        // start Vm

        await xapi.callAsync(
          'VM.start',
          restoredVm.$ref,
          false, // Start paused?
          false // Skip pre-boot checks?
        )
        const started = new Date()
        const timeout = 10 * 60 * 1000
        const startDuration = started - start

        let remainingTimeout = timeout - startDuration

        if (remainingTimeout < 0) {
          throw new Error(`VM ${restoredId} not started after ${timeout / 1000} second`)
        }

        // wait for the 'Running' event to be really stored in local xapi object cache
        restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
          timeout: remainingTimeout,
        })

        const running = new Date()
        remainingTimeout -= running - started

        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get Runnig state for VM ${restoredId} after ${timeout / 1000} second`)
        }
        // wait for the guest tool version to be defined
        await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
          timeout: remainingTimeout,
        })
      }
    )
  }
}
109
@xen-orchestra/backups/HealthCheckVmBackup.mjs
Normal file
@@ -0,0 +1,109 @@
import { Task } from './Task.mjs'

export class HealthCheckVmBackup {
  #restoredVm
  #timeout
  #xapi

  constructor({ restoredVm, timeout = 10 * 60 * 1000, xapi }) {
    this.#restoredVm = restoredVm
    this.#xapi = xapi
    this.#timeout = timeout
  }

  async run() {
    return Task.run(
      {
        name: 'vmstart',
      },
      async () => {
        let restoredVm = this.#restoredVm
        const xapi = this.#xapi
        const restoredId = restoredVm.uuid

        // remove vifs
        await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
        const waitForScript = restoredVm.tags.includes('xo-backup-health-check-xenstore')
        if (waitForScript) {
          await restoredVm.set_xenstore_data({
            'vm-data/xo-backup-health-check': 'planned',
          })
        }
        const start = new Date()
        // start Vm

        await xapi.callAsync(
          'VM.start',
          restoredVm.$ref,
          false, // Start paused?
          false // Skip pre-boot checks?
        )
        const started = new Date()
        const timeout = this.#timeout
        const startDuration = started - start

        let remainingTimeout = timeout - startDuration

        if (remainingTimeout < 0) {
          throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
        }

        // wait for the 'Running' event to be really stored in local xapi object cache
        restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
          timeout: remainingTimeout,
        })

        const running = new Date()
        remainingTimeout -= running - started

        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
        }
        // wait for the guest tool version to be defined
        await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
          timeout: remainingTimeout,
        })

        const guestToolsReady = new Date()
        remainingTimeout -= guestToolsReady - running
        if (remainingTimeout < 0) {
          throw new Error(`local xapi did not get the guest tools check for VM ${restoredId} after ${timeout / 1000} seconds`)
        }

        if (waitForScript) {
          const startedRestoredVm = await xapi.waitObjectState(
            restoredVm.$ref,
            vm =>
              vm?.xenstore_data !== undefined &&
              (vm.xenstore_data['vm-data/xo-backup-health-check'] === 'success' ||
                vm.xenstore_data['vm-data/xo-backup-health-check'] === 'failure'),
            {
              timeout: remainingTimeout,
            }
          )
          const scriptOk = new Date()
          remainingTimeout -= scriptOk - guestToolsReady
          if (remainingTimeout < 0) {
            throw new Error(
              `Backup health check script did not update vm-data/xo-backup-health-check of ${restoredId} after ${
                timeout / 1000
              } seconds, got ${
                startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check']
              } instead of 'success' or 'failure'`
            )
          }

          if (startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check'] !== 'success') {
            const message = startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check-error']
            if (message) {
              throw new Error(`Backup health check script failed with message ${message} for VM ${restoredId} `)
            } else {
              throw new Error(`Backup health check script failed for VM ${restoredId} `)
            }
          }
          Task.info('Backup health check script successfully executed')
        }
      }
    )
  }
}
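For context, the xenstore contract used above expects a script inside the guest to flip `vm-data/xo-backup-health-check` from 'planned' to 'success' or 'failure'. A hedged sketch of such a guest-side script, assuming Node.js and the standard `xenstore-write` guest utility are available in the VM; the checks themselves are up to the user:

```js
'use strict'

const { execFileSync } = require('node:child_process')

const write = (key, value) => execFileSync('xenstore-write', [key, value])

try {
  // run whatever application-level checks make sense for this VM here

  write('vm-data/xo-backup-health-check', 'success')
} catch (error) {
  // the error message is surfaced by HealthCheckVmBackup above
  write('vm-data/xo-backup-health-check-error', String(error.message ?? error))
  write('vm-data/xo-backup-health-check', 'failure')
}
```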
@@ -1,16 +1,14 @@
'use strict'
import assert from 'node:assert'

const assert = require('assert')
import { formatFilenameDate } from './_filenameDate.mjs'
import { importIncrementalVm } from './_incrementalVm.mjs'
import { Task } from './Task.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'

const { formatFilenameDate } = require('./_filenameDate.js')
const { importDeltaVm } = require('./_deltaVm.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

exports.ImportVmBackup = class ImportVmBackup {
export class ImportVmBackup {
  constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
    this._adapter = adapter
    this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
    this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
    this._metadata = metadata
    this._srUuid = srUuid
    this._xapi = xapi
@@ -31,11 +29,11 @@ exports.ImportVmBackup = class ImportVmBackup {
    assert.strictEqual(metadata.mode, 'delta')

    const ignoredVdis = new Set(
      Object.entries(this._importDeltaVmSettings.mapVdisSrs)
      Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
        .filter(([_, srUuid]) => srUuid === null)
        .map(([vdiUuid]) => vdiUuid)
    )
    backup = await adapter.readDeltaVmBackup(metadata, ignoredVdis)
    backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
    Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
  }

@@ -49,8 +47,8 @@ exports.ImportVmBackup = class ImportVmBackup {

    const vmRef = isFull
      ? await xapi.VM_import(backup, srRef)
      : await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
          ...this._importDeltaVmSettings,
      : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
          ...this._importIncrementalVmSettings,
          detectBase: false,
        })
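Going by the constructor in this hunk, a caller wires together an adapter, the parsed backup metadata, a target SR and a XAPI connection; `mapVdisSrs` entries set to `null` mark VDIs to skip on restore. A hypothetical wiring, based only on the signature shown above (all values are illustrative):

```js
// Hypothetical usage sketch — names and values are illustrative, not from the diff.
const importer = new ImportVmBackup({
  adapter, // a RemoteAdapter for the backup repository
  metadata, // parsed metadata.json of the backup to restore
  srUuid: 'sr-uuid', // target storage repository
  xapi, // connected XAPI client
  settings: {
    newMacAddresses: true, // regenerate VIF MAC addresses on import
    mapVdisSrs: { 'vdi-uuid': null }, // a null target SR means: skip this VDI entirely
  },
})
```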
@@ -8,8 +8,8 @@

Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backups):

```
> npm install --save @xen-orchestra/backups
```sh
npm install --save @xen-orchestra/backups
```

## Contributions
@@ -1,42 +1,39 @@
'use strict'
import { asyncEach } from '@vates/async-each'
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } from 'vhd-lib'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { dirname, join, resolve } from 'node:path'
import { execFile } from 'child_process'
import { mount } from '@vates/fuse-vhd'
import { readdir, lstat } from 'node:fs/promises'
import { synchronized } from 'decorator-synchronized'
import { v4 as uuidv4 } from 'uuid'
import { ZipFile } from 'yazl'
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import groupBy from 'lodash/groupBy.js'
import pDefer from 'promise-toolbox/defer'
import pickBy from 'lodash/pickBy.js'
import tar from 'tar'
import zlib from 'zlib'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { synchronized } = require('decorator-synchronized')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const pickBy = require('lodash/pickBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
import { BACKUP_DIR } from './_getVmBackupDir.mjs'
import { cleanVm } from './_cleanVm.mjs'
import { formatFilenameDate } from './_filenameDate.mjs'
import { getTmpDir } from './_getTmpDir.mjs'
import { isMetadataFile } from './_backupType.mjs'
import { isValidXva } from './_isValidXva.mjs'
import { listPartitions, LVM_PARTITION_TYPE } from './_listPartitions.mjs'
import { lvs, pvs } from './_lvm.mjs'
import { watchStreamSize } from './_watchStreamSize.mjs'

const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
// @todo: this import is marked extraneous, should be fixed when lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
export const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'

const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS

const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
export const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'

const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
@@ -45,20 +42,23 @@ const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const noop = Function.prototype

const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))

const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
async function addZipEntries(zip, realBasePath, virtualBasePath, relativePaths) {
  for (const relativePath of relativePaths) {
    const realPath = join(realBasePath, relativePath)
    const virtualPath = join(virtualBasePath, relativePath)

async function addDirectory(files, realPath, metadataPath) {
  const stats = await lstat(realPath)
  if (stats.isDirectory()) {
    await asyncMap(await readdir(realPath), file =>
      addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
    )
  } else if (stats.isFile()) {
    files.push({
      realPath,
      metadataPath,
    })
    const stats = await lstat(realPath)
    const { mode, mtime } = stats
    const opts = { mode, mtime }
    if (stats.isDirectory()) {
      zip.addEmptyDirectory(virtualPath, opts)
      await addZipEntries(zip, realPath, virtualPath, await readdir(realPath))
    } else if (stats.isFile()) {
      zip.addFile(realPath, virtualPath, opts)
    }
  }
}
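The new `makeRelative` resolves a user-supplied path against `/` before stripping the leading slash, so `..` segments are normalized away and cannot climb out of the partition root once `resolveSubpath` joins the result back under `root`. A quick standalone check of that containment property:

```js
import { resolve } from 'node:path'

const makeRelative = path => resolve('/', path).slice(1)
const resolveSubpath = (root, path) => resolve(root, makeRelative(path))

// '..' is swallowed at the virtual root, keeping the result inside /mnt/part
console.log(resolveSubpath('/mnt/part', '../../etc/passwd')) // => /mnt/part/etc/passwd
console.log(resolveSubpath('/mnt/part', '/a/./b')) // => /mnt/part/a/b
```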
@@ -75,7 +75,7 @@ const debounceResourceFactory = factory =>
    return this._debounceResource(factory.apply(this, arguments))
  }

class RemoteAdapter {
export class RemoteAdapter {
  constructor(
    handler,
    { debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
@@ -186,17 +186,6 @@ class RemoteAdapter {
    })
  }

  async *_usePartitionFiles(diskId, partitionId, paths) {
    const path = yield this.getPartition(diskId, partitionId)

    const files = []
    await asyncMap(paths, file =>
      addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
    )

    return files
  }

  // check if we will be allowed to merge a VHD created in this adapter
  // with the VHD at path `path`
  async isMergeableParent(packedParentUid, path) {
@@ -208,20 +197,29 @@ class RemoteAdapter {

      const isVhdDirectory = vhd instanceof VhdDirectory
      return isVhdDirectory
        ? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
        : !this.#useVhdDirectory()
        ? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
        : !this.useVhdDirectory()
    })
  }

  fetchPartitionFiles(diskId, partitionId, paths) {
  fetchPartitionFiles(diskId, partitionId, paths, format) {
    const { promise, reject, resolve } = pDefer()
    Disposable.use(
      async function* () {
        const files = yield this._usePartitionFiles(diskId, partitionId, paths)
        const zip = new ZipFile()
        files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
        zip.end()
        const { outputStream } = zip
        const path = yield this.getPartition(diskId, partitionId)
        let outputStream

        if (format === 'tgz') {
          outputStream = tar.c({ cwd: path, gzip: true }, paths.map(makeRelative))
        } else if (format === 'zip') {
          const zip = new ZipFile()
          await addZipEntries(zip, path, '', paths.map(makeRelative))
          zip.end()
          ;({ outputStream } = zip)
        } else {
          throw new Error('unsupported format ' + format)
        }

        resolve(outputStream)
        await fromEvent(outputStream, 'end')
      }.bind(this)
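The rewritten method picks the archive stream by `format`: `tar.c` with `gzip: true` already returns a readable stream when no destination file is given, while yazl's `ZipFile` exposes one through `outputStream` once entries are queued and `end()` is called. A standalone sketch of the same two paths (directory and file names are illustrative):

```js
import tar from 'tar'
import { ZipFile } from 'yazl'

function archiveStream(format, cwd, relativePaths) {
  if (format === 'tgz') {
    // tar.c() returns a readable stream when the `file` option is omitted
    return tar.c({ cwd, gzip: true }, relativePaths)
  }
  if (format === 'zip') {
    const zip = new ZipFile()
    for (const path of relativePaths) {
      zip.addFile(`${cwd}/${path}`, path) // real path on disk, virtual path in the archive
    }
    zip.end() // no more entries; outputStream will then finish
    return zip.outputStream
  }
  throw new Error('unsupported format ' + format)
}
```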
@@ -232,21 +230,23 @@ class RemoteAdapter {
    return promise
  }

  #removeVmBackupsFromCache(backups) {
    for (const [dir, filenames] of Object.entries(
      groupBy(
        backups.map(_ => _._filename),
        dirname
      )
    )) {
      // detached async action, will not reject
      this._updateCache(dir + '/cache.json.gz', backups => {
        for (const filename of filenames) {
          debug('removing cache entry', { entry: filename })
          delete backups[filename]
        }
      })
    }
  async #removeVmBackupsFromCache(backups) {
    await asyncEach(
      Object.entries(
        groupBy(
          backups.map(_ => _._filename),
          dirname
        )
      ),
      ([dir, filenames]) =>
        // will not reject
        this._updateCache(dir + '/cache.json.gz', backups => {
          for (const filename of filenames) {
            debug('removing cache entry', { entry: filename })
            delete backups[filename]
          }
        })
    )
  }

  async deleteDeltaVmBackups(backups) {
@@ -255,7 +255,7 @@ class RemoteAdapter {
    // this will delete the json, unused VHDs will be detected by `cleanVm`
    await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))

    this.#removeVmBackupsFromCache(backups)
    await this.#removeVmBackupsFromCache(backups)
  }

  async deleteMetadataBackup(backupId) {
@@ -284,7 +284,7 @@ class RemoteAdapter {
      Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
    )

    this.#removeVmBackupsFromCache(backups)
    await this.#removeVmBackupsFromCache(backups)
  }

  deleteVmBackup(file) {
@@ -318,19 +318,19 @@ class RemoteAdapter {
    return this._vhdDirectoryCompression
  }

  #useVhdDirectory() {
  useVhdDirectory() {
    return this.handler.useVhdDirectory()
  }

  #useAlias() {
    return this.#useVhdDirectory()
    return this.useVhdDirectory()
  }

  async *#getDiskLegacy(diskId) {
    const RE_VHDI = /^vhdi(\d+)$/
    const handler = this._handler

    const diskPath = handler._getFilePath('/' + diskId)
    const diskPath = handler.getFilePath('/' + diskId)
    const mountDir = yield getTmpDir()
    await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
    try {
@@ -401,20 +401,27 @@ class RemoteAdapter {
    return `${baseName}.vhd`
  }

  async listAllVmBackups() {
  async listAllVms() {
    const handler = this._handler

    const backups = { __proto__: null }
    await asyncMap(await handler.list(BACKUP_DIR), async entry => {
    const vmsUuids = []
    await asyncEach(await handler.list(BACKUP_DIR), async entry => {
      // ignore hidden and lock files
      if (entry[0] !== '.' && !entry.endsWith('.lock')) {
        const vmBackups = await this.listVmBackups(entry)
        if (vmBackups.length !== 0) {
          backups[entry] = vmBackups
        }
        vmsUuids.push(entry)
      }
    })
    return vmsUuids
  }

  async listAllVmBackups() {
    const vmsUuids = await this.listAllVms()
    const backups = { __proto__: null }
    await asyncEach(vmsUuids, async vmUuid => {
      const vmBackups = await this.listVmBackups(vmUuid)
      if (vmBackups.length !== 0) {
        backups[vmUuid] = vmBackups
      }
    })
    return backups
  }
@@ -508,7 +515,7 @@ class RemoteAdapter {
    return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
  }

  async #readCache(path) {
  async _readCache(path) {
    try {
      return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
    } catch (error) {
@@ -521,15 +528,15 @@ class RemoteAdapter {
  _updateCache = synchronized.withKey()(this._updateCache)
  // eslint-disable-next-line no-dupe-class-members
  async _updateCache(path, fn) {
    const cache = await this.#readCache(path)
    const cache = await this._readCache(path)
    if (cache !== undefined) {
      fn(cache)

      await this.#writeCache(path, cache)
      await this._writeCache(path, cache)
    }
  }

  async #writeCache(path, data) {
  async _writeCache(path, data) {
    try {
      await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
    } catch (error) {
@@ -537,10 +544,6 @@ class RemoteAdapter {
    }
  }

  async invalidateVmBackupListCache(vmUuid) {
    await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
  }

  async #getCachabledDataListVmBackups(dir) {
    debug('generating cache', { path: dir })

@@ -581,7 +584,7 @@ class RemoteAdapter {
  async _readCacheListVmBackups(vmUuid) {
    const path = this.#getVmBackupsCache(vmUuid)

    const cache = await this.#readCache(path)
    const cache = await this._readCache(path)
    if (cache !== undefined) {
      debug('found VM backups cache, using it', { path })
      return cache
@@ -594,7 +597,7 @@ class RemoteAdapter {
    }

    // detached async action, will not reject
    this.#writeCache(path, backups)
    this._writeCache(path, backups)

    return backups
  }
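Both cache helpers are thin wrappers around gzip-compressed JSON, with read failures treated as cache misses. Outside of the handler abstraction, the same round-trip looks like this (a sketch using `node:zlib` promisified):

```js
import { promisify } from 'node:util'
import zlib from 'node:zlib'

const gzip = promisify(zlib.gzip)
const gunzip = promisify(zlib.gunzip)

// write: JSON -> gzip buffer; read: gzip buffer -> JSON (undefined when unreadable)
const writeCache = async data => gzip(JSON.stringify(data))
const readCache = async buffer => {
  try {
    return JSON.parse(await gunzip(buffer))
  } catch (error) {
    return undefined // treated as a cache miss by the callers above
  }
}

const buf = await writeCache({ 'backup.json': { size: 42 } })
console.log(await readCache(buf)) // => { 'backup.json': { size: 42 } }
```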
@@ -645,7 +648,7 @@ class RemoteAdapter {
    })

    // will not throw
    this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
    await this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
      debug('adding cache entry', { entry: path })
      backups[path] = {
        ...metadata,
@@ -659,26 +662,27 @@ class RemoteAdapter {
    return path
  }

  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
    const handler = this._handler
    if (this.#useVhdDirectory()) {
    if (this.useVhdDirectory()) {
      const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
      await createVhdDirectoryFromStream(handler, dataPath, input, {
      const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
        concurrency: writeBlockConcurrency,
        compression: this.#getCompressionType(),
        async validator() {
          await input.task
          return validator.apply(this, arguments)
        },
        nbdClient,
      })
      await VhdAbstract.createAlias(handler, path, dataPath)
      return size
    } else {
      await this.outputStream(path, input, { checksum, validator })
      return this.outputStream(path, input, { checksum, validator })
    }
  }

  async outputStream(path, input, { checksum = true, validator = noop } = {}) {
    const container = watchStreamSize(input)
    await this._handler.outputStream(path, input, {
      checksum,
      dirMode: this._dirMode,
@@ -687,11 +691,12 @@ class RemoteAdapter {
      return validator.apply(this, arguments)
    },
    })
    return container.size
  }

  // open the hierarchy of ancestors until we find a full one
  async _createSyntheticStream(handler, path) {
    const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
  async _createVhdStream(handler, path, { useChain }) {
    const disposableSynthetic = useChain ? await VhdSynthetic.fromVhdChain(handler, path) : await openVhd(handler, path)
    // I don't want the vhds to be disposed on return
    // but only when the stream is done (or failed)
@@ -716,15 +721,15 @@ class RemoteAdapter {
    return stream
  }

  async readDeltaVmBackup(metadata, ignoredVdis) {
  async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true } = {}) {
    const handler = this._handler
    const { vbds, vhds, vifs, vm } = metadata
    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
    const dir = dirname(metadata._filename)
    const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

    const streams = {}
    await asyncMapSettled(Object.keys(vdis), async ref => {
      streams[`${ref}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[ref]))
      streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain })
    })

    return {
@@ -733,7 +738,7 @@ class RemoteAdapter {
      vdis,
      version: '1.0.0',
      vifs,
      vm,
      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
    }
  }
@@ -745,7 +750,49 @@ class RemoteAdapter {
    // _filename is a private field used to compute the backup id
    //
    // it's enumerable to make it cacheable
    return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
    const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }

    // backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
    if (typeof metadata.vm.is_a_template === 'number') {
      const properties = {
        vbds: ['bootable', 'unpluggable', 'storage_lock', 'empty', 'currently_attached'],
        vdis: [
          'sharable',
          'read_only',
          'storage_lock',
          'managed',
          'missing',
          'is_a_snapshot',
          'allow_caching',
          'metadata_latest',
        ],
        vifs: ['currently_attached', 'MAC_autogenerated'],
        vm: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_a_snapshot', 'is_snapshot_from_vmpp'],
        vmSnapshot: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_snapshot_from_vmpp'],
      }

      function fixBooleans(obj, properties) {
        properties.forEach(property => {
          if (typeof obj[property] === 'number') {
            obj[property] = obj[property] === 1
          }
        })
      }

      for (const [key, propertiesInKey] of Object.entries(properties)) {
        const value = metadata[key]
        if (value !== undefined) {
          // some properties of the metadata are collections indexed by the opaqueRef
          const isCollection = Object.keys(value).some(subKey => subKey.startsWith('OpaqueRef:'))
          if (isCollection) {
            Object.values(value).forEach(subValue => fixBooleans(subValue, propertiesInKey))
          } else {
            fixBooleans(value, propertiesInKey)
          }
        }
      }
    }
    return metadata
  }
}
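The effect of `fixBooleans` on a pre-7.1 record is simply `0/1` → `false/true`, leaving genuine booleans untouched:

```js
// Same helper as in the hunk above, inlined so the example is self-contained.
const fixBooleans = (obj, properties) =>
  properties.forEach(p => {
    if (typeof obj[p] === 'number') obj[p] = obj[p] === 1
  })

const record = { is_a_template: 0, ha_always_run: 1, is_a_snapshot: true }
fixBooleans(record, ['is_a_template', 'ha_always_run', 'is_a_snapshot'])
console.log(record) // => { is_a_template: false, ha_always_run: true, is_a_snapshot: true }
```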
@@ -779,11 +826,7 @@ decorateMethodsWith(RemoteAdapter, {
    debounceResourceFactory,
  ]),

  _usePartitionFiles: Disposable.factory,

  getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),

  getPartition: Disposable.factory,
})

exports.RemoteAdapter = RemoteAdapter
@@ -1,26 +0,0 @@
'use strict'

const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')

exports.RestoreMetadataBackup = class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      return String(await handler.readFile(`${backupId}/data.json`))
    }
  }
}
@xen-orchestra/backups/RestoreMetadataBackup.mjs (new file, 32 lines)
@@ -0,0 +1,32 @@
import { join, resolve } from 'node:path/posix'

import { DIR_XO_POOL_METADATA_BACKUPS } from './RemoteAdapter.mjs'
import { PATH_DB_DUMP } from './_runners/_PoolMetadataBackup.mjs'

export class RestoreMetadataBackup {
  constructor({ backupId, handler, xapi }) {
    this._backupId = backupId
    this._handler = handler
    this._xapi = xapi
  }

  async run() {
    const backupId = this._backupId
    const handler = this._handler
    const xapi = this._xapi

    if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
      return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
        task: xapi.task_create('Import pool metadata'),
      })
    } else {
      const metadata = JSON.parse(await handler.readFile(join(backupId, 'metadata.json')))
      const dataFileName = resolve(backupId, metadata.data ?? 'data.json')
      const data = await handler.readFile(dataFileName)

      // if data is JSON, send it as a plain string; otherwise, consider the data binary and encode it
      const isJson = dataFileName.endsWith('.json')
      return isJson ? data.toString() : { encoding: 'base64', data: data.toString('base64') }
    }
  }
}
@@ -1,7 +1,5 @@
'use strict'

const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
import CancelToken from 'promise-toolbox/CancelToken'
import Zone from 'node-zone'

const logAfterEnd = log => {
  const error = new Error('task has already ended')
@@ -30,7 +28,7 @@ const serializeError = error =>

const $$task = Symbol('@xen-orchestra/backups/Task')

class Task {
export class Task {
  static get cancelToken() {
    const task = Zone.current.data[$$task]
    return task !== undefined ? task.#cancelToken : CancelToken.none
@@ -100,7 +98,7 @@ class Task {
   * In case of error, the task will be failed.
   *
   * @typedef Result
   * @param {() => Result)} fn
   * @param {() => Result} fn
   * @param {boolean} last - Whether the task should succeed if there is no error
   * @returns Result
   */
@@ -151,7 +149,6 @@ class Task {
    })
  }
}
exports.Task = Task

for (const method of ['info', 'warning']) {
  Task[method] = (...args) => Zone.current.data[$$task]?.[method](...args)
@@ -1,496 +0,0 @@
'use strict'

const assert = require('assert')
const findLast = require('lodash/findLast.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
const { exportDeltaVm } = require('./_deltaVm.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
const { getOldEntries } = require('./_getOldEntries.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

const { debug, warn } = createLogger('xo:backups:VmBackup')

class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

const asyncEach = async (iterable, fn, thisArg = iterable) => {
  for (const item of iterable) {
    await fn.call(thisArg, item)
  }
}

const forkDeltaExport = deltaExport =>
  Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })

class VmBackup {
  constructor({
    config,
    getSnapshotNameLabel,
    healthCheckSr,
    job,
    remoteAdapters,
    remotes,
    schedule,
    settings,
    srs,
    vm,
  }) {
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
      // don't match replicated VMs created by this very job, otherwise they
      // will be replicated again and again
      throw new Error('cannot backup a VM created by this very job')
    }

    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm

    // VM (snapshot) that is really exported
    this.exportedVm = undefined

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isDelta = job.mode === 'delta'
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._isDelta
        ? [DeltaBackupWriter, DeltaReplicationWriter]
        : [FullBackupWriter, FullReplicationWriter]

      const allSettings = job.settings
      Object.keys(remoteAdapters).forEach(remoteId => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
        }
      })
    }
  }
  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
    const n = writers.size
    if (n === 0) {
      return
    }

    async function callWriter(writer) {
      const { name } = writer.constructor
      try {
        debug('writer step starting', { step, writer: name })
        await fn(writer)
        debug('writer step succeeded', { duration: step, writer: name })
      } catch (error) {
        writers.delete(writer)

        warn('writer step failed', { error, step, writer: name })

        // these two steps are the only ones that are not already in their own subtasks
        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }

        throw error
      }
    }
    if (n === 1) {
      const [writer] = writers
      return callWriter(writer)
    }

    const errors = []
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await callWriter(writer)
      } catch (error) {
        errors.push(error)
      }
    })
    if (writers.size === 0) {
      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const { vm } = this
    const xapi = this._xapi

    const settings = this._settings

    const doSnapshot =
      settings.unconditionalSnapshot ||
      this._isDelta ||
      (!settings.offlineBackup && vm.power_state === 'Running') ||
      settings.snapshotRetention !== 0
    if (doSnapshot) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          ignoreNobakVdis: true,
          name_label: this._getSnapshotNameLabel(vm),
          unplugVusbs: true,
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _copyDelta() {
    const { exportedVm } = this
    const baseVm = this._baseVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          sizeContainers,
          timestamp,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }
  async _copyFull() {
    const { compression } = this.job
    const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
      compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
      useSnapshot: false,
    })
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    const allSettings = this.job.settings
    const baseSettings = this._baseSettings
    const baseVmRef = this._baseVm?.$ref

    const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
    const xapi = this._xapi
    await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
      const settings = {
        ...baseSettings,
        ...allSettings[scheduleId],
        ...allSettings[this.vm.uuid],
      }
      return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
        if ($ref !== baseVmRef) {
          return xapi.VM_destroy($ref)
        }
      })
    })
  }

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      debug('no base VM found')
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      debug('not using base VM because fullInterval reached')
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const [baseUuid, snapshotOf] = await Promise.all([
        xapi.getField('VDI', baseRef, 'uuid'),
        xapi.getField('VDI', baseRef, 'snapshot_of'),
      ])
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(baseUuid, srcVdi)
      } else {
        debug('ignore snapshot VDI because no longer present on VM', {
          vdi: baseUuid,
        })
      }
    })

    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    await this._callWriters(
      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
      'writer.checkBaseVdis()',
      false
    )

    if (presentBaseVdis.size === 0) {
      debug('no base VM found')
      return
    }

    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (presentBaseVdis.has(baseUuid)) {
        debug('found base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
      } else {
        debug('missing base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }
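`_selectBaseVm` enforces `fullInterval` with the check `fullInterval === 0 || fullInterval > deltaChainLength`, where the candidate chain length is the base VM's recorded length plus one. The arithmetic, isolated as a sketch:

```js
// fullInterval = 0 means "never force a full backup"
const mayUseBase = (fullInterval, baseChainLength) => {
  const deltaChainLength = baseChainLength + 1 // length if one more delta is chained
  return fullInterval === 0 || fullInterval > deltaChainLength
}

console.log(mayUseBase(7, 5)) // => true:  one more delta keeps the chain under 7
console.log(mayUseBase(7, 6)) // => false: the chain would reach 7, force a full
console.log(mayUseBase(0, 99)) // => true: unlimited chain length
```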
  async _healthCheck() {
    const settings = this._settings

    if (this._healthCheckSr === undefined) {
      return
    }

    // check if current VM has tags
    const { tags } = this.vm
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      return
    }

    await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
  }

  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    if (this._isDelta) {
      await this._selectBaseVm()
    }

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { vm } = this
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await (this._isDelta ? this._copyDelta() : this._copyFull())
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
    await this._healthCheck()
  }
}
exports.VmBackup = VmBackup

decorateMethodsWith(VmBackup, {
  run: defer,
})
@@ -1,6 +0,0 @@
'use strict'

exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@xen-orchestra/backups/_backupType.mjs (new file, 4 lines)
@@ -0,0 +1,4 @@
export const isMetadataFile = filename => filename.endsWith('.json')
export const isVhdFile = filename => filename.endsWith('.vhd')
export const isXvaFile = filename => filename.endsWith('.xva')
export const isXvaSumFile = filename => filename.endsWith('.xva.checksum')
@@ -1,24 +1,26 @@
'use strict'
import { createLogger } from '@xen-orchestra/log'
import { catchGlobalErrors } from '@xen-orchestra/log/configure'

require('@xen-orchestra/log/configure.js').catchGlobalErrors(
  require('@xen-orchestra/log').createLogger('xo:backups:worker')
)
import Disposable from 'promise-toolbox/Disposable'
import ignoreErrors from 'promise-toolbox/ignoreErrors'
import { compose } from '@vates/compose'
import { createCachedLookup } from '@vates/cached-dns.lookup'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
import { createRunner } from './Backup.mjs'
import { decorateMethodsWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped.js'
import { getHandler } from '@xen-orchestra/fs'
import { parseDuration } from '@vates/parse-duration'
import { Xapi } from '@xen-orchestra/xapi'

require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { Task } from './Task.mjs'

const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
const { Xapi } = require('@xen-orchestra/xapi')
createCachedLookup().patchGlobal()

const { Backup } = require('./Backup.js')
const { RemoteAdapter } = require('./RemoteAdapter.js')
const { Task } = require('./Task.js')
const logger = createLogger('xo:backups:worker')
catchGlobalErrors(logger)
const { debug } = logger

class BackupWorker {
  #config
@@ -46,7 +48,7 @@ class BackupWorker {
  }

  run() {
    return new Backup({
    return createRunner({
      config: this.#config,
      getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
      getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
@@ -122,6 +124,11 @@ decorateMethodsWith(BackupWorker, {
  ]),
})

const emitMessage = message => {
  debug('message emitted', { message })
  process.send(message)
}

// Received message:
//
// Message {
@@ -139,6 +146,8 @@ decorateMethodsWith(BackupWorker, {
//   result?: any
// }
process.on('message', async message => {
  debug('message received', { message })

  if (message.action === 'run') {
    const backupWorker = new BackupWorker(message.data)
    try {
@@ -147,7 +156,7 @@ process.on('message', async message => {
      {
        name: 'backup run',
        onLog: data =>
          process.send({
          emitMessage({
            data,
            type: 'log',
          }),
@@ -156,13 +165,13 @@ process.on('message', async message => {
      )
      : await backupWorker.run()

    process.send({
    emitMessage({
      type: 'result',
      result,
      status: 'success',
    })
  } catch (error) {
    process.send({
    emitMessage({
      type: 'result',
      result: error,
      status: 'failure',
@@ -1,13 +1,11 @@
'use strict'

const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')
import cancelable from 'promise-toolbox/cancelable'
import CancelToken from 'promise-toolbox/CancelToken'

// Similar to `Promise.all` + `map` but passes a cancel token to the callback
//
// If any of the executions fails, the cancel token will be triggered and the
// promise will be rejected with the first reason.
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
export const cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
  const { cancel, token } = CancelToken.source([$cancelToken])
  try {
    return await Promise.all(
@@ -1,19 +1,19 @@
'use strict'
import test from 'test'
import { strict as assert } from 'node:assert'

const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict
import tmp from 'tmp'
import fs from 'fs-extra'
import * as uuid from 'uuid'
import { getHandler } from '@xen-orchestra/fs'
import { pFromCallback } from 'promise-toolbox'
import { RemoteAdapter } from './RemoteAdapter.mjs'
import { VHDFOOTER, VHDHEADER } from './tests.fixtures.mjs'
import { VhdFile, Constants, VhdDirectory, VhdAbstract } from 'vhd-lib'
import { checkAliases } from './_cleanVm.mjs'
import { dirname, basename } from 'node:path'
import { rimraf } from 'rimraf'

const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const uuid = require('uuid')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
const { beforeEach, afterEach, describe } = test

let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
@@ -31,7 +31,7 @@ beforeEach(async () => {
})

afterEach(async () => {
  await pFromCallback(cb => rimraf(tempDir, cb))
  await rimraf(tempDir)
  await handler.forget()
})
@@ -221,7 +221,7 @@ test('it merges delta of non destroyed chain', async () => {
    loggued.push(message)
  }
  await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
  assert.equal(loggued[0], `incorrect backup size in metadata`)
  assert.equal(loggued[0], `unexpected number of entries in backup cache`)

  loggued = []
  await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
@@ -378,7 +378,19 @@ describe('tests multiple combination ', () => {
        ],
      })
    )

    if (!useAlias && vhdMode === 'directory') {
      try {
        await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
      } catch (err) {
        assert.strictEqual(
          err.code,
          'NOT_SUPPORTED',
          'Merging directory without alias should raise a not supported error'
        )
        return
      }
      assert.strictEqual(true, false, 'Merging directory without alias should raise an error')
    }
    await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })

    const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
@@ -1,19 +1,18 @@
'use strict'
import * as UUID from 'uuid'
import sum from 'lodash/sum.js'
import { asyncMap } from '@xen-orchestra/async-map'
import { Constants, openVhd, VhdAbstract, VhdFile } from 'vhd-lib'
import { isVhdAlias, resolveVhdAlias } from 'vhd-lib/aliases.js'
import { dirname, resolve } from 'node:path'
import { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } from './_backupType.mjs'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { mergeVhdChain } from 'vhd-lib/merge.js'

import { Task } from './Task.mjs'
import { Disposable } from 'promise-toolbox'
import handlerPath from '@xen-orchestra/fs/path'

const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')

const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')

// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
@@ -117,7 +116,7 @@ const listVhds = async (handler, vmDir, logWarn) => {
  return { vhds, interruptedVhds, aliases }
}

async function checkAliases(
export async function checkAliases(
  aliasPaths,
  targetDataRepository,
  { handler, logInfo = noop, logWarn = console.warn, remove = false }
@@ -176,11 +175,9 @@ async function checkAliases(
  })
}

exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

exports.cleanVm = async function cleanVm(
export async function cleanVm(
  vmDir,
  {
    fixMetadata,
@@ -311,7 +308,6 @@ exports.cleanVm = async function cleanVm(
  }

  const jsons = new Set()
  let mustInvalidateCache = false
  const xvas = new Set()
  const xvaSums = []
  const entries = await handler.list(vmDir, {
@@ -327,6 +323,20 @@ exports.cleanVm = async function cleanVm(
    }
  })

  const cachePath = vmDir + '/cache.json.gz'

  let mustRegenerateCache
  {
    const cache = await this._readCache(cachePath)
    const actual = cache === undefined ? 0 : Object.keys(cache).length
    const expected = jsons.size

    mustRegenerateCache = actual !== expected
    if (mustRegenerateCache) {
      logWarn('unexpected number of entries in backup cache', { path: cachePath, actual, expected })
    }
  }

  await asyncMap(xvas, async path => {
    // check is not good enough to delete the file, the best we can do is report it
@@ -338,6 +348,8 @@ exports.cleanVm = async function cleanVm(
  const unusedVhds = new Set(vhds)
  const unusedXvas = new Set(xvas)

  const backups = new Map()

  // compile the list of unused XVAs and VHDs, and remove backup metadata which
  // reference a missing XVA/VHD
  await asyncMap(jsons, async json => {
@@ -350,19 +362,16 @@ exports.cleanVm = async function cleanVm(
      return
    }

    let isBackupComplete

    const { mode } = metadata
    if (mode === 'full') {
      const linkedXva = resolve('/', vmDir, metadata.xva)
      if (xvas.has(linkedXva)) {
      isBackupComplete = xvas.has(linkedXva)
      if (isBackupComplete) {
        unusedXvas.delete(linkedXva)
      } else {
        logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
        if (remove) {
          logInfo('deleting incomplete backup', { path: json })
          jsons.delete(json)
          mustInvalidateCache = true
          await handler.unlink(json)
        }
      }
    } else if (mode === 'delta') {
      const linkedVhds = (() => {
@@ -371,22 +380,28 @@ exports.cleanVm = async function cleanVm(
      })()

      const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
      isBackupComplete = missingVhds.length === 0

      // FIXME: find better approach by keeping as much of the backup as
      // possible (existing disks) even if one disk is missing
      if (missingVhds.length === 0) {
      if (isBackupComplete) {
        linkedVhds.forEach(_ => unusedVhds.delete(_))
        linkedVhds.forEach(path => {
          vhdsToJSons[path] = json
        })
      } else {
        logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
        if (remove) {
          logInfo('deleting incomplete backup', { path: json })
          mustInvalidateCache = true
          jsons.delete(json)
          await handler.unlink(json)
        }
      }
    }

    if (isBackupComplete) {
      backups.set(json, metadata)
    } else {
      jsons.delete(json)
      if (remove) {
        logInfo('deleting incomplete backup', { backup: json })
        mustRegenerateCache = true
        await handler.unlink(json)
      }
    }
  })
@@ -496,7 +511,7 @@ exports.cleanVm = async function cleanVm(
  // check for the other that the size is the same as the real file size

  await asyncMap(jsons, async metadataPath => {
    const metadata = JSON.parse(await handler.readFile(metadataPath))
    const metadata = backups.get(metadataPath)

    let fileSystemSize
    const merged = metadataWithMergedVhd[metadataPath] !== undefined
@@ -523,7 +538,8 @@ exports.cleanVm = async function cleanVm(

    // don't warn if the size has changed after a merge
    if (!merged && fileSystemSize !== size) {
      logWarn('incorrect backup size in metadata', {
      // FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
      console.warn('cleanVm: incorrect backup size in metadata', {
        path: metadataPath,
        actual: size ?? 'none',
        expected: fileSystemSize,
@@ -538,6 +554,7 @@ exports.cleanVm = async function cleanVm(
    // systematically update size after a merge
    if ((merged || fixMetadata) && size !== fileSystemSize) {
      metadata.size = fileSystemSize
      mustRegenerateCache = true
      try {
        await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
      } catch (error) {
@@ -546,9 +563,16 @@ exports.cleanVm = async function cleanVm(
    }
  })

  // purge cache if a metadata file has been deleted
  if (mustInvalidateCache) {
    await handler.unlink(vmDir + '/cache.json.gz')
  if (mustRegenerateCache) {
    const cache = {}
    for (const [path, content] of backups.entries()) {
      cache[path] = {
        _filename: path,
        id: path,
        ...content,
      }
    }
    await this._writeCache(cachePath, cache)
  }

  return {
@@ -1,8 +0,0 @@
'use strict'

const { utcFormat, utcParse } = require('d3-time-format')

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
exports.formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
exports.parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
@xen-orchestra/backups/_filenameDate.mjs (new file, 6 lines)
@@ -0,0 +1,6 @@
import { utcFormat, utcParse } from 'd3-time-format'

// Format a date in ISO 8601 in a safe way to be used in filenames
// (even on Windows).
export const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
export const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')
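The format string yields compact UTC timestamps that sort lexicographically and avoid the characters Windows forbids in filenames (such as `:`):

```js
import { utcFormat, utcParse } from 'd3-time-format'

const formatFilenameDate = utcFormat('%Y%m%dT%H%M%SZ')
const parseFilenameDate = utcParse('%Y%m%dT%H%M%SZ')

const date = new Date(Date.UTC(2023, 0, 15, 10, 30, 0))
console.log(formatFilenameDate(date)) // => '20230115T103000Z'
console.log(+parseFilenameDate('20230115T103000Z') === +date) // => true (round-trip)
```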
@@ -1,37 +0,0 @@
'use strict'

const eos = require('end-of-stream')
const { PassThrough } = require('stream')

const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
  const { forks = 0 } = stream
  stream.forks = forks + 1

  debug('forking', { forks: stream.forks })

  const proxy = new PassThrough()
  stream.pipe(proxy)
  eos(stream, error => {
    if (error !== undefined) {
      debug('error on original stream, destroying fork', { error })
      proxy.destroy(error)
    }
  })
  eos(proxy, error => {
    debug('end of stream, unpiping', { error, forks: --stream.forks })

    stream.unpipe(proxy)

    if (stream.forks === 0) {
      debug('no more forks, destroying original stream')
      stream.destroy(new Error('no more consumers for this stream'))
    }
  })
  return proxy
}
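The removed helper let several writers consume a single export stream: each call piped the source into a fresh `PassThrough`, a failing fork was merely unpiped, and the source was only destroyed once the last fork ended. A runnable consumer-side sketch, assuming `forkStreamUnpipe` from the listing above is in scope:

```js
import { PassThrough } from 'node:stream'
// assumes forkStreamUnpipe as defined in the removed file above

const source = new PassThrough() // stand-in for a VM export stream
const forkA = forkStreamUnpipe(source)
const forkB = forkStreamUnpipe(source)
forkA.on('data', d => console.log('A:', d.toString()))
forkB.on('data', d => console.log('B:', d.toString()))
source.end('hello') // both forks receive 'hello'; the source is destroyed after both end
```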
@@ -1,6 +1,4 @@
'use strict'

// returns all entries but the last `retention` ones
exports.getOldEntries = function getOldEntries(retention, entries) {
export function getOldEntries(retention, entries) {
  return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
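`slice(0, -retention)` keeps the newest `retention` entries (the lists fed to it are sorted oldest-first) and returns the rest, which the callers then delete:

```js
const getOldEntries = (retention, entries) =>
  entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries

console.log(getOldEntries(2, ['a', 'b', 'c', 'd'])) // => ['a', 'b']  (to delete)
console.log(getOldEntries(0, ['a', 'b'])) // => ['a', 'b']  (retention 0: keep nothing)
console.log(getOldEntries(5, ['a'])) // => []          (fewer entries than retention)
```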
@@ -1,13 +1,11 @@
'use strict'

const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')
import Disposable from 'promise-toolbox/Disposable'
import { join } from 'node:path'
import { mkdir, rmdir } from 'node:fs/promises'
import { tmpdir } from 'os'

const MAX_ATTEMPTS = 3

exports.getTmpDir = async function getTmpDir() {
export async function getTmpDir() {
  for (let i = 0; true; ++i) {
    const path = join(tmpdir(), Math.random().toString(36).slice(2))
    try {
@@ -1,8 +0,0 @@
'use strict'

const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

exports.getVmBackupDir = function getVmBackupDir(uuid) {
  return `${BACKUP_DIR}/${uuid}`
}
Some files were not shown because too many files have changed in this diff.