Compare commits

395 commits: better-xen...feat_thin_
SHA1:

ec82acef29 27b5737f65 55b2e0292f 464d83e70f 614255a73a 90d15e1346 b0e2ea64e9 1da05e239d
fe7f0db81f 983153e620 6fe791dcf2 1ad406c7dd 4e032e11b1 ea34516d73 e1145f35ee 6864775b8a
f28721b847 2dc174fd9d 07142d0410 41bb16ca30 d8f1034858 52b3c49cdb c5cb1a5e96 92d9d3232c
9c4e0464f0 72d25754fd 1465a0ba59 ac8ce28286 c4b06e1915 f77675a8a3 b907c1fd03 fba86bf653
b18ebcc38d 4f7f18458e d412196052 1d140d8fd2 6948a25b09 26131917e3 44a0ab6d0a 2b8b033ad7
3ee0b3e7df 927a55ab30 b70721cb60 f71c820f15 74e0405a5e 79b55ba30a ee0adaebc5 83c5c976e3
18bd2c607e e2695ce327 3f316fcaea 8b7b162c76 aa36629def ca345bd6d8 61324d10f9 92fd92ae63
e48bfa2c88 cd5762fa19 71f7a6cd6c b8cade8b7a 696c6f13f0 b8d923d3ba 1a96c1bf0f 14a01d0141
74a2a4d2e5 b13b44cfd0 50a164423a a40d50a3bd 529e33140a 132b1a41db 75948b2977 eb84d4a7ef
1816d0240e 2c6d36b63e d9776ae8ed b456394663 94f599bdbd d466ca143a 78ed85a49f c24e7f9ecd
98caa89625 8e176eadb1 444268406f 7e062977d0 f4bf56f159 9f3b020361 ef35021a44 b74ebd050a
8a16d6aa3b cf7393992c c576114dad deeb399046 9cf8f8f492 28b7e99ebc 0ba729e5b9 ac8c146cf7
2ba437be31 bd8bb73309 485c2f4669 6fb562d92f 85efdcf7b9 fc1357d5d6 88b015bda4 b46f76cccf
c3bb2185c2 a240853fe0 d7ce609940 1b0ec9839e 77b166bb3b 76bd54d7de 684282f0a4 2459f46c19
5f0466e4d8 3738edfa83 769e27e2cb 8ec5461338 4a2843cb67 a0e69a79ab 3da94f18df 17cb59b898
315e5c9289 01ba10fedb 13e7594560 f9ac2ac84d 09cfac1111 008f7a30fd ff65dbcba7 264a0d1678
7dcaf454ed 17b2756291 57e48b5d34 57ed984e5a 100122f388 12d4b3396e ab35c710cb 4bd5b38aeb
836db1b807 73d88cc5f1 3def66d968 3f73138fc3 bfe621a21d 32fa792eeb a833050fc2 e7e6294bc3
7c71884e27 3e822044f2 d457f5fca4 1837e01719 f17f5abf0f 82c229c755 c7e3ba3184 470c9bb6c8
bb3ab20b2a 90ce1c4d1e 5c436f3870 159339625d 87e6f7fded fd2c7c2fc3 7fc76c1df4 f2758d036d
ac670da793 c0465eb4d9 cea55b03e5 d78d802066 a562c74492 d1f2e0a84b 49e2d128ad f587798fb0
3430ee743b 83299587b0 7c0ecf9b06 abfd84d32c 0583a978be 75989cf92d f1cc284b6f 0444cf0b3b
226f9ad964 a956cb2ac9 76a91cc5e9 f012d126b9 bae0b52893 a24512cea9 84b75e8a58 6e25b7a83a
136718df7e d48ef1f810 9e60c53750 f3c5e817a3 60f6e54da1 f5a59caca2 6ea671a434 036f3f6bd0
12552a1391 e9b658b60d 15f69a19f5 54d885fa9c 11cc299940 091b0a3ef3 87874a4b81 86aaa50946
68b2c287eb 61f1316c42 afadc8f95a 955ef6806c 4d55c5ae48 5c6ae1912b 083483645e c077e9a699
280b60808f eb9608b893 a29f3d67ea 6b150dc8a8 8f55884602 2fdba2eb0f 7e4bd30f04 eb8f098aaf
5237fdd387 8a07b7a3db a41037833c 6a780d94a3 506ef0b44f a4d1d41b6a 4e9477f34a 43b6285437
c26a7a3e51 93eb42785d 02bb622e92 b873c147a6 5e7fb7a881 97790313eb 954b29cb61 dc6a13962f
23da202790 f237101b4a 8a99326a76 8c95974e65 3f7454efad e5c890e29b 53e0f17c55 34f6be868e
c84b899276 266a26fa31 bbf92be652 e19c7b949d 5ce6f1fe4d 9c36520c79 a85a8ea208 c2e0c97d94
a5447fda3c 507e9a55c2 5bd0eb3362 458496a09e f13a98b6b8 dde32724b1 63b76fdb50 1b9cd56e9f
784b0dded8 4a658787de 4beb49041d 1f6e29084f 7c6cb2454b 96720d186c a45fb88c48 b4b0a925af
72822c9529 ca6cdbf9cf 74cd35f527 010866a0ef 5885df4ae9 89bc6da5f4 d87f698512 08dd871cb8
b5578eadf7 aadc1bb84c 2823af9441 d7da83359f a143cd3427 3c4dcde1d4 7adfc195dc 5a2c315b20
299803f03c 1eac62a26e f1b5416d0b 65168c8532 35f6476d0f 36fabe194f 921c700fab 2dbe35a31c
656d13d79b 77b1adae37 c18373bb0e d4e7563272 86d6052c89 c5ae0dc4ca e979a2be9b 586b84f434
56b9d22d49 69aa241dc9 1335e12b97 d1b1fa7ffd d6a3492e90 4af57810d6 6555cc4639 3f57287d79
1713e311f3 8a14e78d2d 1942e55f76 d83f41d0ff 0f09240fb2 3731b49ea8 209223f77e 7a5f5ee31d
a148cb6c9b e9ac049744 e06d4bd841 6cad4f5839 86f5f9eba3 473d091fa8 aec5ad4099 f14f98f7c1
e3d9a7ddf2 58e4f9b7b4 ef1f09cd4a 617619eb31 00a135b00f c71104db4f eef7940fbc da4b3db17a
c0d20f04b6 8fda8668b7 ea2c641604 84e38505c5 6584eb0827 467d897e05 2e6ea202cd 27f17551ad
48bfc4e3cd 61b9a4cf28 c95448bf25 f1ca60c182 52e79f78e5 586d6876f1 25759ecf0a 1fbe870884
9fcd497c42 63ee6b7f0e 73c0cd6934 e6c95a0913 af11cae29c b984a9ff00 13837e0bf3 f5d19fd28a
24ac3ea37d 13cb33cc4a 949a4697fe 3bbb828284 942b0f3dc9 208d8845c4 24cac9dcd5 c8b29da677
4f63d14529 cef6248650 774e443a79 1166807434 99cd502b65 d959e72a9c ee83788b43 62dd5f8ed7
2de9984945 890b46b697 5419957e06 39d4667916 083db67df9 8dceb6032b c300dad316 45b07f46f1
4023127c87 ab96c549ae bc0afb589e b42127f083 61d5a964ee f8fd6b78f5 4546ef6619 1f4457d9ca
65cbbf78bc a73a24c1df 31f850c19c 6d90d7bc82 d2a1c02b92 6d96452ef8 833589e6e7 8bb566e189
38d2117752 914decd4f9 873c38f9e1

.commitlintrc.json (Normal file, 1 line)

@@ -0,0 +1 @@
+{ "extends": ["@commitlint/config-conventional"] }

@@ -28,7 +28,7 @@ module.exports = {
       },
     },
     {
-      files: ['*.{spec,test}.{,c,m}js'],
+      files: ['*.{integ,spec,test}.{,c,m}js'],
       rules: {
         'n/no-unpublished-require': 'off',
         'n/no-unpublished-import': 'off',

.github/workflows/ci.yml (vendored, Normal file, 32 lines)

@@ -0,0 +1,32 @@
+name: Continous Integration
+on: push
+jobs:
+  CI:
+    runs-on: ubuntu-latest
+    steps:
+      # https://github.com/actions/checkout
+      - uses: actions/checkout@v3
+      - name: Install packages
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y curl qemu-utils python3-vmdkstream git libxml2-utils libfuse2 nbdkit
+      - name: Cache Turbo
+        # https://github.com/actions/cache
+        uses: actions/cache@v3
+        with:
+          path: '**/node_modules/.cache/turbo'
+          key: ${{ runner.os }}-turbo-cache
+      - name: Setup Node environment
+        # https://github.com/actions/setup-node
+        uses: actions/setup-node@v3
+        with:
+          node-version: '18'
+          cache: 'yarn'
+      - name: Install project dependencies
+        run: yarn
+      - name: Build the project
+        run: yarn build
+      - name: Lint tests
+        run: yarn test-lint
+      - name: Integration tests
+        run: sudo yarn test-integration

.github/workflows/push.yml (vendored, 12 lines)

@@ -1,12 +0,0 @@
-name: CI
-on: push
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Build docker image
-        run: docker-compose -f docker/docker-compose.dev.yml build
-      - name: Create the container and start the tests
-        run: docker-compose -f docker/docker-compose.dev.yml up --exit-code-from xo

.husky/commit-msg (Executable file, 11 lines)

@@ -0,0 +1,11 @@
+#!/usr/bin/env sh
+. "$(dirname -- "$0")/_/husky.sh"
+
+# Only check commit message if commit on master or first commit on another
+# branch to avoid bothering fix commits after reviews
+#
+# FIXME: does not properly run with git commit --amend
+if [ "$(git rev-parse --abbrev-ref HEAD)" = master ] || [ "$(git rev-list --count master..)" -eq 0 ]
+then
+  npx --no -- commitlint --edit "$1"
+fi

@vates/diff/.USAGE.md (Normal file, 32 lines)

@@ -0,0 +1,32 @@
+```js
+import diff from '@vates/diff'
+
+diff('foo bar baz', 'Foo qux')
+// → [ 0, 'F', 4, 'qux', 7, '' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains `F`
+// - at position 4, it contains `qux`
+// - at position 7, it ends
+
+diff('Foo qux', 'foo bar baz')
+// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains f`
+// - at position 4, it contains `bar`
+// - at position 7, it contains `baz`
+
+// works with all collections that supports
+// - `.length`
+// - `collection[index]`
+// - `.slice(start, end)`
+//
+// which includes:
+// - arrays
+// - strings
+// - `Buffer`
+// - `TypedArray`
+diff([0, 1, 2], [3, 4])
+// → [ 0, [ 3, 4 ], 2, [] ]
+```
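
The usage above only shows the array form. Per the JSDoc in `index.js` further down, `diff` also accepts a callback invoked once per difference instead of collecting them; a minimal sketch of that form:

```js
const diff = require('@vates/diff')

// when a callback is passed, diff() returns undefined and invokes
// cb(index, data) for each difference instead of collecting pairs
diff('foo bar baz', 'Foo qux', (index, data) => {
  console.log('difference at %d: %j', index, data)
})
// difference at 0: "F"
// difference at 4: "qux"
// difference at 7: ""
```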

@vates/diff/.npmignore (Symbolic link, 1 line)

@@ -0,0 +1 @@
+../../scripts/npmignore

@vates/diff/README.md (Normal file, 65 lines)

@@ -0,0 +1,65 @@
+<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
+
+# @vates/diff
+
+[](https://npmjs.org/package/@vates/diff)  [](https://bundlephobia.com/result?p=@vates/diff) [](https://npmjs.org/package/@vates/diff)
+
+> Computes differences between two arrays, buffers or strings
+
+## Install
+
+Installation of the [npm package](https://npmjs.org/package/@vates/diff):
+
+```sh
+npm install --save @vates/diff
+```
+
+## Usage
+
+```js
+import diff from '@vates/diff'
+
+diff('foo bar baz', 'Foo qux')
+// → [ 0, 'F', 4, 'qux', 7, '' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains `F`
+// - at position 4, it contains `qux`
+// - at position 7, it ends
+
+diff('Foo qux', 'foo bar baz')
+// → [ 0, 'f', 4, 'bar', 7, ' baz' ]
+//
+// Differences of the second string from the first one:
+// - at position 0, it contains f`
+// - at position 4, it contains `bar`
+// - at position 7, it contains `baz`
+
+// works with all collections that supports
+// - `.length`
+// - `collection[index]`
+// - `.slice(start, end)`
+//
+// which includes:
+// - arrays
+// - strings
+// - `Buffer`
+// - `TypedArray`
+diff([0, 1, 2], [3, 4])
+// → [ 0, [ 3, 4 ], 2, [] ]
+```
+
+## Contributions
+
+Contributions are _very_ welcomed, either on the documentation or on
+the code.
+
+You may:
+
+- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
+  you've encountered;
+- fork and create a pull request.
+
+## License
+
+[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@vates/diff/index.js (Normal file, 37 lines)

@@ -0,0 +1,37 @@
+'use strict'
+
+/**
+ * Compare two data arrays, buffers or strings and invoke the provided callback function for each difference.
+ *
+ * @template {Array|Buffer|string} T
+ * @param {Array|Buffer|string} data1 - The first data array or buffer to compare.
+ * @param {T} data2 - The second data array or buffer to compare.
+ * @param {(index: number, diff: T) => void} [cb] - The callback function to invoke for each difference. If not provided, an array of differences will be returned.
+ * @returns {Array<number|T>|undefined} - An array of differences if no callback is provided, otherwise undefined.
+ */
+module.exports = function diff(data1, data2, cb) {
+  let result
+  if (cb === undefined) {
+    result = []
+    cb = result.push.bind(result)
+  }
+
+  const n1 = data1.length
+  const n2 = data2.length
+  const n = Math.min(n1, n2)
+  for (let i = 0; i < n; ++i) {
+    if (data1[i] !== data2[i]) {
+      let j = i + 1
+      while (j < n && data1[j] !== data2[j]) {
+        ++j
+      }
+      cb(i, data2.slice(i, j))
+      i = j
+    }
+  }
+  if (n1 !== n2) {
+    cb(n, n1 < n2 ? data2.slice(n) : data2.slice(0, 0))
+  }
+
+  return result
+}
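
Since each `(index, data)` pair describes what `data2` contains at `index`, the output is enough to rebuild `data2` from `data1`. A sketch for string inputs (my own illustration; `applyDiff` is not part of this package):

```js
// rebuild data2 from data1 plus the pairs produced by diff()
function applyDiff(data1, differences) {
  let result = ''
  let cursor = 0
  for (let i = 0; i < differences.length; i += 2) {
    const index = differences[i]
    const data = differences[i + 1]
    // copy the unchanged region, then the differing chunk
    result += data1.slice(cursor, index) + data
    cursor = index + data.length
    // an empty chunk is only ever emitted as the final difference,
    // and means data2 ends at `index` (data1 was longer)
    if (data.length === 0) {
      return result
    }
  }
  // unchanged tail shared by both inputs
  return result + data1.slice(cursor)
}

applyDiff('foo bar baz', [0, 'F', 4, 'qux', 7, '']) // → 'Foo qux'
```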

@vates/diff/index.test.js (Normal file, 51 lines)

@@ -0,0 +1,51 @@
+'use strict'
+
+const assert = require('node:assert/strict')
+const test = require('test')
+
+const diff = require('./index.js')
+
+test('data of equal length', function () {
+  const data1 = 'foo bar baz'
+  const data2 = 'baz bar foo'
+  assert.deepEqual(diff(data1, data2), [0, 'baz', 8, 'foo'])
+})
+
+test('data1 is longer', function () {
+  const data1 = 'foo bar'
+  const data2 = 'foo'
+  assert.deepEqual(diff(data1, data2), [3, ''])
+})
+
+test('data2 is longer', function () {
+  const data1 = 'foo'
+  const data2 = 'foo bar'
+  assert.deepEqual(diff(data1, data2), [3, ' bar'])
+})
+
+test('with arrays', function () {
+  const data1 = 'foo bar baz'.split('')
+  const data2 = 'baz bar foo'.split('')
+  assert.deepEqual(diff(data1, data2), [0, 'baz'.split(''), 8, 'foo'.split('')])
+})
+
+test('with buffers', function () {
+  const data1 = Buffer.from('foo bar baz')
+  const data2 = Buffer.from('baz bar foo')
+  assert.deepEqual(diff(data1, data2), [0, Buffer.from('baz'), 8, Buffer.from('foo')])
+})
+
+test('cb param', function () {
+  const data1 = 'foo bar baz'
+  const data2 = 'baz bar foo'
+
+  const calls = []
+  const cb = (...args) => calls.push(args)
+
+  diff(data1, data2, cb)
+
+  assert.deepEqual(calls, [
+    [0, 'baz'],
+    [8, 'foo'],
+  ])
+})

@vates/diff/package.json (Normal file, 36 lines)

@@ -0,0 +1,36 @@
+{
+  "private": false,
+  "name": "@vates/diff",
+  "description": "Computes differences between two arrays, buffers or strings",
+  "keywords": [
+    "array",
+    "binary",
+    "buffer",
+    "diff",
+    "differences",
+    "string"
+  ],
+  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/diff",
+  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
+  "repository": {
+    "directory": "@vates/diff",
+    "type": "git",
+    "url": "https://github.com/vatesfr/xen-orchestra.git"
+  },
+  "author": {
+    "name": "Vates SAS",
+    "url": "https://vates.fr"
+  },
+  "license": "ISC",
+  "version": "0.1.0",
+  "engines": {
+    "node": ">=8.10"
+  },
+  "scripts": {
+    "postversion": "npm publish --access public",
+    "test": "node--test"
+  },
+  "devDependencies": {
+    "test": "^3.3.0"
+  }
+}

@@ -21,7 +21,7 @@
     "fuse-native": "^2.2.6",
     "lru-cache": "^7.14.0",
     "promise-toolbox": "^0.21.0",
-    "vhd-lib": "^4.2.1"
+    "vhd-lib": "^4.5.0"
   },
   "scripts": {
     "postversion": "npm publish --access public"

@@ -16,9 +16,13 @@ const {
   NBD_REPLY_MAGIC,
   NBD_REQUEST_MAGIC,
   OPTS_MAGIC,
+  NBD_CMD_DISC,
 } = require('./constants.js')
-const { fromCallback } = require('promise-toolbox')
+const { fromCallback, pRetry, pDelay, pTimeout } = require('promise-toolbox')
 const { readChunkStrict } = require('@vates/read-chunk')
+const { createLogger } = require('@xen-orchestra/log')
+
+const { warn } = createLogger('vates:nbd-client')
 
 // documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md

@@ -31,18 +35,34 @@ module.exports = class NbdClient {
   #exportName
   #exportSize
 
+  #waitBeforeReconnect
+  #readAhead
+  #readBlockRetries
+  #reconnectRetry
+  #connectTimeout
+
   // AFAIK, there is no guaranty the server answers in the same order as the queries
   // so we handle a backlog of command waiting for response and handle concurrency manually
 
   #waitingForResponse // there is already a listenner waiting for a response
   #nextCommandQueryId = BigInt(0)
   #commandQueryBacklog // map of command waiting for an response queryId => { size/*in byte*/, resolve, reject}
   #connected = false
 
-  constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
+  #reconnectingPromise
+  constructor(
+    { address, port = NBD_DEFAULT_PORT, exportname, cert },
+    { connectTimeout = 6e4, waitBeforeReconnect = 1e3, readAhead = 10, readBlockRetries = 5, reconnectRetry = 5 } = {}
+  ) {
     this.#serverAddress = address
     this.#serverPort = port
     this.#exportName = exportname
     this.#serverCert = cert
+    this.#waitBeforeReconnect = waitBeforeReconnect
+    this.#readAhead = readAhead
+    this.#readBlockRetries = readBlockRetries
+    this.#reconnectRetry = reconnectRetry
+    this.#connectTimeout = connectTimeout
   }
 
   get exportSize() {
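
A sketch of how the new two-argument constructor is called; the option names and defaults come from the hunk above, while the connection values and certificate path are placeholders:

```js
const fs = require('node:fs')
const NbdClient = require('@vates/nbd-client')

// placeholder: PEM string of the server certificate
const serverCertPem = fs.readFileSync('server-cert.pem', 'utf8')

const client = new NbdClient(
  // connection settings, as before
  { address: '192.0.2.10', exportname: 'my-export', cert: serverCertPem },
  // new resilience options, shown here with their default values
  {
    connectTimeout: 6e4, // ms before connect() rejects
    waitBeforeReconnect: 1e3, // ms pause before a reconnection attempt
    readAhead: 10, // max blocks read in advance by readBlocks()
    readBlockRetries: 5, // retries per block, reconnecting between tries
    reconnectRetry: 5, // attempts inside a single reconnect() call
  }
)
```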

@@ -77,19 +97,55 @@ module.exports = class NbdClient {
     })
   }
 
-  async connect() {
-    // first we connect to the serve without tls, and then we upgrade the connection
+  async #connect() {
+    // first we connect to the server without tls, and then we upgrade the connection
     // to tls during the handshake
     await this.#unsecureConnect()
     await this.#handshake()
+
+    this.#connected = true
+    // reset internal state if we reconnected a nbd client
+    this.#commandQueryBacklog = new Map()
+    this.#waitingForResponse = false
+  }
+  async connect() {
+    return pTimeout.call(this.#connect(), this.#connectTimeout)
   }
 
   async disconnect() {
+    if (!this.#connected) {
+      return
+    }
+
+    const buffer = Buffer.alloc(28)
+    buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
+    buffer.writeInt16BE(0, 4) // no command flags for a disconnect
+    buffer.writeInt16BE(NBD_CMD_DISC, 6) // we want to disconnect from nbd server
+    await this.#write(buffer)
     await this.#serverSocket.destroy()
     this.#serverSocket = undefined
+    this.#connected = false
   }
+
+  #clearReconnectPromise = () => {
+    this.#reconnectingPromise = undefined
+  }
+
+  async #reconnect() {
+    await this.disconnect().catch(() => {})
+    await pDelay(this.#waitBeforeReconnect) // need to let the xapi clean things on its side
+    await this.connect()
+  }
+
+  async reconnect() {
+    // we need to ensure reconnections do not occur in parallel
+    if (this.#reconnectingPromise === undefined) {
+      this.#reconnectingPromise = pRetry(() => this.#reconnect(), {
+        tries: this.#reconnectRetry,
+      })
+      this.#reconnectingPromise.then(this.#clearReconnectPromise, this.#clearReconnectPromise)
+    }
+
+    return this.#reconnectingPromise
+  }
 
   // we can use individual read/write from the socket here since there is no concurrency
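
The `reconnect()` above is a single-flight wrapper: concurrent callers share one pRetry-driven attempt, and the shared promise is cleared once it settles so a later failure can start a fresh round. The same shape in isolation (a generic sketch, not tied to this class):

```js
const { pRetry } = require('promise-toolbox')

let pending
function sharedReconnect(doReconnect, tries) {
  if (pending === undefined) {
    // one retried attempt is shared by every caller that arrives
    // while it is still in flight
    pending = pRetry(doReconnect, { tries })
    const clear = () => {
      pending = undefined
    }
    // clear on success and on failure so the next call starts over
    pending.then(clear, clear)
  }
  return pending
}
```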

@@ -167,7 +223,6 @@ module.exports = class NbdClient {
     this.#commandQueryBacklog.forEach(({ reject }) => {
       reject(error)
     })
-    await this.disconnect()
   }
 
   async #readBlockResponse() {

@@ -175,7 +230,6 @@ module.exports = class NbdClient {
     if (this.#waitingForResponse) {
       return
     }
-
     try {
       this.#waitingForResponse = true
       const magic = await this.#readInt32()

@@ -200,7 +254,8 @@ module.exports = class NbdClient {
       query.resolve(data)
       this.#waitingForResponse = false
       if (this.#commandQueryBacklog.size > 0) {
-        await this.#readBlockResponse()
+        // it doesn't throw directly but will throw all relevant promise on failure
+        this.#readBlockResponse()
       }
     } catch (error) {
       // reject all the promises

@@ -211,6 +266,11 @@ module.exports = class NbdClient {
   }
 
   async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
+    // we don't want to add anything in backlog while reconnecting
+    if (this.#reconnectingPromise) {
+      await this.#reconnectingPromise
+    }
+
     const queryId = this.#nextCommandQueryId
     this.#nextCommandQueryId++
 

@@ -225,19 +285,67 @@ module.exports = class NbdClient {
     buffer.writeInt32BE(size, 24)
 
     return new Promise((resolve, reject) => {
+      function decoratedReject(error) {
+        error.index = index
+        error.size = size
+        reject(error)
+      }
+
       // this will handle one block response, but it can be another block
       // since server does not guaranty to handle query in order
       this.#commandQueryBacklog.set(queryId, {
         size,
         resolve,
-        reject,
+        reject: decoratedReject,
       })
       // really send the command to the server
-      this.#write(buffer).catch(reject)
+      this.#write(buffer).catch(decoratedReject)
+
+      // #readBlockResponse never throws directly
+      // but if it fails it will reject all the promises in the backlog
       this.#readBlockResponse()
     })
   }
+
+  async *readBlocks(indexGenerator) {
+    // default : read all blocks
+    if (indexGenerator === undefined) {
+      const exportSize = this.#exportSize
+      const chunkSize = 2 * 1024 * 1024
+      indexGenerator = function* () {
+        const nbBlocks = Math.ceil(Number(exportSize / BigInt(chunkSize)))
+        for (let index = 0; BigInt(index) < nbBlocks; index++) {
+          yield { index, size: chunkSize }
+        }
+      }
+    }
+    const readAhead = []
+    const readAheadMaxLength = this.#readAhead
+    const makeReadBlockPromise = (index, size) => {
+      const promise = pRetry(() => this.readBlock(index, size), {
+        tries: this.#readBlockRetries,
+        onRetry: async err => {
+          warn('will retry reading block ', index, err)
+          await this.reconnect()
+        },
+      })
+      // error is handled during unshift
+      promise.catch(() => {})
+      return promise
+    }
+
+    // read all blocks, but try to keep readAheadMaxLength promise waiting ahead
+    for (const { index, size } of indexGenerator()) {
+      // stack readAheadMaxLength promises before starting to handle the results
+      if (readAhead.length === readAheadMaxLength) {
+        // any error will stop reading blocks
+        yield readAhead.shift()
+      }
+
+      readAhead.push(makeReadBlockPromise(index, size))
+    }
+    while (readAhead.length > 0) {
+      yield readAhead.shift()
+    }
+  }
 }
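
A consumption sketch for the new `readBlocks()` iterator (`client` as constructed earlier; sizes are illustrative). Each yielded value is an already-started read, kept at most `readAhead` requests ahead and retried, with a reconnect, on failure:

```js
async function dumpExport(client, outputStream) {
  // read the whole export as 2 MiB blocks (the default generator),
  // with at most `readAhead` requests in flight at any time
  for await (const block of client.readBlocks()) {
    outputStream.write(block)
  }

  // alternatively, drive it with a custom generator of { index, size }
  const blocks = client.readBlocks(function* () {
    for (let index = 0; index < 10; index++) {
      yield { index, size: 1024 * 1024 }
    }
  })
  for await (const block of blocks) {
    // a block that exhausted readBlockRetries rejects here,
    // which ends the iteration
    outputStream.write(block)
  }
}
```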

@@ -1,76 +0,0 @@
-'use strict'
-const NbdClient = require('./index.js')
-const { spawn } = require('node:child_process')
-const fs = require('node:fs/promises')
-const { test } = require('tap')
-const tmp = require('tmp')
-const { pFromCallback } = require('promise-toolbox')
-const { asyncEach } = require('@vates/async-each')
-
-const FILE_SIZE = 2 * 1024 * 1024
-
-async function createTempFile(size) {
-  const tmpPath = await pFromCallback(cb => tmp.file(cb))
-  const data = Buffer.alloc(size, 0)
-  for (let i = 0; i < size; i += 4) {
-    data.writeUInt32BE(i, i)
-  }
-  await fs.writeFile(tmpPath, data)
-
-  return tmpPath
-}
-
-test('it works with unsecured network', async tap => {
-  const path = await createTempFile(FILE_SIZE)
-
-  const nbdServer = spawn(
-    'nbdkit',
-    [
-      'file',
-      path,
-      '--newstyle', //
-      '--exit-with-parent',
-      '--read-only',
-      '--export-name=MY_SECRET_EXPORT',
-    ],
-    {
-      stdio: ['inherit', 'inherit', 'inherit'],
-    }
-  )
-
-  const client = new NbdClient({
-    address: 'localhost',
-    exportname: 'MY_SECRET_EXPORT',
-    secure: false,
-  })
-
-  await client.connect()
-  tap.equal(client.exportSize, BigInt(FILE_SIZE))
-  const CHUNK_SIZE = 128 * 1024 // non default size
-  const indexes = []
-  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
-    indexes.push(i)
-  }
-  // read mutiple blocks in parallel
-  await asyncEach(
-    indexes,
-    async i => {
-      const block = await client.readBlock(i, CHUNK_SIZE)
-      let blockOk = true
-      let firstFail
-      for (let j = 0; j < CHUNK_SIZE; j += 4) {
-        const wanted = i * CHUNK_SIZE + j
-        const found = block.readUInt32BE(j)
-        blockOk = blockOk && found === wanted
-        if (!blockOk && firstFail === undefined) {
-          firstFail = j
-        }
-      }
-      tap.ok(blockOk, `check block ${i} content`)
-    },
-    { concurrency: 8 }
-  )
-  await client.disconnect()
-  nbdServer.kill()
-  await fs.unlink(path)
-})

@@ -13,16 +13,17 @@
     "url": "https://vates.fr"
   },
   "license": "ISC",
-  "version": "1.0.0",
+  "version": "1.2.0",
   "engines": {
     "node": ">=14.0"
   },
   "dependencies": {
     "@vates/async-each": "^1.0.0",
-    "@vates/read-chunk": "^1.0.1",
+    "@vates/read-chunk": "^1.1.1",
     "@xen-orchestra/async-map": "^0.1.2",
+    "@xen-orchestra/log": "^0.6.0",
     "promise-toolbox": "^0.21.0",
-    "xen-api": "^1.2.2"
+    "xen-api": "^1.3.1"
   },
   "devDependencies": {
     "tap": "^16.3.0",

@@ -30,6 +31,6 @@
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test-integration": "tap *.spec.js"
+    "test-integration": "tap --lines 97 --functions 95 --branches 74 --statements 97 tests/*.integ.js"
   }
 }

@vates/nbd-client/tests/ca-cert.pem (Normal file, 182 lines)

@@ -0,0 +1,182 @@
+Public Key Info:
+        Public Key Algorithm: RSA
+        Key Security Level: High (3072 bits)
+
+modulus:
+        00:be:92:be:df:de:0a:ab:38:fc:1a:c0:1a:58:4d:86
+        b8:1f:25:10:7d:19:05:17:bf:02:3d:e9:ef:f8:c0:04
+        5d:6f:98:de:5c:dd:c3:0f:e2:61:61:e4:b5:9c:42:ac
+        3e:af:fd:30:10:e1:54:32:66:75:f6:80:90:85:05:a0
+        6a:14:a2:6f:a7:2e:f0:f3:52:94:2a:f2:34:fc:0d:b4
+        fb:28:5d:1c:11:5c:59:6e:63:34:ba:b3:fd:73:b1:48
+        35:00:84:53:da:6a:9b:84:ab:64:b1:a1:2b:3a:d1:5a
+        d7:13:7c:12:2a:4e:72:e9:96:d6:30:74:c5:71:05:14
+        4b:2d:01:94:23:67:4e:37:3c:1e:c1:a0:bc:34:04:25
+        21:11:fb:4b:6b:53:74:8f:90:93:57:af:7f:3b:78:d6
+        a4:87:fe:7d:ed:20:11:8b:70:54:67:b8:c9:f5:c0:6b
+        de:4e:e7:a5:79:ff:f7:ad:cf:10:57:f5:51:70:7b:54
+        68:28:9e:b9:c2:10:7b:ab:aa:11:47:9f:ec:e6:2f:09
+        44:4a:88:5b:dd:8c:10:b4:c4:03:25:06:d9:e0:9f:a0
+        0d:cf:94:4b:3b:fa:a5:17:2c:e4:67:c4:17:6a:ab:d8
+        c8:7a:16:41:b9:91:b7:9c:ae:8c:94:be:26:61:51:71
+        c1:a6:39:39:97:75:28:a9:0e:21:ea:f0:bd:71:4a:8c
+        e1:f8:1d:a9:22:2f:10:a8:1b:e5:a4:9a:fd:0f:fa:c6
+        20:bc:96:99:79:c6:ba:a4:1f:3e:d4:91:c5:af:bb:71
+        0a:5a:ef:69:9c:64:69:ce:5a:fe:3f:c2:24:f4:26:d4
+        3d:ab:ab:9a:f0:f6:f1:b1:64:a9:f4:e2:34:6a:ab:2e
+        95:47:b9:07:5a:39:c6:95:9c:a9:e8:ed:71:dd:c1:21
+        16:c8:2d:4c:2c:af:06:9d:c6:fa:fe:c5:2a:6c:b4:c3
+        d5:96:fc:5e:fd:ec:1c:30:b4:9d:cb:29:ef:a8:50:1c
+        21:
+
+public exponent:
+        01:00:01:
+
+private exponent:
+        25:37:c5:7d:35:01:02:65:73:9e:c9:cb:9b:59:30:a9
+        3e:b3:df:5f:7f:06:66:97:d0:19:45:59:af:4b:d8:ce
+        62:a0:09:35:3b:bd:ff:99:27:89:95:bf:fe:0f:6b:52
+        26:ce:9c:97:7f:5a:11:29:bf:79:ef:ab:c9:be:ca:90
+        4d:0d:58:1e:df:65:01:30:2c:6d:a2:b5:c4:4f:ec:fb
+        6b:eb:9b:32:ac:c5:6e:70:83:78:be:f4:0d:a7:1e:c1
+        f3:22:e4:b9:70:3e:85:0f:6f:ef:dc:d8:f3:78:b5:73
+        f1:83:36:8c:fa:9b:28:91:63:ad:3c:f0:de:5c:ae:94
+        eb:ea:36:03:20:06:bf:74:c7:50:eb:52:36:1a:65:21
+        eb:40:17:7f:93:61:dd:33:d0:02:bc:ec:6d:31:f1:41
+        5a:a9:d1:f0:00:66:4c:c4:18:47:d5:67:e3:cd:bb:83
+        44:07:ab:62:83:21:dc:d8:e6:89:37:08:bb:9d:ea:62
+        c2:5d:ce:85:c2:dc:48:27:0c:a4:23:61:b7:30:e7:26
+        44:dc:1e:5c:2e:16:35:2b:2e:a6:e6:a4:ce:1f:9b:e9
+        fe:96:fa:49:1d:fb:2a:df:bc:bf:46:da:52:f8:37:8a
+        84:ab:e4:73:e6:46:56:b5:b4:3d:e1:63:eb:02:8e:d7
+        67:96:c4:dc:28:6d:6b:b6:0c:a3:0b:db:87:29:ad:f9
+        ec:73:b6:55:a3:40:32:13:84:c7:2f:33:74:04:dc:42
+        00:11:9c:fb:fc:62:35:b3:82:c3:3c:28:80:e8:09:a8
+        97:c7:c1:2e:3d:27:fa:4f:9b:fc:c2:34:58:41:5c:a1
+        e2:70:2e:2f:82:ad:bd:bd:8e:dd:23:12:25:de:89:70
+        60:75:48:90:80:ac:55:74:51:6f:49:9e:7f:63:41:8b
+        3c:b1:f5:c3:6b:4b:5a:50:a6:4d:38:e8:82:c2:04:c8
+        30:fd:06:9b:c1:04:27:b6:63:3a:5e:f5:4d:00:c3:d1
+
+prime1:
+        00:f6:00:2e:7d:89:61:24:16:5e:87:ca:18:6c:03:b8
+        b4:33:df:4a:a7:7f:db:ed:39:15:41:12:61:4f:4e:b4
+        de:ab:29:d9:0c:6c:01:7e:53:2e:ee:e7:5f:a2:e4:6d
+        c6:4b:07:4e:d8:a3:ae:45:06:97:bd:18:a3:e9:dd:29
+        54:64:6d:f0:af:08:95:ae:ae:3e:71:63:76:2a:a1:18
+        c4:b1:fc:bc:3d:42:15:74:b3:c5:38:1f:5d:92:f1:b2
+        c6:3f:10:fe:35:1a:c6:b1:ce:70:38:ff:08:5c:de:61
+        79:c7:50:91:22:4d:e9:c8:18:49:e2:5c:91:84:86:e2
+        4d:0f:6e:9b:0d:81:df:aa:f3:59:75:56:e9:33:18:dd
+        ab:39:da:e2:25:01:05:a1:6e:23:59:15:2c:89:35:c7
+        ae:9c:c7:ea:88:9a:1a:f3:48:07:11:82:59:79:8c:62
+        53:06:37:30:14:b3:82:b1:50:fc:ae:b8:f7:1c:57:44
+        7d:
+
+prime2:
+        00:c6:51:cc:dc:88:2e:cf:98:90:10:19:e0:d3:a4:d1
+        3f:dc:b0:29:d3:bb:26:ee:eb:00:17:17:d1:d1:bb:9b
+        34:b1:4e:af:b5:6c:1c:54:53:b4:bb:55:da:f7:78:cd
+        38:b4:2e:3a:8c:63:80:3b:64:9c:b4:2b:cd:dd:50:0b
+        05:d2:00:7a:df:8e:c3:e6:29:e0:9c:d8:40:b7:11:09
+        f4:38:df:f6:ed:93:1e:18:d4:93:fa:8d:ee:82:9c:0f
+        c1:88:26:84:9d:4f:ae:8a:17:d5:55:54:4c:c6:0a:ac
+        4d:ec:33:51:68:0f:4b:92:2e:04:57:fe:15:f5:00:46
+        5c:8e:ad:09:2c:e7:df:d5:36:7a:4e:bd:da:21:22:d7
+        58:b4:72:93:94:af:34:cc:e2:b8:d0:4f:0b:5d:97:08
+        12:19:17:34:c5:15:49:00:48:56:13:b8:45:4e:3b:f8
+        bc:d5:ab:d9:6d:c2:4a:cc:01:1a:53:4d:46:50:49:3b
+        75:
+
+coefficient:
+        63:67:50:29:10:6a:85:a3:dc:51:90:20:76:86:8c:83
+        8e:d5:ff:aa:75:fd:b5:f8:31:b0:96:6c:18:1d:5b:ed
+        a4:2e:47:8d:9c:c2:1e:2c:a8:6d:4b:10:a5:c2:53:46
+        8a:9a:84:91:d7:fc:f5:cc:03:ce:b9:3d:5c:01:d2:27
+        99:7b:79:89:4f:a1:12:e3:05:5d:ee:10:f6:8c:e6:ce
+        5e:da:32:56:6d:6f:eb:32:b4:75:7b:94:49:d8:2d:9e
+        4d:19:59:2e:e4:0b:bc:95:df:df:65:67:a1:dd:c6:2b
+        99:f4:76:e8:9f:fa:57:1d:ca:f9:58:a9:ce:9b:30:5c
+        42:8a:ba:05:e7:e2:15:45:25:bc:e9:68:c1:8b:1a:37
+        cc:e1:aa:45:2e:94:f5:81:47:1e:64:7f:c0:c1:b7:a8
+        21:58:18:a9:a0:ed:e0:27:75:bf:65:81:6b:e4:1d:5a
+        b7:7e:df:d8:28:c6:36:21:19:c8:6e:da:ca:9e:da:84
+
+exp1:
+        00:ba:d7:fe:77:a9:0d:98:2c:49:56:57:c0:5e:e2:20
+        ba:f6:1f:26:03:bc:d0:5d:08:9b:45:16:61:c4:ab:e2
+        22:b1:dc:92:17:a6:3d:28:26:a4:22:1e:a8:7b:ff:86
+        05:33:5d:74:9c:85:0d:cb:2d:ab:b8:9b:6b:7c:28:57
+        c8:da:92:ca:59:17:6b:21:07:05:34:78:37:fb:3e:ea
+        a2:13:12:04:23:7e:fa:ee:ed:cf:e0:c5:a9:fb:ff:0a
+        2b:1b:21:9c:02:d7:b8:8c:ba:60:70:59:fc:8f:14:f4
+        f2:5a:d9:ad:b2:61:7d:2c:56:8e:5f:98:b1:89:f8:2d
+        10:1c:a5:84:ad:28:b4:aa:92:34:a3:34:04:e1:a3:84
+        52:16:1a:52:e3:8a:38:2d:99:8a:cd:91:90:87:12:ca
+        fc:ab:e6:08:14:03:00:6f:41:88:e4:da:9d:7c:fd:8c
+        7c:c4:de:cb:ed:1d:3f:29:d0:7a:6b:76:df:71:ae:32
+        bd:
+
+exp2:
+        4a:e9:d3:6c:ea:b4:64:0e:c9:3c:8b:c9:f5:a8:a8:b2
+        6a:f6:d0:95:fe:78:32:7f:ea:c4:ce:66:9f:c7:32:55
+        b1:34:7c:03:18:17:8b:73:23:2e:30:bc:4a:07:03:de
+        8b:91:7a:e4:55:21:b7:4d:c6:33:f8:e8:06:d5:99:94
+        55:43:81:26:b9:93:1e:7a:6b:32:54:2d:fd:f9:1d:bd
+        77:4e:82:c4:33:72:87:06:a5:ef:5b:75:e1:38:7a:6b
+        2c:b7:00:19:3c:64:3e:1d:ca:a4:34:f7:db:47:64:d6
+        fa:86:58:15:ea:d1:2d:22:dc:d9:30:4d:b3:02:ab:91
+        83:03:b2:17:98:6f:60:e6:f7:44:8f:4a:ba:81:a2:bf
+        0b:4a:cc:9c:b9:a2:44:52:d0:65:3f:b6:97:5f:d9:d8
+        9c:49:bb:d1:46:bd:10:b2:42:71:a8:85:e5:8b:99:e6
+        1b:00:93:5d:76:ab:32:6c:a8:39:17:53:9c:38:4d:91
+
+Public Key PIN:
+        pin-sha256:ISh/UeFjUG5Gwrpx6hMUGQPvg9wOKjOkHmRbs4YjZqs=
+Public Key ID:
+        sha256:21287f51e163506e46c2ba71ea13141903ef83dc0e2a33a41e645bb3862366ab
+        sha1:1a48455111ac45fb5807c5cdb7b20b896c52f0b6
+
+-----BEGIN RSA PRIVATE KEY-----
+MIIG4wIBAAKCAYEAvpK+394Kqzj8GsAaWE2GuB8lEH0ZBRe/Aj3p7/jABF1vmN5c
+3cMP4mFh5LWcQqw+r/0wEOFUMmZ19oCQhQWgahSib6cu8PNSlCryNPwNtPsoXRwR
+XFluYzS6s/1zsUg1AIRT2mqbhKtksaErOtFa1xN8EipOcumW1jB0xXEFFEstAZQj
+Z043PB7BoLw0BCUhEftLa1N0j5CTV69/O3jWpIf+fe0gEYtwVGe4yfXAa95O56V5
+//etzxBX9VFwe1RoKJ65whB7q6oRR5/s5i8JREqIW92MELTEAyUG2eCfoA3PlEs7
++qUXLORnxBdqq9jIehZBuZG3nK6MlL4mYVFxwaY5OZd1KKkOIerwvXFKjOH4Haki
+LxCoG+Wkmv0P+sYgvJaZeca6pB8+1JHFr7txClrvaZxkac5a/j/CJPQm1D2rq5rw
+9vGxZKn04jRqqy6VR7kHWjnGlZyp6O1x3cEhFsgtTCyvBp3G+v7FKmy0w9WW/F79
+7BwwtJ3LKe+oUBwhAgMBAAECggGAJTfFfTUBAmVznsnLm1kwqT6z319/BmaX0BlF
+Wa9L2M5ioAk1O73/mSeJlb/+D2tSJs6cl39aESm/ee+ryb7KkE0NWB7fZQEwLG2i
+tcRP7Ptr65syrMVucIN4vvQNpx7B8yLkuXA+hQ9v79zY83i1c/GDNoz6myiRY608
+8N5crpTr6jYDIAa/dMdQ61I2GmUh60AXf5Nh3TPQArzsbTHxQVqp0fAAZkzEGEfV
+Z+PNu4NEB6tigyHc2OaJNwi7nepiwl3OhcLcSCcMpCNhtzDnJkTcHlwuFjUrLqbm
+pM4fm+n+lvpJHfsq37y/RtpS+DeKhKvkc+ZGVrW0PeFj6wKO12eWxNwobWu2DKML
+24cprfnsc7ZVo0AyE4THLzN0BNxCABGc+/xiNbOCwzwogOgJqJfHwS49J/pPm/zC
+NFhBXKHicC4vgq29vY7dIxIl3olwYHVIkICsVXRRb0mef2NBizyx9cNrS1pQpk04
+6ILCBMgw/QabwQQntmM6XvVNAMPRAoHBAPYALn2JYSQWXofKGGwDuLQz30qnf9vt
+ORVBEmFPTrTeqynZDGwBflMu7udfouRtxksHTtijrkUGl70Yo+ndKVRkbfCvCJWu
+rj5xY3YqoRjEsfy8PUIVdLPFOB9dkvGyxj8Q/jUaxrHOcDj/CFzeYXnHUJEiTenI
+GEniXJGEhuJND26bDYHfqvNZdVbpMxjdqzna4iUBBaFuI1kVLIk1x66cx+qImhrz
+SAcRgll5jGJTBjcwFLOCsVD8rrj3HFdEfQKBwQDGUczciC7PmJAQGeDTpNE/3LAp
+07sm7usAFxfR0bubNLFOr7VsHFRTtLtV2vd4zTi0LjqMY4A7ZJy0K83dUAsF0gB6
+347D5ingnNhAtxEJ9Djf9u2THhjUk/qN7oKcD8GIJoSdT66KF9VVVEzGCqxN7DNR
+aA9Lki4EV/4V9QBGXI6tCSzn39U2ek692iEi11i0cpOUrzTM4rjQTwtdlwgSGRc0
+xRVJAEhWE7hFTjv4vNWr2W3CSswBGlNNRlBJO3UCgcEAutf+d6kNmCxJVlfAXuIg
+uvYfJgO80F0Im0UWYcSr4iKx3JIXpj0oJqQiHqh7/4YFM110nIUNyy2ruJtrfChX
+yNqSylkXayEHBTR4N/s+6qITEgQjfvru7c/gxan7/worGyGcAte4jLpgcFn8jxT0
+8lrZrbJhfSxWjl+YsYn4LRAcpYStKLSqkjSjNATho4RSFhpS44o4LZmKzZGQhxLK
+/KvmCBQDAG9BiOTanXz9jHzE3svtHT8p0Hprdt9xrjK9AoHASunTbOq0ZA7JPIvJ
+9aiosmr20JX+eDJ/6sTOZp/HMlWxNHwDGBeLcyMuMLxKBwPei5F65FUht03GM/jo
+BtWZlFVDgSa5kx56azJULf35Hb13ToLEM3KHBqXvW3XhOHprLLcAGTxkPh3KpDT3
+20dk1vqGWBXq0S0i3NkwTbMCq5GDA7IXmG9g5vdEj0q6gaK/C0rMnLmiRFLQZT+2
+l1/Z2JxJu9FGvRCyQnGoheWLmeYbAJNddqsybKg5F1OcOE2RAoHAY2dQKRBqhaPc
+UZAgdoaMg47V/6p1/bX4MbCWbBgdW+2kLkeNnMIeLKhtSxClwlNGipqEkdf89cwD
+zrk9XAHSJ5l7eYlPoRLjBV3uEPaM5s5e2jJWbW/rMrR1e5RJ2C2eTRlZLuQLvJXf
+32Vnod3GK5n0duif+lcdyvlYqc6bMFxCiroF5+IVRSW86WjBixo3zOGqRS6U9YFH
+HmR/wMG3qCFYGKmg7eAndb9lgWvkHVq3ft/YKMY2IRnIbtrKntqE
+-----END RSA PRIVATE KEY-----

@vates/nbd-client/tests/nbdclient.integ.js (Normal file, 169 lines)

@@ -0,0 +1,169 @@
+'use strict'
+const NbdClient = require('../index.js')
+const { spawn, exec } = require('node:child_process')
+const fs = require('node:fs/promises')
+const { test } = require('tap')
+const tmp = require('tmp')
+const { pFromCallback } = require('promise-toolbox')
+const { Socket } = require('node:net')
+const { NBD_DEFAULT_PORT } = require('../constants.js')
+const assert = require('node:assert')
+
+const FILE_SIZE = 10 * 1024 * 1024
+
+async function createTempFile(size) {
+  const tmpPath = await pFromCallback(cb => tmp.file(cb))
+  const data = Buffer.alloc(size, 0)
+  for (let i = 0; i < size; i += 4) {
+    data.writeUInt32BE(i, i)
+  }
+  await fs.writeFile(tmpPath, data)
+
+  return tmpPath
+}
+
+async function spawnNbdKit(path) {
+  let tries = 5
+  // wait for server to be ready
+
+  const nbdServer = spawn(
+    'nbdkit',
+    [
+      'file',
+      path,
+      '--newstyle', //
+      '--exit-with-parent',
+      '--read-only',
+      '--export-name=MY_SECRET_EXPORT',
+      '--tls=on',
+      '--tls-certificates=./tests/',
+      // '--tls-verify-peer',
+      // '--verbose',
+      '--exit-with-parent',
+    ],
+    {
+      stdio: ['inherit', 'inherit', 'inherit'],
+    }
+  )
+  nbdServer.on('error', err => {
+    console.error(err)
+  })
+  do {
+    try {
+      const socket = new Socket()
+      await new Promise((resolve, reject) => {
+        socket.connect(NBD_DEFAULT_PORT, 'localhost')
+        socket.once('error', reject)
+        socket.once('connect', resolve)
+      })
+      socket.destroy()
+      break
+    } catch (err) {
+      tries--
+      if (tries <= 0) {
+        throw err
+      } else {
+        await new Promise(resolve => setTimeout(resolve, 1000))
+      }
+    }
+  } while (true)
+  return nbdServer
+}
+
+async function killNbdKit() {
+  return new Promise((resolve, reject) =>
+    exec('pkill -9 -f -o nbdkit', err => {
+      err ? reject(err) : resolve()
+    })
+  )
+}
+
+test('it works with unsecured network', async tap => {
+  const path = await createTempFile(FILE_SIZE)
+
+  let nbdServer = await spawnNbdKit(path)
+  const client = new NbdClient(
+    {
+      address: '127.0.0.1',
+      exportname: 'MY_SECRET_EXPORT',
+      cert: `-----BEGIN CERTIFICATE-----
+MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
+MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
+8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
+zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
+lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
+gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
+59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
+DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
+fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
+OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
+QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
+tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
+ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
+CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
+57ABhfYpMlZ4aHjuN1bL
+-----END CERTIFICATE-----
+`,
+    },
+    {
+      readAhead: 2,
+    }
+  )
+
+  await client.connect()
+  tap.equal(client.exportSize, BigInt(FILE_SIZE))
+  const CHUNK_SIZE = 1024 * 1024 // non default size
+  const indexes = []
+  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
+    indexes.push(i)
+  }
+  const nbdIterator = client.readBlocks(function* () {
+    for (const index of indexes) {
+      yield { index, size: CHUNK_SIZE }
+    }
+  })
+  let i = 0
+  for await (const block of nbdIterator) {
+    let blockOk = true
+    let firstFail
+    for (let j = 0; j < CHUNK_SIZE; j += 4) {
+      const wanted = i * CHUNK_SIZE + j
+      const found = block.readUInt32BE(j)
+      blockOk = blockOk && found === wanted
+      if (!blockOk && firstFail === undefined) {
+        firstFail = j
+      }
+    }
+    tap.ok(blockOk, `check block ${i} content`)
+    i++
+
+    // flaky server is flaky
+    if (i % 7 === 0) {
+      // kill the older nbdkit process
+      await killNbdKit()
+      nbdServer = await spawnNbdKit(path)
+    }
+  }
+
+  // we can reuse the conneciton to read other blocks
+  // default iterator
+  const nbdIteratorWithDefaultBlockIterator = client.readBlocks()
+  let nb = 0
+  for await (const block of nbdIteratorWithDefaultBlockIterator) {
+    nb++
+    tap.equal(block.length, 2 * 1024 * 1024)
+  }
+
+  tap.equal(nb, 5)
+  assert.rejects(() => client.readBlock(100, CHUNK_SIZE))
+
+  await client.disconnect()
+  // double disconnection shouldn't pose any problem
+  await client.disconnect()
+  nbdServer.kill()
+  await fs.unlink(path)
+})

@vates/nbd-client/tests/server-cert.pem (Normal file, 21 lines)

@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDazCCAlOgAwIBAgIUeHpQ0IeD6BmP2zgsv3LV3J4BI/EwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA1MTcxMzU1MzBaFw0yNDA1
+MTYxMzU1MzBaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC/8wLopj/iZY6ijmpvgCJsl+zY0hQZQcIoaCs0H75u
+8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZolevaSJLNT2Iolscvc2W9NCF4N1V6y
+zs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh67u+uI40732AfQqD01BNCTD/uHRB
+lKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y2SJVTeT4a1sSJixl6I1YPmt80FJh
+gq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULwdJOGgmqGRDzgZKJS5UUpxe/ViEO4
+59I18vIkgibaRYhENgmnP3lIzTOLlUe07tbSML5RGBbBAgMBAAGjUzBRMB0GA1Ud
+DgQWBBR/8+zYoL0H0LdWfULHg1LynFdSbzAfBgNVHSMEGDAWgBR/8+zYoL0H0LdW
+fULHg1LynFdSbzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQBD
+OF5bTmbDEGoZ6OuQaI0vyya/T4FeaoWmh22gLeL6dEEmUVGJ1NyMTOvG9GiGJ8OM
+QhD1uHJei45/bXOYIDGey2+LwLWye7T4vtRFhf8amYh0ReyP/NV4/JoR/U3pTSH6
+tns7GZ4YWdwUhvOOlm17EQKVO/hP3t9mp74gcjdL4bCe5MYSheKuNACAakC1OR0U
+ZakJMP9ijvQuq8spfCzrK+NbHKNHR9tEgQw+ax/t1Au4dGVtFbcoxqCrx2kTl0RP
+CYu1Xn/FVPx1HoRgWc7E8wFhDcA/P3SJtfIQWHB9FzSaBflKGR4t8WCE2eE8+cTB
+57ABhfYpMlZ4aHjuN1bL
+-----END CERTIFICATE-----

@vates/nbd-client/tests/server-key.pem (Normal file, 28 lines)

@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/8wLopj/iZY6i
+jmpvgCJsl+zY0hQZQcIoaCs0H75u8PPSzHedtOLURAkJeMmIS40UY/eIvHh7yZol
+evaSJLNT2Iolscvc2W9NCF4N1V6yzs4pDzP+YPF7Q8ldNaQIX0bAk4PfaMSM+pLh
+67u+uI40732AfQqD01BNCTD/uHRBlKnQuqQpe9UM9UzRRVejpu1r19D4dJruAm6y
+2SJVTeT4a1sSJixl6I1YPmt80FJhgq9O2KRGbXp1xIjemWgW99MHg63pTgxEiULw
+dJOGgmqGRDzgZKJS5UUpxe/ViEO459I18vIkgibaRYhENgmnP3lIzTOLlUe07tbS
+ML5RGBbBAgMBAAECggEATLYiafcTHfgnZmjTOad0WoDnC4n9tVBV948WARlUooLS
+duL3RQRHCLz9/ZaTuFA1XDpNcYyc/B/IZoU7aJGZR3+JSmJBjowpUphu+klVNNG4
+i6lDRrzYlUI0hfdLjHsDTDBIKi91KcB0lix/VkvsrVQvDHwsiR2ZAIiVWAWQFKrR
+5O3DhSTHbqyq47uR58rWr4Zf3zvZaUl841AS1yELzCiZqz7AenvyWphim0c0XA5d
+I63CEShntHnEAA9OMcP8+BNf/3AmqB4welY+m8elB3aJNH+j7DKq/AWqaM5nl2PC
+cS6qgpxwOyTxEOyj1xhwK5ZMRR3heW3NfutIxSOPlwKBgQDB9ZkrBeeGVtCISO7C
+eCANzSLpeVrahTvaCSQLdPHsLRLDUc+5mxdpi3CaRlzYs3S1OWdAtyWX9mBryltF
+qDPhCNjFDyHok4D3wLEWdS9oUVwEKUM8fOPW3tXLLiMM7p4862Qo7LqnqHzPqsnz
+22iZo5yjcc7aLJ+VmFrbAowwOwKBgQD9WNCvczTd7Ymn7zEvdiAyNoS0OZ0orwEJ
+zGaxtjqVguGklNfrb/UB+eKNGE80+YnMiSaFc9IQPetLntZdV0L7kWYdCI8kGDNA
+DbVRCOp+z8DwAojlrb/zsYu23anQozT3WeHxVU66lNuyEQvSW2tJa8gN1htrD7uY
+5KLibYrBMwKBgEM0iiHyJcrSgeb2/mO7o7+keJhVSDm3OInP6QFfQAQJihrLWiKB
+rpcPjbCm+LzNUX8JqNEvpIMHB1nR/9Ye9frfSdzd5W3kzicKSVHywL5wkmWOtpFa
+5Mcq5wFDtzlf5MxO86GKhRJauwRptRgdyhySKFApuva1x4XaCIEiXNjJAoGBAN82
+t3c+HCBEv3o05rMYcrmLC1T3Rh6oQlPtwbVmByvfywsFEVCgrc/16MPD3VWhXuXV
+GRmPuE8THxLbead30M5xhvShq+xzXgRbj5s8Lc9ZIHbW5OLoOS1vCtgtaQcoJOyi
+Rs4pCVqe+QpktnO6lEZ2Libys+maTQEiwNibBxu9AoGAUG1V5aKMoXa7pmGeuFR6
+ES+1NDiCt6yDq9BsLZ+e2uqvWTkvTGLLwvH6xf9a0pnnILd0AUTKAAaoUdZS6++E
+cGob7fxMwEE+UETp0QBgLtfjtExMOFwr2avw8PV4CYEUkPUAm2OFB2Twh+d/PNfr
+FAxF1rN47SBPNbFI8N4TFsg=
+-----END PRIVATE KEY-----

@@ -24,3 +24,25 @@ import { readChunkStrict } from '@vates/read-chunk'
 
 const chunk = await readChunkStrict(stream, 1024)
 ```
+
+### `skip(stream, size)`
+
+Skips a given number of bytes from a stream.
+
+Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.
+
+```js
+import { skip } from '@vates/read-chunk'
+
+const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
+```
+
+### `skipStrict(stream, size)`
+
+Skips a given number of bytes from a stream and throws if the stream ended before enough stream has been skipped.
+
+```js
+import { skipStrict } from '@vates/read-chunk'
+
+await skipStrict(stream, 2 * 1024 * 1024 * 1024)
+```

@@ -43,6 +43,28 @@ import { readChunkStrict } from '@vates/read-chunk'
 const chunk = await readChunkStrict(stream, 1024)
 ```
+
+### `skip(stream, size)`
+
+Skips a given number of bytes from a stream.
+
+Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.
+
+```js
+import { skip } from '@vates/read-chunk'
+
+const bytesSkipped = await skip(stream, 2 * 1024 * 1024 * 1024)
+```
+
+### `skipStrict(stream, size)`
+
+Skips a given number of bytes from a stream and throws if the stream ended before enough stream has been skipped.
+
+```js
+import { skipStrict } from '@vates/read-chunk'
+
+await skipStrict(stream, 2 * 1024 * 1024 * 1024)
+```
 
 ## Contributions
 
 Contributions are _very_ welcomed, either on the documentation or on

@@ -1,11 +1,36 @@
 'use strict'
 
 const assert = require('assert')
 
+/**
+ * Read a chunk of data from a stream.
+ *
+ * The returned promise is rejected if there is an error while reading the stream.
+ *
+ * For streams in object mode, the returned promise resolves to a single object read from the stream.
+ *
+ * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
+ *
+ * If `size` bytes are not available to be read, `null` will be returned *unless* the stream has ended, in which case all of the data remaining will be returned.
+ *
+ * @param {Readable} stream - A readable stream to read from.
+ * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
+ * @returns {Promise<Buffer|string|unknown|null>} - A Promise that resolves to the read chunk if available, or null if end of stream is reached.
+ */
 const readChunk = (stream, size) =>
-  stream.closed || stream.readableEnded
+  stream.errored != null
+    ? Promise.reject(stream.errored)
+    : stream.closed || stream.readableEnded
     ? Promise.resolve(null)
-    : size === 0
-      ? Promise.resolve(Buffer.alloc(0))
     : new Promise((resolve, reject) => {
         if (size !== undefined) {
           assert(size > 0)
+
+          // per Node documentation:
+          // > The size argument must be less than or equal to 1 GiB.
+          assert(size < 1073741824)
         }
 
         function onEnd() {
          resolve(null)
          removeListeners()

@@ -33,6 +58,21 @@ const readChunk = (stream, size) =>
   })
 exports.readChunk = readChunk
 
+/**
+ * Read a chunk of data from a stream.
+ *
+ * The returned promise is rejected if there is an error while reading the stream.
+ *
+ * For streams in object mode, the returned promise resolves to a single object read from the stream.
+ *
+ * For streams in binary mode, the returned promise resolves to a Buffer or a string if an encoding has been specified using the `stream.setEncoding()` method.
+ *
+ * If `size` bytes are not available to be read, the returned promise is rejected.
+ *
+ * @param {Readable} stream - A readable stream to read from.
+ * @param {number} [size] - The number of bytes to read for binary streams (ignored for object streams).
+ * @returns {Promise<Buffer|string|unknown>} - A Promise that resolves to the read chunk.
+ */
 exports.readChunkStrict = async function readChunkStrict(stream, size) {
   const chunk = await readChunk(stream, size)
   if (chunk === null) {

@@ -40,7 +80,7 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
   }
 
   if (size !== undefined && chunk.length !== size) {
-    const error = new Error('stream has ended with not enough data')
+    const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
     Object.defineProperties(error, {
       chunk: {
         value: chunk,

@@ -51,3 +91,69 @@ exports.readChunkStrict = async function readChunkStrict(stream, size) {
 
   return chunk
 }
+
+/**
+ * Skips a given number of bytes from a readable stream.
+ *
+ * @param {Readable} stream - A readable stream to skip bytes from.
+ * @param {number} size - The number of bytes to skip.
+ * @returns {Promise<number>} A Promise that resolves to the number of bytes actually skipped. If the end of the stream is reached before all bytes are skipped, the Promise resolves to the number of bytes that were skipped before the end of the stream was reached. The Promise is rejected if there is an error while reading from the stream.
+ */
+async function skip(stream, size) {
+  return stream.errored != null
+    ? Promise.reject(stream.errored)
+    : size === 0 || stream.closed || stream.readableEnded
+      ? Promise.resolve(0)
+      : new Promise((resolve, reject) => {
+          let left = size
+          function onEnd() {
+            resolve(size - left)
+            removeListeners()
+          }
+          function onError(error) {
+            reject(error)
+            removeListeners()
+          }
+          function onReadable() {
+            const data = stream.read()
+            left -= data === null ? 0 : data.length
+            if (left > 0) {
+              // continue to read
+            } else {
+              // if more than wanted has been read, push back the rest
+              if (left < 0) {
+                stream.unshift(data.slice(left))
+              }
+
+              resolve(size)
+              removeListeners()
+            }
+          }
+          function removeListeners() {
+            stream.removeListener('end', onEnd)
+            stream.removeListener('error', onError)
+            stream.removeListener('readable', onReadable)
+          }
+          stream.on('end', onEnd)
+          stream.on('error', onError)
+          stream.on('readable', onReadable)
+          onReadable()
+        })
+}
+exports.skip = skip
+
+/**
+ * Skips a given number of bytes from a stream.
+ *
+ * @param {Readable} stream - A readable stream to skip bytes from.
+ * @param {number} size - The number of bytes to skip.
+ * @returns {Promise<void>} - A Promise that resolves when the exact number of bytes have been skipped. The Promise is rejected if there is an error while reading from the stream or the stream ends before the exact number of bytes have been skipped.
+ */
+exports.skipStrict = async function skipStrict(stream, size) {
+  const bytesSkipped = await skip(stream, size)
+  if (bytesSkipped !== size) {
+    const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
+    error.bytesSkipped = bytesSkipped
+    throw error
+  }
+}
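
One way the strict variants combine in practice: a sketch of reading length-prefixed records while skipping payloads that are not needed (the 4-byte framing is invented for illustration, and payloads are assumed non-empty):

```js
const { readChunkStrict, skipStrict } = require('@vates/read-chunk')

// each record: a 4-byte big-endian payload length, then the payload
async function readRecord(stream) {
  const header = await readChunkStrict(stream, 4)
  return readChunkStrict(stream, header.readUInt32BE(0))
}

async function skipRecord(stream) {
  const header = await readChunkStrict(stream, 4)
  // throws if the stream ends before the whole payload was skipped
  await skipStrict(stream, header.readUInt32BE(0))
}
```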

@@ -5,12 +5,58 @@ const assert = require('node:assert').strict
 
 const { Readable } = require('stream')
 
-const { readChunk, readChunkStrict } = require('./')
+const { readChunk, readChunkStrict, skip, skipStrict } = require('./')
 
 const makeStream = it => Readable.from(it, { objectMode: false })
 makeStream.obj = Readable.from
 
+const rejectionOf = promise =>
+  promise.then(
+    value => {
+      throw value
+    },
+    error => error
+  )
+
+const makeErrorTests = fn => {
+  it('rejects if the stream errors', async () => {
+    const error = new Error()
+    const stream = makeStream([])
+
+    const pError = rejectionOf(fn(stream, 10))
+    stream.destroy(error)
+
+    assert.strict(await pError, error)
+  })
+
+  // only supported for Node >= 18
+  if (process.versions.node.split('.')[0] >= 18) {
+    it('rejects if the stream has already errored', async () => {
+      const error = new Error()
+      const stream = makeStream([])
+
+      await new Promise(resolve => {
+        stream.once('error', resolve).destroy(error)
+      })
+
+      assert.strict(await rejectionOf(fn(stream, 10)), error)
+    })
+  }
+}
+
 describe('readChunk', () => {
+  it('rejects if size is less than or equal to 0', async () => {
+    const error = await rejectionOf(readChunk(makeStream([]), 0))
+    assert.strictEqual(error.code, 'ERR_ASSERTION')
+  })
+
+  it('rejects if size is greater than or equal to 1 GiB', async () => {
+    const error = await rejectionOf(readChunk(makeStream([]), 1024 * 1024 * 1024))
+    assert.strictEqual(error.code, 'ERR_ASSERTION')
+  })
+
+  makeErrorTests(readChunk)
+
   it('returns null if stream is empty', async () => {
     assert.strictEqual(await readChunk(makeStream([])), null)
   })

@@ -38,10 +84,6 @@ describe('readChunk', () => {
   it('returns less data if stream ends', async () => {
     assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
   })
-
-  it('returns an empty buffer if the specified size is 0', async () => {
-    assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
-  })
 })
 
 describe('with object stream', () => {

@@ -52,14 +94,6 @@ describe('readChunk', () => {
   })
 })
 
-const rejectionOf = promise =>
-  promise.then(
-    value => {
-      throw value
-    },
-    error => error
-  )
-
 describe('readChunkStrict', function () {
   it('throws if stream is empty', async () => {
     const error = await rejectionOf(readChunkStrict(makeStream([])))

@@ -71,7 +105,43 @@ describe('readChunkStrict', function () {
   it('throws if stream ends with not enough data', async () => {
     const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
     assert(error instanceof Error)
-    assert.strictEqual(error.message, 'stream has ended with not enough data')
+    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
     assert.deepEqual(error.chunk, Buffer.from('foobar'))
   })
 })
+
+describe('skip', function () {
+  makeErrorTests(skip)
+
+  it('returns 0 if size is 0', async () => {
+    assert.strictEqual(await skip(makeStream(['foo']), 0), 0)
+  })
+
+  it('returns 0 if the stream is already ended', async () => {
+    const stream = await makeStream([])
+    await readChunk(stream)
+
+    assert.strictEqual(await skip(stream, 10), 0)
+  })
+
+  it('skips a number of bytes', async () => {
+    const stream = makeStream('foo bar')
+
+    assert.strictEqual(await skip(stream, 4), 4)
+    assert.deepEqual(await readChunk(stream, 4), Buffer.from('bar'))
+  })
+
+  it('returns less size if stream ends', async () => {
+    assert.deepEqual(await skip(makeStream('foo bar'), 10), 7)
+  })
+})
+
+describe('skipStrict', function () {
+  it('throws if stream ends with not enough data', async () => {
+    const error = await rejectionOf(skipStrict(makeStream('foo bar'), 10))
+
+    assert(error instanceof Error)
+    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
+    assert.deepEqual(error.bytesSkipped, 7)
+  })
+})
@@ -19,7 +19,7 @@
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "version": "1.0.1",
  "version": "1.1.1",
  "engines": {
    "node": ">=8.10"
  },
@vates/stream-reader/.USAGE.md (new file, 42 lines)
@@ -0,0 +1,42 @@
```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```
@vates/stream-reader/.npmignore (new symbolic link)
@@ -0,0 +1 @@
../../scripts/npmignore
@vates/stream-reader/README.md (new file, 75 lines)
@@ -0,0 +1,75 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->

# @vates/stream-reader

[Package version](https://npmjs.org/package/@vates/stream-reader) [Bundle size](https://bundlephobia.com/result?p=@vates/stream-reader) [npm](https://npmjs.org/package/@vates/stream-reader)

> Efficiently reads and skips chunks of a given size in a stream

## Install

Installation of the [npm package](https://npmjs.org/package/@vates/stream-reader):

```sh
npm install --save @vates/stream-reader
```

## Usage

```js
import StreamReader from '@vates/stream-reader'

const reader = new StreamReader(stream)
```

### `.read([size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
const chunk = await reader.read(512)
```

### `.readStrict([size])`

Similar behavior to `.read()` but throws if the stream ended before the requested data could be read.

```js
const chunk = await reader.readStrict(512)
```

### `.skip(size)`

Skips a given number of bytes from a stream.

Returns the number of bytes actually skipped, which may be less than the requested size if the stream has ended.

```js
const bytesSkipped = await reader.skip(2 * 1024 * 1024 * 1024)
```

### `.skipStrict(size)`

Skips a given number of bytes from a stream and throws if the stream ended before enough data has been skipped.

```js
await reader.skipStrict(2 * 1024 * 1024 * 1024)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
the code.

You may:

- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
  you've encountered;
- fork and create a pull request.

## License

[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
@vates/stream-reader/index.js (new file, 123 lines)
@@ -0,0 +1,123 @@
'use strict'

const assert = require('node:assert')
const { finished, Readable } = require('node:stream')

const noop = Function.prototype

// Inspired by https://github.com/nodejs/node/blob/85705a47958c9ae5dbaa1f57456db19bdefdc494/lib/internal/streams/readable.js#L1107
class StreamReader {
  #ended = false
  #error
  #executor = resolve => {
    this.#resolve = resolve
  }
  #stream
  #resolve = noop

  constructor(stream) {
    stream = typeof stream.pipe === 'function' ? stream : Readable.from(stream)

    this.#stream = stream

    stream.on('readable', () => this.#resolve())

    finished(stream, { writable: false }, error => {
      this.#error = error
      this.#ended = true
      this.#resolve()
    })
  }

  async read(size) {
    if (size !== undefined) {
      assert(size > 0)
    }

    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return null
      }

      const value = this.#stream.read(size)
      if (value !== null) {
        return value
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async readStrict(size) {
    const chunk = await this.read(size)
    if (chunk === null) {
      throw new Error('stream has ended without data')
    }

    if (size !== undefined && chunk.length !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${chunk.length}, expected: ${size})`)
      Object.defineProperties(error, {
        chunk: {
          value: chunk,
        },
      })
      throw error
    }

    return chunk
  }

  async skip(size) {
    if (size === 0) {
      return size
    }

    let toSkip = size
    do {
      if (this.#ended) {
        if (this.#error) {
          throw this.#error
        }
        return size - toSkip
      }

      const data = this.#stream.read()
      if (data !== null) {
        toSkip -= data === null ? 0 : data.length
        if (toSkip > 0) {
          // continue to read
        } else {
          // if more than wanted has been read, push back the rest
          if (toSkip < 0) {
            this.#stream.unshift(data.slice(toSkip))
          }

          return size
        }
      }

      await new Promise(this.#executor)
    } while (true)
  }

  async skipStrict(size) {
    const bytesSkipped = await this.skip(size)
    if (bytesSkipped !== size) {
      const error = new Error(`stream has ended with not enough data (actual: ${bytesSkipped}, expected: ${size})`)
      error.bytesSkipped = bytesSkipped
      throw error
    }
  }
}

StreamReader.prototype[Symbol.asyncIterator] = async function* asyncIterator() {
  let chunk
  while ((chunk = await this.read()) !== null) {
    yield chunk
  }
}

module.exports = StreamReader
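Worth noting: the class above also registers a `Symbol.asyncIterator` built on `read()`, so a reader can be drained with `for await`. A minimal sketch (stream contents and chunk boundaries are illustrative):

```js
const { Readable } = require('node:stream')
const StreamReader = require('@vates/stream-reader')

async function main() {
  const reader = new StreamReader(Readable.from(['foo', 'bar'], { objectMode: false }))

  // skip the first 2 bytes, then drain the rest chunk by chunk
  await reader.skipStrict(2)
  for await (const chunk of reader) {
    console.log(chunk.toString()) // 'o' then 'bar' (actual chunk boundaries may vary)
  }
}
main()
```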
@vates/stream-reader/index.test.js (new file, 141 lines)
@@ -0,0 +1,141 @@
'use strict'

const { describe, it } = require('test')
const assert = require('node:assert').strict

const { Readable } = require('stream')

const StreamReader = require('./index.js')

const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from

const rejectionOf = promise =>
  promise.then(
    value => {
      throw value
    },
    error => error
  )

const makeErrorTests = method => {
  it('rejects if the stream errors', async () => {
    const error = new Error()
    const stream = makeStream([])

    const pError = rejectionOf(new StreamReader(stream)[method](10))
    stream.destroy(error)

    assert.strict(await pError, error)
  })

  it('rejects if the stream has already errored', async () => {
    const error = new Error()
    const stream = makeStream([])

    await new Promise(resolve => {
      stream.once('error', resolve).destroy(error)
    })

    assert.strict(await rejectionOf(new StreamReader(stream)[method](10)), error)
  })
}

describe('read()', () => {
  it('rejects if size is less than or equal to 0', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).read(0))
    assert.strictEqual(error.code, 'ERR_ASSERTION')
  })

  it('returns null if stream is empty', async () => {
    assert.strictEqual(await new StreamReader(makeStream([])).read(), null)
  })

  makeErrorTests('read')

  it('returns null if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.read(), null)
  })

  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(), Buffer.from('foo'))
    })

    it('returns a chunk of the specified size (smaller than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(2), Buffer.from('fo'))
    })

    it('returns a chunk of the specified size (larger than first)', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(4), Buffer.from('foob'))
    })

    it('returns less data if stream ends', async () => {
      assert.deepEqual(await new StreamReader(makeStream(['foo', 'bar'])).read(10), Buffer.from('foobar'))
    })
  })

  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      assert.strictEqual(await new StreamReader(makeStream.obj(chunks)).read(), chunks[0])
    })
  })
})

describe('readStrict()', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(new StreamReader(makeStream([])).readStrict())
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended without data')
    assert.strictEqual(error.chunk, undefined)
  })

  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream(['foo', 'bar'])).readStrict(10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 6, expected: 10)')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })
})

describe('skip()', function () {
  makeErrorTests('skip')

  it('returns 0 if size is 0', async () => {
    assert.strictEqual(await new StreamReader(makeStream(['foo'])).skip(0), 0)
  })

  it('returns 0 if the stream is already ended', async () => {
    const reader = new StreamReader(makeStream([]))

    await reader.read()

    assert.strictEqual(await reader.skip(10), 0)
  })

  it('skips a number of bytes', async () => {
    const reader = new StreamReader(makeStream('foo bar'))

    assert.strictEqual(await reader.skip(4), 4)
    assert.deepEqual(await reader.read(4), Buffer.from('bar'))
  })

  it('returns less size if stream ends', async () => {
    assert.deepEqual(await new StreamReader(makeStream('foo bar')).skip(10), 7)
  })
})

describe('skipStrict()', function () {
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(new StreamReader(makeStream('foo bar')).skipStrict(10))

    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data (actual: 7, expected: 10)')
    assert.deepEqual(error.bytesSkipped, 7)
  })
})
@vates/stream-reader/package.json (new file, 39 lines)
@@ -0,0 +1,39 @@
{
  "private": false,
  "name": "@vates/stream-reader",
  "description": "Efficiently reads and skips chunks of a given size in a stream",
  "keywords": [
    "async",
    "chunk",
    "data",
    "node",
    "promise",
    "read",
    "reader",
    "skip",
    "stream"
  ],
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/stream-reader",
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "repository": {
    "directory": "@vates/stream-reader",
    "type": "git",
    "url": "https://github.com/vatesfr/xen-orchestra.git"
  },
  "author": {
    "name": "Vates SAS",
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.1.0",
  "engines": {
    "node": ">=10"
  },
  "scripts": {
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "devDependencies": {
    "test": "^3.3.0"
  }
}
@@ -2,7 +2,10 @@
import { Task } from '@vates/task'

const task = new Task({
  name: 'my task',
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },

  // if defined, a new detached task is created
  //
@@ -11,13 +14,15 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId } = event
      const { name, parentId, properties } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -25,8 +30,18 @@ const task = new Task({
// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
task.status
await task.abort()

// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
@@ -34,7 +49,11 @@ const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise `undefined`
Task.abortSignal

@@ -52,3 +71,67 @@ Task.warning(message, data)
// - progress
Task.set(property, value)
```

### `combineEvents`

Create a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```
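For context, a minimal sketch of the event sequence such an `onProgress` callback receives for a trivial run (ESM top-level await assumed; the event shapes follow the `type` switch shown above):

```js
import { Task } from '@vates/task'

const task = new Task({
  properties: { name: 'demo' },
  onProgress(event) {
    console.log(event.type)
  },
})

await task.run(async () => {
  Task.set('progress', 50) // emits a *property* event
  Task.info('halfway there') // emits an *info* event
})
// expected output: start, property, info, end
```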
@@ -18,7 +18,10 @@ npm install --save @vates/task
import { Task } from '@vates/task'

const task = new Task({
  name: 'my task',
  // this object will be sent in the *start* event
  properties: {
    name: 'my task',
  },

  // if defined, a new detached task is created
  //
@@ -27,13 +30,15 @@ const task = new Task({
    // this function is called each time this task or one of its subtasks changes state
    const { id, timestamp, type } = event
    if (type === 'start') {
      const { name, parentId } = event
      const { name, parentId, properties } = event
    } else if (type === 'end') {
      const { result, status } = event
    } else if (type === 'info' || type === 'warning') {
      const { data, message } = event
    } else if (type === 'property') {
      const { name, value } = event
    } else if (type === 'abortionRequested') {
      const { reason } = event
    }
  },
})
@@ -41,8 +46,18 @@ const task = new Task({
// this field is settable once before being observed
task.id

// contains the current status of the task
//
// possible statuses are:
// - pending
// - success
// - failure
task.status
await task.abort()

// Triggers the abort signal associated to the task.
//
// This simply requests the task to abort; it is up to the task to handle this signal or not.
task.abort(reason)

// if fn rejects, the task will be marked as failed
const result = await task.runInside(fn)
@@ -50,7 +65,11 @@ const result = await task.runInside(fn)
// if fn rejects, the task will be marked as failed
// if fn resolves, the task will be marked as succeeded
const result = await task.run(fn)
```

Inside a task:

```js
// the abort signal of the current task if any, otherwise `undefined`
Task.abortSignal

@@ -69,6 +88,70 @@ Task.warning(message, data)
Task.set(property, value)
```

### `combineEvents`

Create a consolidated log from individual events.

It can be used directly as an `onProgress` callback:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({
  // This function is called each time a root task starts.
  //
  // It will be called as many times as there are tasks created with this `onProgress` function.
  onRootTaskStart(taskLog) {
    // `taskLog` is an object reflecting the state of this task and all its subtasks,
    // and will be mutated in real-time to reflect the changes of the task.

    // timestamp at which the task started
    taskLog.start

    // current status of the task as described in the previous section
    taskLog.status

    // undefined or a dictionary of properties attached to the task
    taskLog.properties

    // timestamp at which the abortion was requested, undefined otherwise
    taskLog.abortionRequestedAt

    // undefined or an array of infos emitted on the task
    taskLog.infos

    // undefined or an array of warnings emitted on the task
    taskLog.warnings

    // timestamp at which the task ended, undefined otherwise
    taskLog.end

    // undefined or the result value of the task
    taskLog.result
  },

  // This function is called each time a root task ends.
  onRootTaskEnd(taskLog) {},

  // This function is called each time a root task or a subtask is updated.
  //
  // `taskLog.$root` can be used to unconditionally access the root task.
  onTaskUpdate(taskLog) {},
})

Task.run({ data: { name: 'my task' }, onProgress }, asyncFn)
```

It can also be fed event logs directly:

```js
import { makeOnProgress } from '@vates/task/combineEvents'

const onProgress = makeOnProgress({ onRootTaskStart, onRootTaskEnd, onTaskUpdate })

eventLogs.forEach(onProgress)
```

## Contributions

Contributions are _very_ welcome, either on the documentation or on
@vates/task/combineEvents.js (new file, 61 lines)
@@ -0,0 +1,61 @@
'use strict'

const assert = require('node:assert').strict

const noop = Function.prototype

exports.makeOnProgress = function ({ onRootTaskEnd = noop, onRootTaskStart = noop, onTaskUpdate = noop }) {
  const taskLogs = new Map()
  return function onProgress(event) {
    const { id, type } = event
    let taskLog
    if (type === 'start') {
      taskLog = {
        id,
        properties: { __proto__: null, ...event.properties },
        start: event.timestamp,
        status: 'pending',
      }
      taskLogs.set(id, taskLog)

      const { parentId } = event
      if (parentId === undefined) {
        Object.defineProperty(taskLog, '$root', { value: taskLog })

        // start of a root task
        onRootTaskStart(taskLog)
      } else {
        // start of a subtask
        const parent = taskLogs.get(parentId)
        assert.notEqual(parent, undefined)

        // inject a (non-enumerable) reference to the parent and the root task
        Object.defineProperties(taskLog, { $parent: { value: parent }, $root: { value: parent.$root } })
        ;(parent.tasks ?? (parent.tasks = [])).push(taskLog)
      }
    } else {
      taskLog = taskLogs.get(id)
      assert.notEqual(taskLog, undefined)

      if (type === 'info' || type === 'warning') {
        const key = type + 's'
        const { data, message } = event
        ;(taskLog[key] ?? (taskLog[key] = [])).push({ data, message })
      } else if (type === 'property') {
        ;(taskLog.properties ?? (taskLog.properties = { __proto__: null }))[event.name] = event.value
      } else if (type === 'end') {
        taskLog.end = event.timestamp
        taskLog.result = event.result
        taskLog.status = event.status
      } else if (type === 'abortionRequested') {
        taskLog.abortionRequestedAt = event.timestamp
      }

      if (type === 'end' && taskLog.$root === taskLog) {
        onRootTaskEnd(taskLog)
      }
    }

    onTaskUpdate(taskLog)
  }
}
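A minimal sketch of the consolidated structure this module builds once a root task with one subtask has run to completion (field names as documented in the USAGE above; all values illustrative):

```js
// Shape of a root taskLog after: root starts, subtask runs and succeeds, root succeeds.
// $parent and $root are injected as non-enumerable references, so they do not
// show up when the log is serialized.
const exampleTaskLog = {
  id: 'rootId',
  properties: { name: 'my task' },
  start: 1700000000000,
  status: 'success',
  tasks: [{ id: 'subId', properties: { name: 'subtask' }, start: 1700000000100, status: 'success', end: 1700000000900 }],
  end: 1700000001000,
  result: undefined,
}
```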
@vates/task/combineEvents.test.js (new file, 81 lines)
@@ -0,0 +1,81 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { makeOnProgress } = require('./combineEvents.js')
const { Task } = require('./index.js')

describe('makeOnProgress()', function () {
  it('works', async function () {
    const events = []
    let log
    const task = new Task({
      properties: { name: 'task' },
      onProgress: makeOnProgress({
        onRootTaskStart(log_) {
          assert.equal(log, undefined)
          log = log_
          events.push('onRootTaskStart')
        },
        onRootTaskEnd(log_) {
          assert.equal(log_, log)
          events.push('onRootTaskEnd')
        },

        onTaskUpdate(log_) {
          assert.equal(log_.$root, log)
          events.push('onTaskUpdate')
        },
      }),
    })

    assert.equal(events.length, 0)

    let i = 0

    await task.run(async () => {
      assert.equal(events[i++], 'onRootTaskStart')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.id, task.id)
      assert.equal(log.properties.name, 'task')
      assert(Math.abs(log.start - Date.now()) < 10)

      Task.set('name', 'new name')
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.name, 'new name')

      Task.set('progress', 0)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 0)

      Task.info('foo', {})
      assert.equal(events[i++], 'onTaskUpdate')
      assert.deepEqual(log.infos, [{ data: {}, message: 'foo' }])

      const subtask = new Task({ properties: { name: 'subtask' } })
      await subtask.run(() => {
        assert.equal(events[i++], 'onTaskUpdate')
        assert.equal(log.tasks[0].properties.name, 'subtask')

        Task.warning('bar', {})
        assert.equal(events[i++], 'onTaskUpdate')
        assert.deepEqual(log.tasks[0].warnings, [{ data: {}, message: 'bar' }])

        subtask.abort()
        assert.equal(events[i++], 'onTaskUpdate')
        assert(Math.abs(log.tasks[0].abortionRequestedAt - Date.now()) < 10)
      })
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.tasks[0].status, 'success')

      Task.set('progress', 100)
      assert.equal(events[i++], 'onTaskUpdate')
      assert.equal(log.properties.progress, 100)
    })
    assert.equal(events[i++], 'onRootTaskEnd')
    assert.equal(events[i++], 'onTaskUpdate')
    assert(Math.abs(log.end - Date.now()) < 10)
    assert.equal(log.status, 'success')
  })
})
@@ -10,14 +10,15 @@ function define(object, property, value) {

const noop = Function.prototype

const ABORTED = 'aborted'
const ABORTING = 'aborting'
const FAILURE = 'failure'
const PENDING = 'pending'
const SUCCESS = 'success'
exports.STATUS = { ABORTED, ABORTING, FAILURE, PENDING, SUCCESS }
exports.STATUS = { FAILURE, PENDING, SUCCESS }

// stored in the global context so that various versions of the library can interact.
const asyncStorageKey = '@vates/task@0'
const asyncStorage = global[asyncStorageKey] ?? (global[asyncStorageKey] = new AsyncLocalStorage())

const asyncStorage = new AsyncLocalStorage()
const getTask = () => asyncStorage.getStore()

exports.Task = class Task {
@@ -66,7 +67,6 @@ exports.Task = class Task {

  #abortController = new AbortController()
  #onProgress
  #parent

  get id() {
    return (this.id = Math.random().toString(36).slice(2))
@@ -82,16 +82,14 @@ exports.Task = class Task {
    return this.#status
  }

  constructor({ name, onProgress }) {
    this.#startData = { name }
  constructor({ properties, onProgress } = {}) {
    this.#startData = { properties }

    if (onProgress !== undefined) {
      this.#onProgress = onProgress
    } else {
      const parent = getTask()
      if (parent !== undefined) {
        this.#parent = parent

        const { signal } = parent.#abortController
        signal.addEventListener('abort', () => {
          this.#abortController.abort(signal.reason)
@@ -107,7 +105,15 @@ exports.Task = class Task {
    const { signal } = this.#abortController
    signal.addEventListener('abort', () => {
      if (this.status === PENDING) {
        this.#status = this.#running ? ABORTING : ABORTED
        this.#maybeStart()

        this.#emit('abortionRequested', { reason: signal.reason })

        if (!this.#running) {
          const status = FAILURE
          this.#status = status
          this.#emit('end', { result: signal.reason, status })
        }
      }
    })
  }
@@ -123,14 +129,12 @@ exports.Task = class Task {
    this.#onProgress(data)
  }

  #handleMaybeAbortion(result) {
    if (this.status === ABORTING) {
      this.#status = ABORTED
      this.#emit('end', { status: ABORTED, result })
      return true
  #maybeStart() {
    const startData = this.#startData
    if (startData !== undefined) {
      this.#startData = undefined
      this.#emit('start', startData)
    }

    return false;
  }

  async run(fn) {
@@ -148,22 +152,17 @@ exports.Task = class Task {
    assert.equal(this.#running, false)
    this.#running = true

    const startData = this.#startData
    if (startData !== undefined) {
      this.#startData = undefined
      this.#emit('start', startData)
    }
    this.#maybeStart()

    try {
      const result = await asyncStorage.run(this, fn)
      this.#handleMaybeAbortion(result)
      this.#running = false
      return result
    } catch (result) {
      if (!this.#handleMaybeAbortion(result)) {
        this.#status = FAILURE
        this.#emit('end', { status: FAILURE, result })
      }
      const status = FAILURE

      this.#status = status
      this.#emit('end', { status, result })
      throw result
    }
  }
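A minimal sketch of the new abort-before-run behavior introduced by this hunk: aborting a task that never ran now emits start, abortionRequested, and a failure end (illustrative, matching the tests below):

```js
const { Task } = require('@vates/task')

const events = []
const task = new Task({ properties: { name: 'never run' }, onProgress: event => events.push(event.type) })

task.abort('cancelled')

console.log(task.status) // 'failure'
console.log(events) // ['start', 'abortionRequested', 'end']
```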
@vates/task/index.test.js (new file, 347 lines)
@@ -0,0 +1,347 @@
'use strict'

const assert = require('node:assert').strict
const { describe, it } = require('test')

const { Task } = require('./index.js')

const noop = Function.prototype

function assertEvent(task, expected, eventIndex = -1) {
  const logs = task.$events
  const actual = logs[eventIndex < 0 ? logs.length + eventIndex : eventIndex]

  assert.equal(typeof actual, 'object')
  assert.equal(typeof actual.id, 'string')
  assert.equal(typeof actual.timestamp, 'number')
  for (const keys of Object.keys(expected)) {
    assert.deepEqual(actual[keys], expected[keys])
  }
}

// like new Task() but with a custom onProgress which adds event to task.$events
function createTask(opts) {
  const events = []
  const task = new Task({ ...opts, onProgress: events.push.bind(events) })
  task.$events = events
  return task
}

describe('Task', function () {
  describe('constructor', function () {
    it('data properties are passed to the start event', async function () {
      const properties = { foo: 0, bar: 1 }
      const task = createTask({ properties })
      await task.run(noop)
      assertEvent(task, { type: 'start', properties }, 0)
    })
  })

  it('subtasks events are passed to root task', async function () {
    const task = createTask()
    const result = {}

    await task.run(async () => {
      await new Task().run(() => result)
    })

    assert.equal(task.$events.length, 4)
    assertEvent(task, { type: 'start', parentId: task.id }, 1)
    assertEvent(task, { type: 'end', status: 'success', result }, 2)
  })

  describe('.abortSignal', function () {
    it('is undefined when run outside a task', function () {
      assert.equal(Task.abortSignal, undefined)
    })

    it('is the current abort signal when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        const { abortSignal } = Task
        assert.equal(abortSignal.aborted, false)
        task.abort()
        assert.equal(abortSignal.aborted, true)
      })
    })
  })

  describe('.abort()', function () {
    it('aborts if the task fails with the abort reason', async function () {
      const task = createTask()
      const reason = {}

      await task
        .run(() => {
          task.abort(reason)

          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })

    it('does not abort if the task fails without the abort reason', async function () {
      const task = createTask()
      const reason = {}
      const result = new Error()

      await task
        .run(() => {
          task.abort(reason)

          throw result
        })
        .catch(noop)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result }, 2)
    })

    it('does not abort if the task succeeds', async function () {
      const task = createTask()
      const reason = {}
      const result = {}

      await task
        .run(() => {
          task.abort(reason)

          return result
        })
        .catch(noop)

      assert.equal(task.status, 'success')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'success', result }, 2)
    })

    it('aborts before task is running', function () {
      const task = createTask()
      const reason = {}

      task.abort(reason)

      assert.equal(task.status, 'failure')

      assert.equal(task.$events.length, 3)
      assertEvent(task, { type: 'start' }, 0)
      assertEvent(task, { type: 'abortionRequested', reason }, 1)
      assertEvent(task, { type: 'end', status: 'failure', result: reason }, 2)
    })
  })

  describe('.info()', function () {
    it('does nothing when run outside a task', function () {
      Task.info('foo')
    })

    it('emits an info message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.info('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'info',
        })
      })
    })
  })

  describe('.set()', function () {
    it('does nothing when run outside a task', function () {
      Task.set('progress', 10)
    })

    it('emits a property event when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.set('progress', 10)
        assertEvent(task, {
          name: 'progress',
          type: 'property',
          value: 10,
        })
      })
    })
  })

  describe('.warning()', function () {
    it('does nothing when run outside a task', function () {
      Task.warning('foo')
    })

    it('emits a warning message when run inside a task', async function () {
      const task = createTask()
      await task.run(() => {
        Task.warning('foo')
        assertEvent(task, {
          data: undefined,
          message: 'foo',
          type: 'warning',
        })
      })
    })
  })

  describe('#id', function () {
    it('can be set', function () {
      const task = createTask()
      task.id = 'foo'
      assert.equal(task.id, 'foo')
    })

    it('cannot be set more than once', function () {
      const task = createTask()
      task.id = 'foo'

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })

    it('is randomly generated if not set', function () {
      assert.notEqual(createTask().id, createTask().id)
    })

    it('cannot be set after being observed', function () {
      const task = createTask()
      noop(task.id)

      assert.throws(() => {
        task.id = 'bar'
      }, TypeError)
    })
  })

  describe('#status', function () {
    it('starts as pending', function () {
      assert.equal(createTask().status, 'pending')
    })

    it('changes to success when it finishes without error', async function () {
      const task = createTask()
      await task.run(noop)
      assert.equal(task.status, 'success')
    })

    it('changes to failure when it finishes with error', async function () {
      const task = createTask()
      await task
        .run(() => {
          throw Error()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted after run is complete', async function () {
      const task = createTask()
      await task
        .run(() => {
          task.abort()
          assert.equal(task.status, 'pending')
          Task.abortSignal.throwIfAborted()
        })
        .catch(noop)
      assert.equal(task.status, 'failure')
    })

    it('changes to failure if aborted when not running', function () {
      const task = createTask()
      task.abort()
      assert.equal(task.status, 'failure')
    })
  })

  function makeRunTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('finishes the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'success')
      assertEvent(task, {
        status: 'success',
        result: 'foo',
        type: 'end',
      })
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.run', function () {
    makeRunTests((task, fn) => task.run(fn))
  })
  describe('.wrap', function () {
    makeRunTests((task, fn) => task.wrap(fn)())
  })

  function makeRunInsideTests(run) {
    it('starts the task', async function () {
      const task = createTask()
      await run(task, () => {
        assertEvent(task, { type: 'start' })
      })
    })

    it('does not finish the task on success', async function () {
      const task = createTask()
      await run(task, () => 'foo')
      assert.equal(task.status, 'pending')
    })

    it('fails the task on error', async function () {
      const task = createTask()
      const e = new Error()
      await run(task, () => {
        throw e
      }).catch(noop)

      assert.equal(task.status, 'failure')
      assertEvent(task, {
        status: 'failure',
        result: e,
        type: 'end',
      })
    })
  }
  describe('.runInside', function () {
    makeRunInsideTests((task, fn) => task.runInside(fn))
  })
  describe('.wrapInside', function () {
    makeRunInsideTests((task, fn) => task.wrapInside(fn)())
  })
})
@@ -13,11 +13,19 @@
    "url": "https://vates.fr"
  },
  "license": "ISC",
  "version": "0.0.1",
  "version": "0.1.2",
  "engines": {
    "node": ">=14"
  },
  "devDependencies": {
    "test": "^3.3.0"
  },
  "scripts": {
    "postversion": "npm publish --access public"
    "postversion": "npm publish --access public",
    "test": "node--test"
  },
  "exports": {
    ".": "./index.js",
    "./combineEvents": "./combineEvents.js"
  }
}
@@ -7,8 +7,8 @@
  "bugs": "https://github.com/vatesfr/xen-orchestra/issues",
  "dependencies": {
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/backups": "^0.29.5",
    "@xen-orchestra/fs": "^3.3.1",
    "@xen-orchestra/backups": "^0.38.2",
    "@xen-orchestra/fs": "^4.0.0",
    "filenamify": "^4.1.0",
    "getopts": "^2.2.5",
    "lodash": "^4.17.15",
@@ -27,7 +27,7 @@
  "scripts": {
    "postversion": "npm publish --access public"
  },
  "version": "1.0.0",
  "version": "1.0.8",
  "license": "AGPL-3.0-or-later",
  "author": {
    "name": "Vates SAS",
@@ -1,292 +1,19 @@
'use strict'

const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { Metadata } = require('./_runners/Metadata.js')
const { VmsRemote } = require('./_runners/VmsRemote.js')
const { VmsXapi } = require('./_runners/VmsXapi.js')

const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')

const noop = Function.prototype

const getAdaptersByRemote = adapters => {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}

const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

const DEFAULT_SETTINGS = {
  reportWhen: 'failure',
}

const DEFAULT_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxMergedDeltasPerRun: Infinity,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  vmTimeout: 0,
}

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

exports.Backup = class Backup {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getAdapter = Disposable.factory(function* (remoteId) {
      return {
        adapter: yield getAdapter(remoteId),
        remoteId,
      }
    })

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const { type } = job
    const baseSettings = { ...DEFAULT_SETTINGS }
    if (type === 'backup') {
      Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
      this.run = this._runVmBackup
    } else if (type === 'metadataBackup') {
      Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
      this.run = this._runMetadataBackup
    } else {
exports.createRunner = function createRunner(opts) {
  const { type } = opts.job
  switch (type) {
    case 'backup':
      return new VmsXapi(opts)
    case 'mirrorBackup':
      return new VmsRemote(opts)
    case 'metadataBackup':
      return new Metadata(opts)
    default:
      throw new Error(`No runner for the backup type ${type}`)
  }
    Object.assign(baseSettings, job.settings[''])

    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }
  }

  async _runMetadataBackup() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(
        remoteIds.map(id =>
          this._getAdapter(id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }

  async _runVmBackup() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule

    const config = this._config
    const settings = this._settings
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id =>
          this._getAdapter(id).catch(error => {
            runTask(
              {
                name: 'get remote adapter',
                data: { type: 'remote', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid =>
          runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
            Disposable.use(this._getRecord('VM', vmUuid), vm =>
              new VmBackup({
                baseSettings,
                config,
                getSnapshotNameLabel,
                healthCheckSr,
                job,
                remoteAdapters,
                schedule,
                settings: { ...settings, ...allSettings[vm.uuid] },
                srs,
                vm,
              }).run()
            )
          )
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
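For context, a minimal sketch of how the new `createRunner` factory above is meant to be called; the option values and the require path are assumptions for illustration, not the real call site:

```js
const { createRunner } = require('@xen-orchestra/backups/Backup.js') // path assumed

// placeholders standing in for the real config, record getters and schedule
const config = {}
const getAdapter = () => {}
const getConnectedRecord = () => {}
const schedule = { id: 'schedule-1' }

// dispatches on job.type: 'backup' -> VmsXapi, 'mirrorBackup' -> VmsRemote, 'metadataBackup' -> Metadata
const runner = createRunner({
  config,
  getAdapter,
  getConnectedRecord,
  job: { type: 'backup', settings: {} }, // illustrative job
  schedule,
})
runner.run().catch(console.error)
```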
@@ -3,12 +3,14 @@
 const { Task } = require('./Task')

 exports.HealthCheckVmBackup = class HealthCheckVmBackup {
-  #xapi
   #restoredVm
+  #timeout
+  #xapi

-  constructor({ restoredVm, xapi }) {
+  constructor({ restoredVm, timeout = 10 * 60 * 1000, xapi }) {
     this.#restoredVm = restoredVm
     this.#xapi = xapi
+    this.#timeout = timeout
   }

   async run() {
@@ -23,7 +25,12 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {

     // remove vifs
     await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
-
+    const waitForScript = restoredVm.tags.includes('xo-backup-health-check-xenstore')
+    if (waitForScript) {
+      await restoredVm.set_xenstore_data({
+        'vm-data/xo-backup-health-check': 'planned',
+      })
+    }
     const start = new Date()
     // start Vm

@@ -34,7 +41,7 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
       false // Skip pre-boot checks?
     )
     const started = new Date()
-    const timeout = 10 * 60 * 1000
+    const timeout = this.#timeout
     const startDuration = started - start

     let remainingTimeout = timeout - startDuration
@@ -52,12 +59,52 @@ exports.HealthCheckVmBackup = class HealthCheckVmBackup {
     remainingTimeout -= running - started

     if (remainingTimeout < 0) {
-      throw new Error(`local xapi did not get Runnig state for VM ${restoredId} after ${timeout / 1000} second`)
+      throw new Error(`local xapi did not get Running state for VM ${restoredId} after ${timeout / 1000} second`)
     }
+    // wait for the guest tool version to be defined
+    await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
+      timeout: remainingTimeout,
+    })
+
+    const guestToolsReady = new Date()
+    remainingTimeout -= guestToolsReady - running
+    if (remainingTimeout < 0) {
+      throw new Error(`local xapi did not get the guest tools check ${restoredId} after ${timeout / 1000} second`)
+    }
+
+    if (waitForScript) {
+      const startedRestoredVm = await xapi.waitObjectState(
+        restoredVm.$ref,
+        vm =>
+          vm?.xenstore_data !== undefined &&
+          (vm.xenstore_data['vm-data/xo-backup-health-check'] === 'success' ||
+            vm.xenstore_data['vm-data/xo-backup-health-check'] === 'failure'),
+        {
+          timeout: remainingTimeout,
+        }
+      )
+      const scriptOk = new Date()
+      remainingTimeout -= scriptOk - guestToolsReady
+      if (remainingTimeout < 0) {
+        throw new Error(
+          `Backup health check script did not update vm-data/xo-backup-health-check of ${restoredId} after ${
+            timeout / 1000
+          } second, got ${
+            startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check']
+          } instead of 'success' or 'failure'`
+        )
+      }
+
+      if (startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check'] !== 'success') {
+        const message = startedRestoredVm.xenstore_data['vm-data/xo-backup-health-check-error']
+        if (message) {
+          throw new Error(`Backup health check script failed with message ${message} for VM ${restoredId} `)
+        } else {
+          throw new Error(`Backup health check script failed for VM ${restoredId} `)
+        }
+      }
+      Task.info('Backup health check script successfully executed')
+    }
   }
 )
}
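For context, a minimal usage sketch of the class after this change. A guest opts into the script check by carrying the xo-backup-health-check-xenstore tag and writing 'success' or 'failure' to vm-data/xo-backup-health-check; here the xapi client and restoredVm record are assumed to be provided by the restore code, and the 20-minute timeout is an arbitrary example value:

const { HealthCheckVmBackup } = require('./HealthCheckVmBackup')

async function healthCheck(xapi, restoredVm) {
  await new HealthCheckVmBackup({
    restoredVm,
    timeout: 20 * 60 * 1000, // override the 10 min default for slow guests
    xapi,
  }).run() // rejects if the VM never reaches Running, the tools never report, or the script writes 'failure'
}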
@@ -3,14 +3,14 @@
 const assert = require('assert')

 const { formatFilenameDate } = require('./_filenameDate.js')
-const { importDeltaVm } = require('./_deltaVm.js')
+const { importIncrementalVm } = require('./_incrementalVm.js')
 const { Task } = require('./Task.js')
 const { watchStreamSize } = require('./_watchStreamSize.js')

 exports.ImportVmBackup = class ImportVmBackup {
   constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
     this._adapter = adapter
-    this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
+    this._importIncrementalVmSettings = { newMacAddresses, mapVdisSrs }
     this._metadata = metadata
     this._srUuid = srUuid
     this._xapi = xapi
@@ -31,11 +31,11 @@ exports.ImportVmBackup = class ImportVmBackup {
     assert.strictEqual(metadata.mode, 'delta')

     const ignoredVdis = new Set(
-      Object.entries(this._importDeltaVmSettings.mapVdisSrs)
+      Object.entries(this._importIncrementalVmSettings.mapVdisSrs)
         .filter(([_, srUuid]) => srUuid === null)
         .map(([vdiUuid]) => vdiUuid)
     )
-    backup = await adapter.readDeltaVmBackup(metadata, ignoredVdis)
+    backup = await adapter.readIncrementalVmBackup(metadata, ignoredVdis)
     Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
   }

@@ -49,8 +49,8 @@ exports.ImportVmBackup = class ImportVmBackup {

     const vmRef = isFull
       ? await xapi.VM_import(backup, srRef)
-      : await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
-          ...this._importDeltaVmSettings,
+      : await importIncrementalVm(backup, await xapi.getRecord('SR', srRef), {
+          ...this._importIncrementalVmSettings,
           detectBase: false,
         })
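A hedged sketch of how the renamed settings are consumed; the adapter, metadata, xapi and the UUID strings are placeholders, not values from this commit:

const importer = new ImportVmBackup({
  adapter,
  metadata,
  srUuid: 'default-sr-uuid',
  xapi,
  settings: {
    mapVdisSrs: {
      'vdi-uuid-a': null, // null: skip this VDI entirely (it ends up in ignoredVdis)
      'vdi-uuid-b': 'other-sr-uuid', // restore this VDI onto a different SR
    },
  },
})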
@@ -209,8 +209,8 @@ class RemoteAdapter {

       const isVhdDirectory = vhd instanceof VhdDirectory
       return isVhdDirectory
-        ? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
-        : !this.#useVhdDirectory()
+        ? this.useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
+        : !this.useVhdDirectory()
     })
   }

@@ -321,19 +321,19 @@ class RemoteAdapter {
     return this._vhdDirectoryCompression
   }

-  #useVhdDirectory() {
+  useVhdDirectory() {
     return this.handler.useVhdDirectory()
   }

   #useAlias() {
-    return this.#useVhdDirectory()
+    return this.useVhdDirectory()
   }

   async *#getDiskLegacy(diskId) {
     const RE_VHDI = /^vhdi(\d+)$/
     const handler = this._handler

-    const diskPath = handler._getFilePath('/' + diskId)
+    const diskPath = handler.getFilePath('/' + diskId)
     const mountDir = yield getTmpDir()
     await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
     try {
@@ -404,20 +404,27 @@ class RemoteAdapter {
     return `${baseName}.vhd`
   }

-  async listAllVmBackups() {
+  async listAllVms() {
     const handler = this._handler

-    const backups = { __proto__: null }
-    await asyncMap(await handler.list(BACKUP_DIR), async entry => {
+    const vmsUuids = []
+    await asyncEach(await handler.list(BACKUP_DIR), async entry => {
       // ignore hidden and lock files
       if (entry[0] !== '.' && !entry.endsWith('.lock')) {
-        const vmBackups = await this.listVmBackups(entry)
-        if (vmBackups.length !== 0) {
-          backups[entry] = vmBackups
-        }
+        vmsUuids.push(entry)
       }
     })
+    return vmsUuids
+  }
+
+  async listAllVmBackups() {
+    const vmsUuids = await this.listAllVms()
+    const backups = { __proto__: null }
+    await asyncEach(vmsUuids, async vmUuid => {
+      const vmBackups = await this.listVmBackups(vmUuid)
+      if (vmBackups.length !== 0) {
+        backups[vmUuid] = vmBackups
+      }
+    })
     return backups
   }
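The split above gives two entry points; a sketch of what each returns, with placeholder UUIDs:

async function inspectRemote(adapter) {
  // listAllVms() only lists the per-VM directories under BACKUP_DIR:
  const vmsUuids = await adapter.listAllVms() // e.g. ['vm-uuid-1', 'vm-uuid-2']
  // listAllVmBackups() still returns the full map, now built on top of it:
  const backupsByVm = await adapter.listAllVmBackups() // e.g. { 'vm-uuid-1': [metadata, ...] }
  return { vmsUuids, backupsByVm }
}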
@@ -658,9 +665,9 @@ class RemoteAdapter {
     return path
   }

-  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
+  async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
     const handler = this._handler
-    if (this.#useVhdDirectory()) {
+    if (this.useVhdDirectory()) {
       const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
       const size = await createVhdDirectoryFromStream(handler, dataPath, input, {
         concurrency: writeBlockConcurrency,
@@ -669,7 +676,6 @@ class RemoteAdapter {
           await input.task
           return validator.apply(this, arguments)
         },
-        nbdClient,
       })
       await VhdAbstract.createAlias(handler, path, dataPath)
       return size
@@ -692,8 +698,8 @@ class RemoteAdapter {
   }

   // open the hierarchy of ancestors until we find a full one
-  async _createSyntheticStream(handler, path) {
-    const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
+  async _createVhdStream(handler, path, { useChain }) {
+    const disposableSynthetic = useChain ? await VhdSynthetic.fromVhdChain(handler, path) : await openVhd(handler, path)
     // I don't want the vhds to be disposed on return
     // but only when the stream is done ( or failed )

@@ -718,15 +724,15 @@ class RemoteAdapter {
     return stream
   }

-  async readDeltaVmBackup(metadata, ignoredVdis) {
+  async readIncrementalVmBackup(metadata, ignoredVdis, { useChain = true } = {}) {
     const handler = this._handler
-    const { vbds, vhds, vifs, vm } = metadata
+    const { vbds, vhds, vifs, vm, vmSnapshot } = metadata
     const dir = dirname(metadata._filename)
     const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))

     const streams = {}
     await asyncMapSettled(Object.keys(vdis), async ref => {
-      streams[`${ref}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[ref]))
+      streams[`${ref}.vhd`] = await this._createVhdStream(handler, join(dir, vhds[ref]), { useChain })
     })

     return {
@@ -735,7 +741,7 @@ class RemoteAdapter {
       vdis,
       version: '1.0.0',
       vifs,
-      vm,
+      vm: { ...vm, suspend_VDI: vmSnapshot.suspend_VDI },
     }
   }

@@ -747,7 +753,49 @@ class RemoteAdapter {
     // _filename is a private field used to compute the backup id
     //
     // it's enumerable to make it cacheable
-    return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
+    const metadata = { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
+
+    // backups created on XenServer < 7.1 via JSON in XML-RPC transports have boolean values encoded as integers, which make them unusable with more recent XAPIs
+    if (typeof metadata.vm.is_a_template === 'number') {
+      const properties = {
+        vbds: ['bootable', 'unpluggable', 'storage_lock', 'empty', 'currently_attached'],
+        vdis: [
+          'sharable',
+          'read_only',
+          'storage_lock',
+          'managed',
+          'missing',
+          'is_a_snapshot',
+          'allow_caching',
+          'metadata_latest',
+        ],
+        vifs: ['currently_attached', 'MAC_autogenerated'],
+        vm: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_a_snapshot', 'is_snapshot_from_vmpp'],
+        vmSnapshot: ['is_a_template', 'is_control_domain', 'ha_always_run', 'is_snapshot_from_vmpp'],
+      }
+
+      function fixBooleans(obj, properties) {
+        properties.forEach(property => {
+          if (typeof obj[property] === 'number') {
+            obj[property] = obj[property] === 1
+          }
+        })
+      }
+
+      for (const [key, propertiesInKey] of Object.entries(properties)) {
+        const value = metadata[key]
+        if (value !== undefined) {
+          // some properties of the metadata are collections indexed by the opaqueRef
+          const isCollection = Object.keys(value).some(subKey => subKey.startsWith('OpaqueRef:'))
+          if (isCollection) {
+            Object.values(value).forEach(subValue => fixBooleans(subValue, propertiesInKey))
+          } else {
+            fixBooleans(value, propertiesInKey)
+          }
+        }
+      }
+    }
+    return metadata
   }
 }
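The boolean normalization in isolation, a sketch with made-up values:

// Old XML-RPC dumps encode booleans as 0/1; fixBooleans turns the listed
// properties back into real booleans and leaves everything else alone.
const vm = { is_a_template: 0, ha_always_run: 1, name_label: 'web01' }
fixBooleans(vm, ['is_a_template', 'ha_always_run'])
// vm is now { is_a_template: false, ha_always_run: true, name_label: 'web01' }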
@@ -1,7 +1,7 @@
 'use strict'

 const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
-const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')
+const { PATH_DB_DUMP } = require('./_runners/_PoolMetadataBackup.js')

 exports.RestoreMetadataBackup = class RestoreMetadataBackup {
   constructor({ backupId, handler, xapi }) {
@@ -1,496 +0,0 @@
'use strict'

const assert = require('assert')
const findLast = require('lodash/findLast.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { DeltaBackupWriter } = require('./writers/DeltaBackupWriter.js')
const { DeltaReplicationWriter } = require('./writers/DeltaReplicationWriter.js')
const { exportDeltaVm } = require('./_deltaVm.js')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
const { FullBackupWriter } = require('./writers/FullBackupWriter.js')
const { FullReplicationWriter } = require('./writers/FullReplicationWriter.js')
const { getOldEntries } = require('./_getOldEntries.js')
const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')

const { debug, warn } = createLogger('xo:backups:VmBackup')

class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

const asyncEach = async (iterable, fn, thisArg = iterable) => {
  for (const item of iterable) {
    await fn.call(thisArg, item)
  }
}

const forkDeltaExport = deltaExport =>
  Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })

class VmBackup {
  constructor({
    config,
    getSnapshotNameLabel,
    healthCheckSr,
    job,
    remoteAdapters,
    remotes,
    schedule,
    settings,
    srs,
    vm,
  }) {
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
      // don't match replicated VMs created by this very job otherwise they
      // will be replicated again and again
      throw new Error('cannot backup a VM created by this very job')
    }

    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    this.vm = vm
    const { tags } = this.vm

    // VM (snapshot) that is really exported
    this.exportedVm = undefined

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isDelta = job.mode === 'delta'
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings

    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._isDelta
        ? [DeltaBackupWriter, DeltaReplicationWriter]
        : [FullBackupWriter, FullReplicationWriter]

      const allSettings = job.settings
      Object.keys(remoteAdapters).forEach(remoteId => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(new BackupWriter({ backup: this, remoteId, settings: targetSettings }))
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(new ReplicationWriter({ backup: this, sr, settings: targetSettings }))
        }
      })
    }
  }

  // calls fn for each writer, warns of any errors, and throws only if there are no writers left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
    const n = writers.size
    if (n === 0) {
      return
    }

    async function callWriter(writer) {
      const { name } = writer.constructor
      try {
        debug('writer step starting', { step, writer: name })
        await fn(writer)
        debug('writer step succeeded', { duration: step, writer: name })
      } catch (error) {
        writers.delete(writer)

        warn('writer step failed', { error, step, writer: name })

        // these two steps are the only ones that are not already in their own sub tasks
        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }

        throw error
      }
    }
    if (n === 1) {
      const [writer] = writers
      return callWriter(writer)
    }

    const errors = []
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await callWriter(writer)
      } catch (error) {
        errors.push(error)
      }
    })
    if (writers.size === 0) {
      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const { vm } = this
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const { vm } = this
    const xapi = this._xapi

    const settings = this._settings

    const doSnapshot =
      settings.unconditionalSnapshot ||
      this._isDelta ||
      (!settings.offlineBackup && vm.power_state === 'Running') ||
      settings.snapshotRetention !== 0
    if (doSnapshot) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          ignoreNobakVdis: true,
          name_label: this._getSnapshotNameLabel(vm),
          unplugVusbs: true,
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this.exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this.exportedVm.uuid
      })
    } else {
      this.exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _copyDelta() {
    const { exportedVm } = this
    const baseVm = this._baseVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          sizeContainers,
          timestamp,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }

  async _copyFull() {
    const { compression } = this.job
    const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
      compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
      useSnapshot: false,
    })
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this.vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    const allSettings = this.job.settings
    const baseSettings = this._baseSettings
    const baseVmRef = this._baseVm?.$ref

    const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
    const xapi = this._xapi
    await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
      const settings = {
        ...baseSettings,
        ...allSettings[scheduleId],
        ...allSettings[this.vm.uuid],
      }
      return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
        if ($ref !== baseVmRef) {
          return xapi.VM_destroy($ref)
        }
      })
    })
  }

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      debug('no base VM found')
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      debug('not using base VM because fullInterval reached')
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const [baseUuid, snapshotOf] = await Promise.all([
        xapi.getField('VDI', baseRef, 'uuid'),
        xapi.getField('VDI', baseRef, 'snapshot_of'),
      ])
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(baseUuid, srcVdi)
      } else {
        debug('ignore snapshot VDI because no longer present on VM', {
          vdi: baseUuid,
        })
      }
    })

    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    await this._callWriters(
      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
      'writer.checkBaseVdis()',
      false
    )

    if (presentBaseVdis.size === 0) {
      debug('no base VM found')
      return
    }

    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (presentBaseVdis.has(baseUuid)) {
        debug('found base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
      } else {
        debug('missing base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }

  async _healthCheck() {
    const settings = this._settings

    if (this._healthCheckSr === undefined) {
      return
    }

    // check if current VM has tags
    const { tags } = this.vm
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      return
    }

    await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
  }

  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    if (this._isDelta) {
      await this._selectBaseVm()
    }

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const { vm } = this
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await (this._isDelta ? this._copyDelta() : this._copyFull())
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
    await this._healthCheck()
  }
}
exports.VmBackup = VmBackup

decorateMethodsWith(VmBackup, {
  run: defer,
})
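The run: defer decoration at the end of this deleted file is what injected the $defer parameter. In miniature, and independent of this codebase:

const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')

class Demo {
  async run($defer, value) {
    // deferred callbacks run when run() settles, like writer.afterBackup() above
    $defer(() => console.log('cleanup'))
    return value * 2
  }
}
decorateMethodsWith(Demo, { run: defer })

// callers never see $defer: new Demo().run(21) resolves to 42 after the
// deferred cleanup has run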
@@ -1,8 +1,8 @@
 'use strict'

-require('@xen-orchestra/log/configure').catchGlobalErrors(
-  require('@xen-orchestra/log').createLogger('xo:backups:worker')
-)
+const logger = require('@xen-orchestra/log').createLogger('xo:backups:worker')
+
+require('@xen-orchestra/log/configure').catchGlobalErrors(logger)

 require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()

@@ -13,13 +13,15 @@ const { createDebounceResource } = require('@vates/disposable/debounceResource.j
 const { decorateMethodsWith } = require('@vates/decorate-with')
 const { deduped } = require('@vates/disposable/deduped.js')
 const { getHandler } = require('@xen-orchestra/fs')
+const { createRunner } = require('./Backup.js')
 const { parseDuration } = require('@vates/parse-duration')
 const { Xapi } = require('@xen-orchestra/xapi')

-const { Backup } = require('./Backup.js')
 const { RemoteAdapter } = require('./RemoteAdapter.js')
 const { Task } = require('./Task.js')

+const { debug } = logger
+
 class BackupWorker {
   #config
   #job
@@ -46,7 +48,7 @@
   }

   run() {
-    return new Backup({
+    return createRunner({
       config: this.#config,
       getAdapter: remoteId => this.getAdapter(this.#remotes[remoteId]),
       getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
@@ -122,6 +124,11 @@ decorateMethodsWith(BackupWorker, {
   ]),
 })

+const emitMessage = message => {
+  debug('message emitted', { message })
+  process.send(message)
+}
+
 // Received message:
 //
 // Message {
@@ -139,6 +146,8 @@ decorateMethodsWith(BackupWorker, {
 // result?: any
 // }
 process.on('message', async message => {
+  debug('message received', { message })
+
   if (message.action === 'run') {
     const backupWorker = new BackupWorker(message.data)
     try {
@@ -147,7 +156,7 @@ process.on('message', async message => {
         {
           name: 'backup run',
           onLog: data =>
-            process.send({
+            emitMessage({
               data,
               type: 'log',
             }),
@@ -156,13 +165,13 @@
       )
     : await backupWorker.run()

-  process.send({
+  emitMessage({
     type: 'result',
     result,
     status: 'success',
   })
 } catch (error) {
-  process.send({
+  emitMessage({
     type: 'result',
     result: error,
     status: 'failure',
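The other side of this message protocol is not shown in the diff; a sketch of a hypothetical parent process matching the log/result message shapes above (the worker path and the data payload are assumptions):

const { fork } = require('node:child_process')

const worker = fork(require.resolve('./backupWorker.js'))
worker.on('message', message => {
  if (message.type === 'log') {
    console.log('task log', message.data)
  } else if (message.type === 'result') {
    console.log(message.status, message.result)
    worker.disconnect()
  }
})
worker.send({ action: 'run', data: { /* config, job, remotes, schedule... */ } })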
@@ -3,7 +3,6 @@
 const { beforeEach, afterEach, test, describe } = require('test')
 const assert = require('assert').strict

-const rimraf = require('rimraf')
 const tmp = require('tmp')
 const fs = require('fs-extra')
 const uuid = require('uuid')
@@ -14,6 +13,7 @@ const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
 const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
 const { checkAliases } = require('./_cleanVm')
 const { dirname, basename } = require('path')
+const { rimraf } = require('rimraf')

 let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
 const rootPath = 'xo-vm-backups/VMUUID/'
@@ -541,7 +541,8 @@ exports.cleanVm = async function cleanVm(

       // don't warn if the size has changed after a merge
       if (!merged && fileSystemSize !== size) {
-        logWarn('incorrect backup size in metadata', {
+        // FIXME: figure out why it occurs so often and, once fixed, log the real problems with `logWarn`
+        console.warn('cleanVm: incorrect backup size in metadata', {
           path: metadataPath,
           actual: size ?? 'none',
           expected: fileSystemSize,
@@ -1,37 +0,0 @@
'use strict'

const eos = require('end-of-stream')
const { PassThrough } = require('stream')

const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
  const { forks = 0 } = stream
  stream.forks = forks + 1

  debug('forking', { forks: stream.forks })

  const proxy = new PassThrough()
  stream.pipe(proxy)
  eos(stream, error => {
    if (error !== undefined) {
      debug('error on original stream, destroying fork', { error })
      proxy.destroy(error)
    }
  })
  eos(proxy, error => {
    debug('end of stream, unpiping', { error, forks: --stream.forks })

    stream.unpipe(proxy)

    if (stream.forks === 0) {
      debug('no more forks, destroying original stream')
      stream.destroy(new Error('no more consumers for this stream'))
    }
  })
  return proxy
}
@@ -12,7 +12,7 @@ const { defer } = require('golike-defer')

 const { cancelableMap } = require('./_cancelableMap.js')
 const { Task } = require('./Task.js')
-const { pick } = require('lodash')
+const pick = require('lodash/pick.js')

 const TAG_BASE_DELTA = 'xo:base_delta'
 exports.TAG_BASE_DELTA = TAG_BASE_DELTA
@@ -33,7 +33,7 @@ const resolveUuid = async (xapi, cache, uuid, type) => {
   return ref
 }

-exports.exportDeltaVm = async function exportDeltaVm(
+exports.exportIncrementalVm = async function exportIncrementalVm(
   vm,
   baseVm,
   {
@@ -143,18 +143,18 @@ exports.exportDeltaVm = async function exportDeltaVm(
   )
 }

-exports.importDeltaVm = defer(async function importDeltaVm(
+exports.importIncrementalVm = defer(async function importIncrementalVm(
   $defer,
-  deltaVm,
+  incrementalVm,
   sr,
   { cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {}, newMacAddresses = false } = {}
 ) {
-  const { version } = deltaVm
+  const { version } = incrementalVm
   if (compareVersions(version, '1.0.0') < 0) {
     throw new Error(`Unsupported delta backup version: ${version}`)
   }

-  const vmRecord = deltaVm.vm
+  const vmRecord = incrementalVm.vm
   const xapi = sr.$xapi

   let baseVm
@@ -183,15 +183,15 @@ exports.importDeltaVm = defer(async function importDeltaVm(
       baseVdis[vbd.VDI] = vbd.$VDI
     }
   })
-  const vdiRecords = deltaVm.vdis
+  const vdiRecords = incrementalVm.vdis

   // 0. Create suspend_VDI
   let suspendVdi
-  if (vmRecord.power_state === 'Suspended') {
+  if (vmRecord.suspend_VDI !== undefined && vmRecord.suspend_VDI !== 'OpaqueRef:NULL') {
     const vdi = vdiRecords[vmRecord.suspend_VDI]
     if (vdi === undefined) {
       Task.warning('Suspend VDI not available for this suspended VM', {
-        vm: pick(vmRecord, 'uuid', 'name_label'),
+        vm: pick(vmRecord, 'uuid', 'name_label', 'suspend_VDI'),
       })
     } else {
       suspendVdi = await xapi.getRecord(
@@ -240,7 +240,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
   await asyncMap(await xapi.getField('VM', vmRef, 'VBDs'), ref => ignoreErrors.call(xapi.call('VBD.destroy', ref)))

   // 3. Create VDIs & VBDs.
-  const vbdRecords = deltaVm.vbds
+  const vbdRecords = incrementalVm.vbds
   const vbds = groupBy(vbdRecords, 'VDI')
   const newVdis = {}
   await asyncMap(Object.keys(vdiRecords), async vdiRef => {
@@ -309,7 +309,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
     }
   })

-  const { streams } = deltaVm
+  const { streams } = incrementalVm

   await Promise.all([
     // Import VDI contents.
@@ -326,7 +326,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
     }),

     // Create VIFs.
-    asyncMap(Object.values(deltaVm.vifs), vif => {
+    asyncMap(Object.values(incrementalVm.vifs), vif => {
       let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)

       if (network === undefined) {
@@ -358,8 +358,8 @@ exports.importDeltaVm = defer(async function importDeltaVm(
   ])

   await Promise.all([
-    deltaVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
-    xapi.setField('VM', vmRef, 'name_label', deltaVm.vm.name_label),
+    incrementalVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
+    xapi.setField('VM', vmRef, 'name_label', incrementalVm.vm.name_label),
   ])

   return vmRef
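Taken together, the renamed pair keeps its old call shape. A sketch assuming xapi records for the VM, an optional base snapshot, and a target SR record; note that callers never pass $defer, it is injected by the defer decorator:

const { exportIncrementalVm, importIncrementalVm } = require('./_incrementalVm.js')

async function replicate(vm, baseVm, targetSr) {
  // streams plus vm/vbds/vdis/vifs records
  const incrementalVm = await exportIncrementalVm(vm, baseVm, {})
  return importIncrementalVm(incrementalVm, targetSr, { detectBase: true })
}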
@xen-orchestra/backups/_runners/Metadata.js (new file, 134 lines)
@@ -0,0 +1,134 @@
'use strict'

const { asyncMap } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { XoMetadataBackup } = require('./_XoMetadataBackup.js')
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
const { runTask } = require('./_runTask.js')
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')

const DEFAULT_METADATA_SETTINGS = {
  retentionPoolMetadata: 0,
  retentionXoMetadata: 0,
}

exports.Metadata = class MetadataBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const schedule = this._schedule
    const job = this._job
    const remoteIds = extractIdsFromSimplePattern(job.remotes)
    if (remoteIds.length === 0) {
      throw new Error('metadata backup job cannot run without remotes')
    }

    const config = this._config
    const poolIds = extractIdsFromSimplePattern(job.pools)
    const isEmptyPools = poolIds.length === 0
    const isXoMetadata = job.xoMetadata !== undefined
    if (!isXoMetadata && isEmptyPools) {
      throw new Error('no metadata mode found')
    }

    const settings = this._settings

    const { retentionPoolMetadata, retentionXoMetadata } = settings

    if (
      (retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
      (!isXoMetadata && retentionPoolMetadata === 0) ||
      (isEmptyPools && retentionXoMetadata === 0)
    ) {
      throw new Error('no retentions corresponding to the metadata modes found')
    }

    await Disposable.use(
      Disposable.all(
        poolIds.map(id =>
          this._getRecord('pool', id).catch(error => {
            // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
            runTask(
              {
                name: 'get pool record',
                data: { type: 'pool', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(remoteIds.map(id => this._getAdapter(id))),
      async (pools, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
        if (remoteAdapters.length === 0) {
          return
        }
        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        // remove pools that failed (already handled)
        pools = pools.filter(_ => _ !== undefined)

        const promises = []
        if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
          promises.push(
            asyncMap(pools, async pool =>
              runTask(
                {
                  name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
                  data: {
                    id: pool.$id,
                    pool,
                    poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
                    type: 'pool',
                  },
                },
                () =>
                  new PoolMetadataBackup({
                    config,
                    job,
                    pool,
                    remoteAdapters,
                    schedule,
                    settings,
                  }).run()
              )
            )
          )
        }

        if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
          promises.push(
            runTask(
              {
                name: `Starting XO metadata backup. (${job.id})`,
                data: {
                  type: 'xo',
                },
              },
              () =>
                new XoMetadataBackup({
                  config,
                  job,
                  remoteAdapters,
                  schedule,
                  settings,
                }).run()
            )
          )
        }
        await Promise.all(promises)
      }
    )
  }
}
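For orientation, a hypothetical job shape that would pass the retention guards above; the SimpleIdPattern form ({ id: ... }) and every value here are assumptions for illustration, not taken from this commit:

const job = {
  pools: { id: 'pool-uuid' }, // assumed SimpleIdPattern form, enables pool metadata mode
  remotes: { id: 'remote-uuid' },
  xoMetadata: true, // enables XO config mode
  settings: {
    // job.settings[''] feeds _computeBaseSettings; each enabled mode needs a non-zero retention
    '': { retentionPoolMetadata: 3, retentionXoMetadata: 7 },
  },
}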
@xen-orchestra/backups/_runners/VmsRemote.js (new file, 98 lines)
@@ -0,0 +1,98 @@
'use strict'

const { asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { Task } = require('../Task.js')
const createStreamThrottle = require('./_createStreamThrottle.js')
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
const { runTask } = require('./_runTask.js')
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
const { FullRemote } = require('./_vmRunners/FullRemote.js')
const { IncrementalRemote } = require('./_vmRunners/IncrementalRemote.js')

const DEFAULT_REMOTE_VM_SETTINGS = {
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  timeout: 0,
  validateVhdStreams: false,
  vmTimeout: 0,
}

exports.VmsRemote = class RemoteVmsBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_REMOTE_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      () => this._getAdapter(job.sourceRemote),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      Disposable.all(
        extractIdsFromSimplePattern(job.remotes).map(id => id !== job.sourceRemote && this._getAdapter(id))
      ),
      async ({ adapter: sourceRemoteAdapter }, healthCheckSr, remoteAdapters) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => !!_)
        if (remoteAdapters.length === 0) {
          return
        }

        const vmsUuids = await sourceRemoteAdapter.listAllVms()

        Task.info('vms', { vms: vmsUuids })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)
        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          const opts = {
            baseSettings,
            config,
            job,
            healthCheckSr,
            remoteAdapters,
            schedule,
            settings: { ...settings, ...allSettings[vmUuid] },
            sourceRemoteAdapter,
            throttleStream,
            vmUuid,
          }
          let vmBackup
          if (job.mode === 'delta') {
            vmBackup = new IncrementalRemote(opts)
          } else if (job.mode === 'full') {
            vmBackup = new FullRemote(opts)
          } else {
            throw new Error(`Job mode ${job.mode} not implemented for mirror backup`)
          }

          return runTask(taskStart, () => vmBackup.run())
        }
        const { concurrency } = settings
        await asyncMapSettled(vmsUuids, !concurrency ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
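The Disposable.use pattern shared by these runners deserves a note: each disposable (or function returning one) is resolved and its value is passed, in order, to the final handler. A reduced sketch; getResourceA/B/C are hypothetical factories returning disposables:

const Disposable = require('promise-toolbox/Disposable')

await Disposable.use(
  () => getResourceA(), // functions are invoked lazily
  Disposable.all([getResourceB(), getResourceC()]), // grouped, disposed together
  async (a, [b, c]) => {
    // all three values are live here and are disposed when this handler settles
  }
)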
@xen-orchestra/backups/_runners/VmsXapi.js (new file, 138 lines)
@@ -0,0 +1,138 @@
'use strict'

const { asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const { limitConcurrency } = require('limit-concurrency-decorator')

const { extractIdsFromSimplePattern } = require('../extractIdsFromSimplePattern.js')
const { Task } = require('../Task.js')
const createStreamThrottle = require('./_createStreamThrottle.js')
const { DEFAULT_SETTINGS, Abstract } = require('./_Abstract.js')
const { runTask } = require('./_runTask.js')
const { getAdaptersByRemote } = require('./_getAdaptersByRemote.js')
const { IncrementalXapi } = require('./_vmRunners/IncrementalXapi.js')
const { FullXapi } = require('./_vmRunners/FullXapi.js')

const DEFAULT_XAPI_VM_SETTINGS = {
  bypassVdiChainsCheck: false,
  checkpointSnapshot: false,
  concurrency: 2,
  copyRetention: 0,
  deleteFirst: false,
  exportRetention: 0,
  fullInterval: 0,
  healthCheckSr: undefined,
  healthCheckVmsWithTags: [],
  maxExportRate: 0,
  maxMergedDeltasPerRun: Infinity,
  offlineBackup: false,
  offlineSnapshot: false,
  snapshotRetention: 0,
  timeout: 0,
  useNbd: false,
  unconditionalSnapshot: false,
  validateVhdStreams: false,
  vmTimeout: 0,
}

exports.VmsXapi = class VmsXapiBackupRunner extends Abstract {
  _computeBaseSettings(config, job) {
    const baseSettings = { ...DEFAULT_SETTINGS }
    Object.assign(baseSettings, DEFAULT_XAPI_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
    Object.assign(baseSettings, job.settings[''])
    return baseSettings
  }

  async run() {
    const job = this._job

    // FIXME: proper SimpleIdPattern handling
    const getSnapshotNameLabel = this._getSnapshotNameLabel
    const schedule = this._schedule
    const settings = this._settings

    const throttleStream = createStreamThrottle(settings.maxExportRate)

    const config = this._config
    await Disposable.use(
      Disposable.all(
        extractIdsFromSimplePattern(job.srs).map(id =>
          this._getRecord('SR', id).catch(error => {
            runTask(
              {
                name: 'get SR record',
                data: { type: 'SR', id },
              },
              () => Promise.reject(error)
            )
          })
        )
      ),
      Disposable.all(extractIdsFromSimplePattern(job.remotes).map(id => this._getAdapter(id))),
      () => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
      async (srs, remoteAdapters, healthCheckSr) => {
        // remove adapters that failed (already handled)
        remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

        // remove srs that failed (already handled)
        srs = srs.filter(_ => _ !== undefined)

        if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
          return
        }

        const vmIds = extractIdsFromSimplePattern(job.vms)

        Task.info('vms', { vms: vmIds })

        remoteAdapters = getAdaptersByRemote(remoteAdapters)

        const allSettings = this._job.settings
        const baseSettings = this._baseSettings

        const handleVm = vmUuid => {
          const taskStart = { name: 'backup VM', data: { type: 'VM', id: vmUuid } }

          return this._getRecord('VM', vmUuid).then(
            disposableVm =>
              Disposable.use(disposableVm, vm => {
                taskStart.data.name_label = vm.name_label
                return runTask(taskStart, () => {
                  const opts = {
                    baseSettings,
                    config,
                    getSnapshotNameLabel,
                    healthCheckSr,
                    job,
                    remoteAdapters,
                    schedule,
                    settings: { ...settings, ...allSettings[vm.uuid] },
                    srs,
                    throttleStream,
                    vm,
                  }
                  let vmBackup
                  if (job.mode === 'delta') {
                    vmBackup = new IncrementalXapi(opts)
                  } else {
                    if (job.mode === 'full') {
                      vmBackup = new FullXapi(opts)
                    } else {
                      throw new Error(`Job mode ${job.mode} not implemented`)
                    }
                  }
                  return vmBackup.run()
                })
              }),
            error =>
              runTask(taskStart, () => {
                throw error
              })
          )
        }
        const { concurrency } = settings
        await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
      }
    )
  }
}
@xen-orchestra/backups/_runners/_Abstract.js (new file, 51 lines)
@@ -0,0 +1,51 @@
'use strict'

const Disposable = require('promise-toolbox/Disposable')
const pTimeout = require('promise-toolbox/timeout')
const { compileTemplate } = require('@xen-orchestra/template')
const { runTask } = require('./_runTask.js')
const { RemoteTimeoutError } = require('./_RemoteTimeoutError.js')

exports.DEFAULT_SETTINGS = {
  getRemoteTimeout: 300e3,
  reportWhen: 'failure',
}

exports.Abstract = class AbstractRunner {
  constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
    this._config = config
    this._getRecord = getConnectedRecord
    this._job = job
    this._schedule = schedule

    this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
      '{job.name}': job.name,
      '{vm.name_label}': vm => vm.name_label,
    })

    const baseSettings = this._computeBaseSettings(config, job)
    this._baseSettings = baseSettings
    this._settings = { ...baseSettings, ...job.settings[schedule.id] }

    const { getRemoteTimeout } = this._settings
    this._getAdapter = async function (remoteId) {
      try {
        const disposable = await pTimeout.call(getAdapter(remoteId), getRemoteTimeout, new RemoteTimeoutError(remoteId))

        return new Disposable(() => disposable.dispose(), {
          adapter: disposable.value,
          remoteId,
        })
      } catch (error) {
        // See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
        runTask(
          {
            name: 'get remote adapter',
            data: { type: 'remote', id: remoteId },
          },
          () => Promise.reject(error)
        )
      }
    }
  }
}
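The timeout wrapper in _getAdapter reduced to its essentials; pTimeout.call rejects with the provided error if the wrapped promise does not settle in time, here with the same 5-minute default as getRemoteTimeout:

const pTimeout = require('promise-toolbox/timeout')
const { RemoteTimeoutError } = require('./_RemoteTimeoutError.js')

// sketch: 300e3 ms to obtain the adapter, else a typed error naming the remote
const getAdapterWithTimeout = (getAdapter, remoteId) =>
  pTimeout.call(getAdapter(remoteId), 300e3, new RemoteTimeoutError(remoteId))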
@@ -2,10 +2,10 @@

 const { asyncMap } = require('@xen-orchestra/async-map')

-const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
+const { DIR_XO_POOL_METADATA_BACKUPS } = require('../RemoteAdapter.js')
 const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')
-const { formatFilenameDate } = require('./_filenameDate.js')
-const { Task } = require('./Task.js')
+const { formatFilenameDate } = require('../_filenameDate.js')
+const { Task } = require('../Task.js')

 const PATH_DB_DUMP = '/pool/xmldbdump'
 exports.PATH_DB_DUMP = PATH_DB_DUMP
@xen-orchestra/backups/_runners/_RemoteTimeoutError.js (new file, 8 lines)
@@ -0,0 +1,8 @@
'use strict'
class RemoteTimeoutError extends Error {
  constructor(remoteId) {
    super('timeout while getting the remote ' + remoteId)
    this.remoteId = remoteId
  }
}
exports.RemoteTimeoutError = RemoteTimeoutError
@@ -2,9 +2,9 @@

 const { asyncMap } = require('@xen-orchestra/async-map')

-const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')
-const { formatFilenameDate } = require('./_filenameDate.js')
-const { Task } = require('./Task.js')
+const { DIR_XO_CONFIG_BACKUPS } = require('../RemoteAdapter.js')
+const { formatFilenameDate } = require('../_filenameDate.js')
+const { Task } = require('../Task.js')

 exports.XoMetadataBackup = class XoMetadataBackup {
   constructor({ config, job, remoteAdapters, schedule, settings }) {
@xen-orchestra/backups/_runners/_createStreamThrottle.js (new file, 17 lines)
@@ -0,0 +1,17 @@
'use strict'

const { pipeline } = require('node:stream')
const { ThrottleGroup } = require('@kldzj/stream-throttle')
const identity = require('lodash/identity.js')

const noop = Function.prototype

module.exports = function createStreamThrottle(rate) {
  if (rate === 0) {
    return identity
  }
  const group = new ThrottleGroup({ rate })
  return function throttleStream(stream) {
    return pipeline(stream, group.createThrottle(), noop)
  }
}
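Usage sketch: one group per job run, so the cap is shared by every stream returned by throttleStream; the 50 MiB/s figure is an arbitrary example:

const createStreamThrottle = require('./_createStreamThrottle.js')

const throttleStream = createStreamThrottle(50 * 1024 * 1024) // bytes per second; 0 disables throttling
// every VM export of the run then goes through the same shared ThrottleGroup:
// const limitedExport = throttleStream(vmExportStream)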
@xen-orchestra/backups/_runners/_forkStreamUnpipe.js (new file, 36 lines)
@@ -0,0 +1,36 @@
'use strict'

const { finished, PassThrough } = require('node:stream')

const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')

// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(source) {
  const { forks = 0 } = source
  source.forks = forks + 1

  debug('forking', { forks: source.forks })

  const fork = new PassThrough()
  source.pipe(fork)
  finished(source, { writable: false }, error => {
    if (error !== undefined) {
      debug('error on original stream, destroying fork', { error })
      fork.destroy(error)
    }
  })
  finished(fork, { readable: false }, error => {
    debug('end of stream, unpiping', { error, forks: --source.forks })

    source.unpipe(fork)

    if (source.forks === 0) {
      debug('no more forks, destroying original stream')
      source.destroy(new Error('no more consumers for this stream'))
    }
  })
  return fork
}
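Usage sketch, with hypothetical destinations: each consumer gets its own fork, and the source is only destroyed once every fork has finished:

const { createWriteStream } = require('node:fs')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')

function teeToTwoFiles(source) {
  forkStreamUnpipe(source).pipe(createWriteStream('/tmp/replica-a.xva'))
  forkStreamUnpipe(source).pipe(createWriteStream('/tmp/replica-b.xva'))
  // a failure in one fork unpipes only that fork; the other keeps draining the source
}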
@xen-orchestra/backups/_runners/_getAdaptersByRemote.js (new file, 9 lines)
@@ -0,0 +1,9 @@
'use strict'
const getAdaptersByRemote = adapters => {
  const adaptersByRemote = {}
  adapters.forEach(({ adapter, remoteId }) => {
    adaptersByRemote[remoteId] = adapter
  })
  return adaptersByRemote
}
exports.getAdaptersByRemote = getAdaptersByRemote
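The shape change performed by this helper, with placeholder values:

getAdaptersByRemote([
  { adapter: adapterA, remoteId: 'remote-a' },
  { adapter: adapterB, remoteId: 'remote-b' },
])
// → { 'remote-a': adapterA, 'remote-b': adapterB }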
@xen-orchestra/backups/_runners/_runTask.js (new file, 6 lines)
@@ -0,0 +1,6 @@
'use strict'
const { Task } = require('../Task.js')
const noop = Function.prototype
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs

exports.runTask = runTask
@xen-orchestra/backups/_runners/_vmRunners/FullRemote.js (new file, 53 lines)
@@ -0,0 +1,53 @@
'use strict'

const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { AbstractRemote } = require('./_AbstractRemote')
const { FullRemoteWriter } = require('../_writers/FullRemoteWriter')
const { forkStreamUnpipe } = require('../_forkStreamUnpipe')
const { watchStreamSize } = require('../../_watchStreamSize')
const { Task } = require('../../Task')

class FullRemoteVmBackupRunner extends AbstractRemote {
  _getRemoteWriter() {
    return FullRemoteWriter
  }
  async _run($defer) {
    const transferList = await this._computeTransferList(({ mode }) => mode === 'full')

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')
    if (transferList.length > 0) {
      for (const metadata of transferList) {
        const stream = await this._sourceRemoteAdapter.readFullVmBackup(metadata)
        const sizeContainer = watchStreamSize(stream)

        // @todo shouldn't transfer backup if it will be deleted by retention policy (higher retention on source than destination)
        await this._callWriters(
          writer =>
            writer.run({
              stream: forkStreamUnpipe(stream),
              timestamp: metadata.timestamp,
              vm: metadata.vm,
              vmSnapshot: metadata.vmSnapshot,
              sizeContainer,
            }),
          'writer.run()'
        )
        // for healthcheck
        this._tags = metadata.vm.tags
      }
    } else {
      Task.info('No new data to upload for this VM')
    }
  }
}

exports.FullRemote = FullRemoteVmBackupRunner
decorateMethodsWith(FullRemoteVmBackupRunner, {
  _run: defer,
})
65 @xen-orchestra/backups/_runners/_vmRunners/FullXapi.js Normal file
@@ -0,0 +1,65 @@
'use strict'

const { createLogger } = require('@xen-orchestra/log')

const { forkStreamUnpipe } = require('../_forkStreamUnpipe.js')
const { FullRemoteWriter } = require('../_writers/FullRemoteWriter.js')
const { FullXapiWriter } = require('../_writers/FullXapiWriter.js')
const { watchStreamSize } = require('../../_watchStreamSize.js')
const { AbstractXapi } = require('./_AbstractXapi.js')

const { debug } = createLogger('xo:backups:FullXapiVmBackup')

exports.FullXapi = class FullXapiVmBackupRunner extends AbstractXapi {
  _getWriters() {
    return [FullRemoteWriter, FullXapiWriter]
  }

  _mustDoSnapshot() {
    const vm = this._vm

    const settings = this._settings
    return (
      settings.unconditionalSnapshot ||
      (!settings.offlineBackup && vm.power_state === 'Running') ||
      settings.snapshotRetention !== 0
    )
  }
  _selectBaseVm() {}

  async _copy() {
    const { compression } = this.job
    const vm = this._vm
    const exportedVm = this._exportedVm
    const stream = this._throttleStream(
      await this._xapi.VM_export(exportedVm.$ref, {
        compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
        useSnapshot: false,
      })
    )
    const sizeContainer = watchStreamSize(stream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.run({
          sizeContainer,
          stream: forkStreamUnpipe(stream),
          timestamp,
          vm,
          vmSnapshot: exportedVm,
        }),
      'writer.run()'
    )

    const { size } = sizeContainer
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })
  }
}
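A quick sanity check of the speed computation in the debug call above: size is in bytes and duration in milliseconds, so (size * 1e3) / 1024 / 1024 / duration yields MiB/s.

// e.g. 2 GiB transferred in 100 s:
const size = 2 * 1024 ** 3 // bytes
const duration = 100e3 // milliseconds
console.log((size * 1e3) / 1024 / 1024 / duration) // 20.48 MiB/s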
67 @xen-orchestra/backups/_runners/_vmRunners/IncrementalRemote.js Normal file
@@ -0,0 +1,67 @@
'use strict'
const assert = require('node:assert')

const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { mapValues } = require('lodash')
const { Task } = require('../../Task')
const { AbstractRemote } = require('./_AbstractRemote')
const { IncrementalRemoteWriter } = require('../_writers/IncrementalRemoteWriter')
const { forkDeltaExport } = require('./_forkDeltaExport')
const isVhdDifferencingDisk = require('vhd-lib/isVhdDifferencingDisk')
const { asyncEach } = require('@vates/async-each')

class IncrementalRemoteVmBackupRunner extends AbstractRemote {
  _getRemoteWriter() {
    return IncrementalRemoteWriter
  }
  async _run($defer) {
    const transferList = await this._computeTransferList(({ mode }) => mode === 'delta')
    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    if (transferList.length > 0) {
      for (const metadata of transferList) {
        assert.strictEqual(metadata.mode, 'delta')

        await this._callWriters(writer => writer.prepare({ isBase: metadata.isBase }), 'writer.prepare()')
        const incrementalExport = await this._sourceRemoteAdapter.readIncrementalVmBackup(metadata, undefined, {
          useChain: false,
        })

        const differentialVhds = {}

        await asyncEach(Object.entries(incrementalExport.streams), async ([key, stream]) => {
          differentialVhds[key] = await isVhdDifferencingDisk(stream)
        })

        incrementalExport.streams = mapValues(incrementalExport.streams, this._throttleStream)
        await this._callWriters(
          writer =>
            writer.transfer({
              deltaExport: forkDeltaExport(incrementalExport),
              differentialVhds,
              timestamp: metadata.timestamp,
              vm: metadata.vm,
              vmSnapshot: metadata.vmSnapshot,
            }),
          'writer.transfer()'
        )
        await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
        // for healthcheck
        this._tags = metadata.vm.tags
      }
    } else {
      Task.info('No new data to upload for this VM')
    }
  }
}

exports.IncrementalRemote = IncrementalRemoteVmBackupRunner
decorateMethodsWith(IncrementalRemoteVmBackupRunner, {
  _run: defer,
})
175 @xen-orchestra/backups/_runners/_vmRunners/IncrementalXapi.js Normal file
@@ -0,0 +1,175 @@
'use strict'

const findLast = require('lodash/findLast.js')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const vhdStreamValidator = require('vhd-lib/vhdStreamValidator.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { pipeline } = require('node:stream')

const { IncrementalRemoteWriter } = require('../_writers/IncrementalRemoteWriter.js')
const { IncrementalXapiWriter } = require('../_writers/IncrementalXapiWriter.js')
const { exportIncrementalVm } = require('../../_incrementalVm.js')
const { Task } = require('../../Task.js')
const { watchStreamSize } = require('../../_watchStreamSize.js')
const { AbstractXapi } = require('./_AbstractXapi.js')
const { forkDeltaExport } = require('./_forkDeltaExport.js')
const isVhdDifferencingDisk = require('vhd-lib/isVhdDifferencingDisk')
const { asyncEach } = require('@vates/async-each')

const { debug } = createLogger('xo:backups:IncrementalXapiVmBackup')

const noop = Function.prototype

exports.IncrementalXapi = class IncrementalXapiVmBackupRunner extends AbstractXapi {
  _getWriters() {
    return [IncrementalRemoteWriter, IncrementalXapiWriter]
  }

  _mustDoSnapshot() {
    return true
  }

  async _copy() {
    const baseVm = this._baseVm
    const vm = this._vm
    const exportedVm = this._exportedVm
    const fullVdisRequired = this._fullVdisRequired

    const isFull = fullVdisRequired === undefined || fullVdisRequired.size !== 0

    await this._callWriters(writer => writer.prepare({ isFull }), 'writer.prepare()')

    const deltaExport = await exportIncrementalVm(exportedVm, baseVm, {
      fullVdisRequired,
    })
    // since NBD is network based, if one disk uses NBD, they all do,
    // except the suspended VDI
    if (Object.values(deltaExport.streams).some(({ _nbd }) => _nbd)) {
      Task.info('Transfer data using NBD')
    }

    const differentialVhds = {}
    // since isVhdDifferencingDisk reads and unshifts data from the stream,
    // it must be done BEFORE any other stream transform
    await asyncEach(Object.entries(deltaExport.streams), async ([key, stream]) => {
      differentialVhds[key] = await isVhdDifferencingDisk(stream)
    })
    const sizeContainers = mapValues(deltaExport.streams, stream => watchStreamSize(stream))

    if (this._settings.validateVhdStreams) {
      deltaExport.streams = mapValues(deltaExport.streams, stream => pipeline(stream, vhdStreamValidator, noop))
    }
    deltaExport.streams = mapValues(deltaExport.streams, this._throttleStream)

    const timestamp = Date.now()

    await this._callWriters(
      writer =>
        writer.transfer({
          deltaExport: forkDeltaExport(deltaExport),
          differentialVhds,
          sizeContainers,
          timestamp,
          vm,
          vmSnapshot: exportedVm,
        }),
      'writer.transfer()'
    )

    this._baseVm = exportedVm

    if (baseVm !== undefined) {
      await exportedVm.update_other_config(
        'xo:backup:deltaChainLength',
        String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
      )
    }

    // not the case if offlineBackup
    if (exportedVm.is_a_snapshot) {
      await exportedVm.update_other_config('xo:backup:exported', 'true')
    }

    const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
    const end = Date.now()
    const duration = end - timestamp
    debug('transfer complete', {
      duration,
      speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
      size,
    })

    await this._callWriters(writer => writer.cleanup(), 'writer.cleanup()')
  }

  async _selectBaseVm() {
    const xapi = this._xapi

    let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
    if (baseVm === undefined) {
      debug('no base VM found')
      return
    }

    const fullInterval = this._settings.fullInterval
    const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
    if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
      debug('not using base VM because fullInterval has been reached')
      return
    }

    const srcVdis = keyBy(await xapi.getRecords('VDI', await this._vm.$getDisks()), '$ref')

    // resolve full record
    baseVm = await xapi.getRecord('VM', baseVm.$ref)

    const baseUuidToSrcVdi = new Map()
    await asyncMap(await baseVm.$getDisks(), async baseRef => {
      const [baseUuid, snapshotOf] = await Promise.all([
        xapi.getField('VDI', baseRef, 'uuid'),
        xapi.getField('VDI', baseRef, 'snapshot_of'),
      ])
      const srcVdi = srcVdis[snapshotOf]
      if (srcVdi !== undefined) {
        baseUuidToSrcVdi.set(baseUuid, srcVdi)
      } else {
        debug('ignore snapshot VDI because no longer present on VM', {
          vdi: baseUuid,
        })
      }
    })

    const presentBaseVdis = new Map(baseUuidToSrcVdi)
    await this._callWriters(
      writer => presentBaseVdis.size !== 0 && writer.checkBaseVdis(presentBaseVdis, baseVm),
      'writer.checkBaseVdis()',
      false
    )

    if (presentBaseVdis.size === 0) {
      debug('no base VM found')
      return
    }

    const fullVdisRequired = new Set()
    baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
      if (presentBaseVdis.has(baseUuid)) {
        debug('found base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
      } else {
        debug('missing base VDI', {
          base: baseUuid,
          vdi: srcVdi.uuid,
        })
        fullVdisRequired.add(srcVdi.uuid)
      }
    })

    this._baseVm = baseVm
    this._fullVdisRequired = fullVdisRequired
  }
}
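To make the fullInterval check in _selectBaseVm concrete: a base VM is reused only while fullInterval is 0 (never force a full) or strictly greater than the chain length the new delta would have. A standalone illustration with hypothetical values:

const canUseBase = (fullInterval, baseChainLength) => {
  const deltaChainLength = baseChainLength + 1 // length the new backup would have
  return fullInterval === 0 || fullInterval > deltaChainLength
}

console.log(canUseBase(0, 10)) // true: deltas forever, fulls disabled
console.log(canUseBase(7, 5)) // true: chain would be 6, still below 7
console.log(canUseBase(7, 6)) // false: chain would reach 7, a full is forced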
95 @xen-orchestra/backups/_runners/_vmRunners/_Abstract.js Normal file
@@ -0,0 +1,95 @@
'use strict'

const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { Task } = require('../../Task.js')

const { debug, warn } = createLogger('xo:backups:AbstractVmRunner')

class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

const asyncEach = async (iterable, fn, thisArg = iterable) => {
  for (const item of iterable) {
    await fn.call(thisArg, item)
  }
}

exports.Abstract = class AbstractVmBackupRunner {
  // calls fn for each writer, warns on errors, and throws only if no writers are left
  async _callWriters(fn, step, parallel = true) {
    const writers = this._writers
    const n = writers.size
    if (n === 0) {
      return
    }

    async function callWriter(writer) {
      const { name } = writer.constructor
      try {
        debug('writer step starting', { step, writer: name })
        await fn(writer)
        debug('writer step succeeded', { step, writer: name })
      } catch (error) {
        writers.delete(writer)

        warn('writer step failed', { error, step, writer: name })

        // these two steps are the only ones that are not already in their own sub-tasks
        if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
          Task.warning(
            `the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
          )
        }

        throw error
      }
    }
    if (n === 1) {
      const [writer] = writers
      return callWriter(writer)
    }

    const errors = []
    await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
      try {
        await callWriter(writer)
      } catch (error) {
        errors.push(error)
      }
    })
    if (writers.size === 0) {
      throw new AggregateError(errors, 'all targets have failed, step: ' + step)
    }
  }

  async _healthCheck() {
    const settings = this._settings

    if (this._healthCheckSr === undefined) {
      return
    }

    // check if current VM has tags
    const tags = this._tags
    const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))

    if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
      // create a task to have an info in the logs and reports
      return Task.run(
        {
          name: 'health check',
        },
        () => {
          Task.info(`This VM doesn't match the health check's tags for this schedule`)
        }
      )
    }

    await this._callWriters(writer => writer.healthCheck(), 'writer.healthCheck()')
  }
}
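A hedged sketch of the _callWriters contract, with stub writers (illustrative, not part of the codebase): a failing writer is dropped with a warning while the remaining writers keep the job alive, and the step only fails once no writer is left.

class OkWriter {
  async run() {} // succeeds
}
class BadWriter {
  async run() {
    throw new Error('remote unreachable')
  }
}

// runner._writers = new Set([new OkWriter(), new BadWriter()])
// await runner._callWriters(w => w.run(), 'writer.run()')
// → BadWriter is removed from the set and a warning is logged; nothing is thrown
//
// runner._writers = new Set([new BadWriter(), new BadWriter()])
// await runner._callWriters(w => w.run(), 'writer.run()')
// → AggregateError('all targets have failed, step: writer.run()')
//
// note: with a single writer, its error propagates directly (the n === 1 shortcut)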
97 @xen-orchestra/backups/_runners/_vmRunners/_AbstractRemote.js Normal file
@@ -0,0 +1,97 @@
'use strict'
const { Abstract } = require('./_Abstract')

const { getVmBackupDir } = require('../../_getVmBackupDir')
const { asyncEach } = require('@vates/async-each')
const { Disposable } = require('promise-toolbox')

exports.AbstractRemote = class AbstractRemoteVmBackupRunner extends Abstract {
  constructor({
    config,
    job,
    healthCheckSr,
    remoteAdapters,
    schedule,
    settings,
    sourceRemoteAdapter,
    throttleStream,
    vmUuid,
  }) {
    super()
    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    this._healthCheckSr = healthCheckSr
    this._sourceRemoteAdapter = sourceRemoteAdapter
    this._throttleStream = throttleStream
    this._vmUuid = vmUuid

    const allSettings = job.settings
    const writers = new Set()
    this._writers = writers

    const RemoteWriter = this._getRemoteWriter()
    Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
      const targetSettings = {
        ...settings,
        ...allSettings[remoteId],
      }
      writers.add(
        new RemoteWriter({
          adapter,
          config,
          healthCheckSr,
          job,
          scheduleId: schedule.id,
          vmUuid,
          remoteId,
          settings: targetSettings,
        })
      )
    })
  }

  async _computeTransferList(predicate) {
    const vmBackups = await this._sourceRemoteAdapter.listVmBackups(this._vmUuid, predicate)
    const localMetadata = new Map()
    Object.values(vmBackups).forEach(metadata => {
      const timestamp = metadata.timestamp
      localMetadata.set(timestamp, metadata)
    })
    const nbRemotes = Object.keys(this.remoteAdapters).length
    const remoteMetadatas = {}
    await asyncEach(Object.values(this.remoteAdapters), async remoteAdapter => {
      const remoteMetadata = await remoteAdapter.listVmBackups(this._vmUuid, predicate)
      remoteMetadata.forEach(metadata => {
        const timestamp = metadata.timestamp
        remoteMetadatas[timestamp] = (remoteMetadatas[timestamp] ?? 0) + 1
      })
    })

    let chain = []
    const timestamps = [...localMetadata.keys()]
    timestamps.sort()
    for (const timestamp of timestamps) {
      if (remoteMetadatas[timestamp] !== nbRemotes) {
        // this backup is not present on all the remotes
        // it should be retransferred if not found later
        chain.push(localMetadata.get(timestamp))
      } else {
        // backup is present locally and on every remote: the chain has already been transferred
        chain = []
      }
    }
    return chain
  }

  async run() {
    const handler = this._sourceRemoteAdapter._handler
    await Disposable.use(await handler.lock(getVmBackupDir(this._vmUuid)), async () => {
      await this._run()
      await this._healthCheck()
    })
  }
}
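To illustrate _computeTransferList with hypothetical data: a backup already present on every remote resets the chain, so only the trailing run of not-fully-mirrored backups is retransferred.

const nbRemotes = 2
// number of remotes (out of 2) already holding each local backup, oldest first
const counts = { t1: 2, t2: 2, t3: 1, t4: 0 }
let chain = []
for (const ts of ['t1', 't2', 't3', 't4']) {
  if (counts[ts] !== nbRemotes) chain.push(ts)
  else chain = [] // fully mirrored: everything before it is already safe
}
console.log(chain) // [ 't3', 't4' ]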
278 @xen-orchestra/backups/_runners/_vmRunners/_AbstractXapi.js Normal file
@@ -0,0 +1,278 @@
'use strict'

const assert = require('assert')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')

const { getOldEntries } = require('../../_getOldEntries.js')
const { Task } = require('../../Task.js')
const { Abstract } = require('./_Abstract.js')

class AbstractXapiVmBackupRunner extends Abstract {
  constructor({
    config,
    getSnapshotNameLabel,
    healthCheckSr,
    job,
    remoteAdapters,
    remotes,
    schedule,
    settings,
    srs,
    throttleStream,
    vm,
  }) {
    super()
    if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
      // don't match replicated VMs created by this very job otherwise they
      // will be replicated again and again
      throw new Error('cannot backup a VM created by this very job')
    }

    this.config = config
    this.job = job
    this.remoteAdapters = remoteAdapters
    this.scheduleId = schedule.id
    this.timestamp = undefined

    // VM currently backed up
    const tags = (this._tags = vm.tags)

    // VM (snapshot) that is really exported
    this._exportedVm = undefined
    this._vm = vm

    this._fullVdisRequired = undefined
    this._getSnapshotNameLabel = getSnapshotNameLabel
    this._isIncremental = job.mode === 'delta'
    this._healthCheckSr = healthCheckSr
    this._jobId = job.id
    this._jobSnapshots = undefined
    this._throttleStream = throttleStream
    this._xapi = vm.$xapi

    // Base VM for the export
    this._baseVm = undefined

    // Settings for this specific run (job, schedule, VM)
    if (tags.includes('xo-memory-backup')) {
      settings.checkpointSnapshot = true
    }
    if (tags.includes('xo-offline-backup')) {
      settings.offlineSnapshot = true
    }
    this._settings = settings
    // Create writers
    {
      const writers = new Set()
      this._writers = writers

      const [BackupWriter, ReplicationWriter] = this._getWriters()

      const allSettings = job.settings
      Object.entries(remoteAdapters).forEach(([remoteId, adapter]) => {
        const targetSettings = {
          ...settings,
          ...allSettings[remoteId],
        }
        if (targetSettings.exportRetention !== 0) {
          writers.add(
            new BackupWriter({
              adapter,
              config,
              healthCheckSr,
              job,
              scheduleId: schedule.id,
              vmUuid: vm.uuid,
              remoteId,
              settings: targetSettings,
            })
          )
        }
      })
      srs.forEach(sr => {
        const targetSettings = {
          ...settings,
          ...allSettings[sr.uuid],
        }
        if (targetSettings.copyRetention !== 0) {
          writers.add(
            new ReplicationWriter({
              config,
              healthCheckSr,
              job,
              scheduleId: schedule.id,
              vmUuid: vm.uuid,
              sr,
              settings: targetSettings,
            })
          )
        }
      })
    }
  }

  // ensure the VM itself does not have any backup metadata which would be
  // copied on manual snapshots and interfere with the backup jobs
  async _cleanMetadata() {
    const vm = this._vm
    if ('xo:backup:job' in vm.other_config) {
      await vm.update_other_config({
        'xo:backup:datetime': null,
        'xo:backup:deltaChainLength': null,
        'xo:backup:exported': null,
        'xo:backup:job': null,
        'xo:backup:schedule': null,
        'xo:backup:vm': null,
      })
    }
  }

  async _snapshot() {
    const vm = this._vm
    const xapi = this._xapi

    const settings = this._settings

    if (this._mustDoSnapshot()) {
      await Task.run({ name: 'snapshot' }, async () => {
        if (!settings.bypassVdiChainsCheck) {
          await vm.$assertHealthyVdiChains()
        }

        const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
          ignoreNobakVdis: true,
          name_label: this._getSnapshotNameLabel(vm),
          unplugVusbs: true,
        })
        this.timestamp = Date.now()

        await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
          'xo:backup:datetime': formatDateTime(this.timestamp),
          'xo:backup:job': this._jobId,
          'xo:backup:schedule': this.scheduleId,
          'xo:backup:vm': vm.uuid,
        })

        this._exportedVm = await xapi.getRecord('VM', snapshotRef)

        return this._exportedVm.uuid
      })
    } else {
      this._exportedVm = vm
      this.timestamp = Date.now()
    }
  }

  async _fetchJobSnapshots() {
    const jobId = this._jobId
    const vmRef = this._vm.$ref
    const xapi = this._xapi

    const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
    const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))

    const snapshots = []
    snapshotsOtherConfig.forEach((other_config, i) => {
      if (other_config['xo:backup:job'] === jobId) {
        snapshots.push({ other_config, $ref: snapshotsRef[i] })
      }
    })
    snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
    this._jobSnapshots = snapshots
  }

  async _removeUnusedSnapshots() {
    const allSettings = this.job.settings
    const baseSettings = this._baseSettings
    const baseVmRef = this._baseVm?.$ref

    const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
    const xapi = this._xapi
    await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
      const settings = {
        ...baseSettings,
        ...allSettings[scheduleId],
        ...allSettings[this._vm.uuid],
      }
      return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
        if ($ref !== baseVmRef) {
          return xapi.VM_destroy($ref)
        }
      })
    })
  }

  async copy() {
    throw new Error('Not implemented')
  }

  _getWriters() {
    throw new Error('Not implemented')
  }

  _mustDoSnapshot() {
    throw new Error('Not implemented')
  }

  async _selectBaseVm() {
    throw new Error('Not implemented')
  }

  async run($defer) {
    const settings = this._settings
    assert(
      !settings.offlineBackup || settings.snapshotRetention === 0,
      'offlineBackup is not compatible with snapshotRetention'
    )

    await this._callWriters(async writer => {
      await writer.beforeBackup()
      $defer(async () => {
        await writer.afterBackup()
      })
    }, 'writer.beforeBackup()')

    await this._fetchJobSnapshots()

    await this._selectBaseVm()

    await this._cleanMetadata()
    await this._removeUnusedSnapshots()

    const vm = this._vm
    const isRunning = vm.power_state === 'Running'
    const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
    if (startAfter) {
      await vm.$callAsync('clean_shutdown')
    }

    try {
      await this._snapshot()
      if (startAfter === 'snapshot') {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      if (this._writers.size !== 0) {
        await this._copy()
      }
    } finally {
      if (startAfter) {
        ignoreErrors.call(vm.$callAsync('start', false, false))
      }

      await this._fetchJobSnapshots()
      await this._removeUnusedSnapshots()
    }
    await this._healthCheck()
  }
}
exports.AbstractXapi = AbstractXapiVmBackupRunner

decorateMethodsWith(AbstractXapiVmBackupRunner, {
  run: defer,
})
12 @xen-orchestra/backups/_runners/_vmRunners/_forkDeltaExport.js Normal file
@@ -0,0 +1,12 @@
'use strict'

const { mapValues } = require('lodash')
const { forkStreamUnpipe } = require('../_forkStreamUnpipe')

exports.forkDeltaExport = function forkDeltaExport(deltaExport) {
  return Object.create(deltaExport, {
    streams: {
      value: mapValues(deltaExport.streams, forkStreamUnpipe),
    },
  })
}
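The Object.create call above shares every property of the delta export through the prototype while overriding only streams with per-writer forks. A hedged illustration (the stream value is hypothetical):

const { PassThrough } = require('node:stream')
const { forkDeltaExport } = require('./_forkDeltaExport')

const deltaExport = { vm: { name_label: 'db1' }, streams: { 'vdi.vhd': new PassThrough() } }
const fork = forkDeltaExport(deltaExport)

console.log(fork.vm === deltaExport.vm) // true: inherited via the prototype
console.log(fork.streams['vdi.vhd'] !== deltaExport.streams['vdi.vhd']) // true: an independent fork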
@@ -1,13 +1,13 @@
 'use strict'

-const { formatFilenameDate } = require('../_filenameDate.js')
-const { getOldEntries } = require('../_getOldEntries.js')
-const { Task } = require('../Task.js')
+const { formatFilenameDate } = require('../../_filenameDate.js')
+const { getOldEntries } = require('../../_getOldEntries.js')
+const { Task } = require('../../Task.js')

-const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
+const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
 const { AbstractFullWriter } = require('./_AbstractFullWriter.js')

-exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(AbstractFullWriter) {
+exports.FullRemoteWriter = class FullRemoteWriter extends MixinRemoteWriter(AbstractFullWriter) {
   constructor(props) {
     super(props)

@@ -26,15 +26,17 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
     )
   }

-  async _run({ timestamp, sizeContainer, stream }) {
-    const backup = this._backup
+  async _run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
     const settings = this._settings

-    const { job, scheduleId, vm } = backup
+    const job = this._job
+    const scheduleId = this._scheduleId

     const adapter = this._adapter

     // TODO: clean VM backup directory
+    let metadata = await this._isAlreadyTransferred(timestamp)
+    if (metadata !== undefined) {
+      // @todo: should skip the backup while taking care not to stall the forked stream
+      Task.info('This backup has already been transferred')
+    }

     const oldBackups = getOldEntries(
       settings.exportRetention - 1,
@@ -47,14 +49,14 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
     const dataBasename = basename + '.xva'
     const dataFilename = this._vmBackupDir + '/' + dataBasename

-    const metadata = {
+    metadata = {
       jobId: job.id,
       mode: job.mode,
       scheduleId,
       timestamp,
       version: '2.0.0',
       vm,
-      vmSnapshot: this._backup.exportedVm,
+      vmSnapshot,
       xva: './' + dataBasename,
     }
@@ -4,15 +4,15 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
 const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
 const { formatDateTime } = require('@xen-orchestra/xapi')

-const { formatFilenameDate } = require('../_filenameDate.js')
-const { getOldEntries } = require('../_getOldEntries.js')
-const { Task } = require('../Task.js')
+const { formatFilenameDate } = require('../../_filenameDate.js')
+const { getOldEntries } = require('../../_getOldEntries.js')
+const { Task } = require('../../Task.js')

 const { AbstractFullWriter } = require('./_AbstractFullWriter.js')
-const { MixinReplicationWriter } = require('./_MixinReplicationWriter.js')
+const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
 const { listReplicatedVms } = require('./_listReplicatedVms.js')

-exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplicationWriter(AbstractFullWriter) {
+exports.FullXapiWriter = class FullXapiWriter extends MixinXapiWriter(AbstractFullWriter) {
   constructor(props) {
     super(props)

@@ -21,6 +21,7 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
         name: 'export',
         data: {
           id: props.sr.uuid,
+          name_label: this._sr.name_label,
           type: 'SR',

           // necessary?
@@ -31,10 +32,11 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
     )
   }

-  async _run({ timestamp, sizeContainer, stream }) {
+  async _run({ timestamp, sizeContainer, stream, vm }) {
     const sr = this._sr
     const settings = this._settings
-    const { job, scheduleId, vm } = this._backup
+    const job = this._job
+    const scheduleId = this._scheduleId

     const { uuid: srUuid, $xapi: xapi } = sr

@@ -46,7 +48,7 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
     const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))

     const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
-    const { deleteFirst } = settings
+    const { deleteFirst, _warmMigration } = settings
     if (deleteFirst) {
       await deleteOldBackups()
     }
@@ -55,14 +57,18 @@ exports.FullReplicationWriter = class FullReplicationWriter extends MixinReplica
     await Task.run({ name: 'transfer' }, async () => {
       targetVmRef = await xapi.VM_import(stream, sr.$ref, vm =>
         Promise.all([
-          vm.add_tags('Disaster Recovery'),
-          vm.ha_restart_priority !== '' && Promise.all([vm.set_ha_restart_priority(''), vm.add_tags('HA disabled')]),
+          !_warmMigration && vm.add_tags('Disaster Recovery'),
+          // warm migration does not disable HA, since the goal is to start the new VM in production
+          !_warmMigration &&
+            vm.ha_restart_priority !== '' &&
+            Promise.all([vm.set_ha_restart_priority(''), vm.add_tags('HA disabled')]),
           vm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
         ])
       )
       return { size: sizeContainer.size }
     })

+    this._targetVmRef = targetVmRef
     const targetVm = await xapi.getRecord('VM', targetVmRef)

     await Promise.all([
@@ -7,28 +7,28 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
 const { asyncMap } = require('@xen-orchestra/async-map')
 const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
 const { createLogger } = require('@xen-orchestra/log')
+const { decorateClass } = require('@vates/decorate-with')
+const { defer } = require('golike-defer')
 const { dirname } = require('path')

-const { formatFilenameDate } = require('../_filenameDate.js')
-const { getOldEntries } = require('../_getOldEntries.js')
-const { Task } = require('../Task.js')
+const { formatFilenameDate } = require('../../_filenameDate.js')
+const { getOldEntries } = require('../../_getOldEntries.js')
+const { Task } = require('../../Task.js')

-const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
-const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
+const { MixinRemoteWriter } = require('./_MixinRemoteWriter.js')
+const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
 const { checkVhd } = require('./_checkVhd.js')
 const { packUuid } = require('./_packUuid.js')
 const { Disposable } = require('promise-toolbox')
-const NbdClient = require('@vates/nbd-client')

-const { debug, warn, info } = createLogger('xo:backups:DeltaBackupWriter')
+const { warn } = createLogger('xo:backups:DeltaBackupWriter')

-exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
+class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrementalWriter) {
   async checkBaseVdis(baseUuidToSrcVdi) {
     const { handler } = this._adapter
-    const backup = this._backup
+    const adapter = this._adapter

-    const vdisDir = `${this._vmBackupDir}/vdis/${backup.job.id}`
+    const vdisDir = `${this._vmBackupDir}/vdis/${this._job.id}`

     await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
       let found = false
@@ -90,11 +90,12 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
   async _prepare() {
     const adapter = this._adapter
     const settings = this._settings
-    const { scheduleId, vm } = this._backup
+    const scheduleId = this._scheduleId
+    const vmUuid = this._vmUuid

     const oldEntries = getOldEntries(
       settings.exportRetention - 1,
-      await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
+      await adapter.listVmBackups(vmUuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
     )
     this._oldEntries = oldEntries

@@ -133,16 +134,19 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
     }
   }

-  async _transfer({ timestamp, deltaExport }) {
+  async _transfer($defer, { differentialVhds, timestamp, deltaExport, vm, vmSnapshot }) {
     const adapter = this._adapter
-    const backup = this._backup

-    const { job, scheduleId, vm } = backup
+    const job = this._job
+    const scheduleId = this._scheduleId

     const jobId = job.id
     const handler = adapter.handler

     // TODO: clean VM backup directory
+    let metadataContent = await this._isAlreadyTransferred(timestamp)
+    if (metadataContent !== undefined) {
+      // @todo: should skip the backup while taking care not to stall the forked stream
+      Task.info('This backup has already been transferred')
+    }

     const basename = formatFilenameDate(timestamp)
     const vhds = mapValues(
@@ -157,7 +161,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
       }/${adapter.getVhdFileName(basename)}`
     )

-    const metadataContent = {
+    metadataContent = {
       jobId,
       mode: job.mode,
       scheduleId,
@@ -168,16 +172,15 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
       vifs: deltaExport.vifs,
       vhds,
       vm,
-      vmSnapshot: this._backup.exportedVm,
+      vmSnapshot,
     }

     const { size } = await Task.run({ name: 'transfer' }, async () => {
       let transferSize = 0
       await Promise.all(
         map(deltaExport.vdis, async (vdi, id) => {
           const path = `${this._vmBackupDir}/${vhds[id]}`

-          const isDelta = vdi.other_config['xo:base_delta'] !== undefined
+          const isDelta = differentialVhds[`${id}.vhd`]
           let parentPath
           if (isDelta) {
             const vdiDir = dirname(path)
@@ -190,7 +193,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
               .sort()
               .pop()

-            assert.notStrictEqual(parentPath, undefined, `missing parent of ${id}`)
+            assert.notStrictEqual(
+              parentPath,
+              undefined,
+              `missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config['xo:base_delta']}`
+            )

             parentPath = parentPath.slice(1) // remove leading slash

@@ -198,34 +205,13 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
             await checkVhd(handler, parentPath)
           }

-          const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
-
-          let nbdClient
-          if (this._backup.config.useNbd) {
-            debug('useNbd is enabled', { vdi: id, path })
-            // get nbd if possible
-            try {
-              // this will always take the first host in the list
-              const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
-              debug('got NBD info', { nbdInfo, vdi: id, path })
-              nbdClient = new NbdClient(nbdInfo)
-              await nbdClient.connect()
-              info('NBD client ready', { vdi: id, path })
-            } catch (error) {
-              nbdClient = undefined
-              warn('error connecting to NBD server', { error, vdi: id, path })
-            }
-          } else {
-            debug('useNbd is disabled', { vdi: id, path })
-          }
-
           transferSize += await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
             // no checksum for VHDs, because they will be invalidated by
             // merges and chainings
             checksum: false,
             validator: tmpPath => checkVhd(handler, tmpPath),
-            writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
-            nbdClient,
+            writeBlockConcurrency: this._config.writeBlockConcurrency,
+            isDelta,
           })

           if (isDelta) {
@@ -248,3 +234,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
     // TODO: run cleanup?
   }
 }
+exports.IncrementalRemoteWriter = decorateClass(IncrementalRemoteWriter, {
+  _transfer: defer,
+})
@@ -4,19 +4,19 @@ const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
 const ignoreErrors = require('promise-toolbox/ignoreErrors')
 const { formatDateTime } = require('@xen-orchestra/xapi')

-const { formatFilenameDate } = require('../_filenameDate.js')
-const { getOldEntries } = require('../_getOldEntries.js')
-const { importDeltaVm, TAG_COPY_SRC } = require('../_deltaVm.js')
-const { Task } = require('../Task.js')
+const { formatFilenameDate } = require('../../_filenameDate.js')
+const { getOldEntries } = require('../../_getOldEntries.js')
+const { importIncrementalVm, TAG_COPY_SRC } = require('../../_incrementalVm.js')
+const { Task } = require('../../Task.js')

-const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
-const { MixinReplicationWriter } = require('./_MixinReplicationWriter.js')
+const { AbstractIncrementalWriter } = require('./_AbstractIncrementalWriter.js')
+const { MixinXapiWriter } = require('./_MixinXapiWriter.js')
 const { listReplicatedVms } = require('./_listReplicatedVms.js')

-exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinReplicationWriter(AbstractDeltaWriter) {
+exports.IncrementalXapiWriter = class IncrementalXapiWriter extends MixinXapiWriter(AbstractIncrementalWriter) {
   async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
     const sr = this._sr
-    const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
+    const replicatedVm = listReplicatedVms(sr.$xapi, this._job.id, sr.uuid, this._vmUuid).find(
       vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
     )
     if (replicatedVm === undefined) {
@@ -45,11 +45,14 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
       data: {
         id: this._sr.uuid,
         isFull,
+        name_label: this._sr.name_label,
         type: 'SR',
       },
     })
+    const hasHealthCheckSr = this._healthCheckSr !== undefined
     this.transfer = task.wrapFn(this.transfer)
-    this.cleanup = task.wrapFn(this.cleanup, true)
+    this.cleanup = task.wrapFn(this.cleanup, !hasHealthCheckSr)
+    this.healthCheck = task.wrapFn(this.healthCheck, hasHealthCheckSr)

     return task.run(() => this._prepare())
   }
@@ -57,12 +60,13 @@
   async _prepare() {
     const settings = this._settings
     const { uuid: srUuid, $xapi: xapi } = this._sr
-    const { scheduleId, vm } = this._backup
+    const vmUuid = this._vmUuid
+    const scheduleId = this._scheduleId

     // delete previous interrupted copies
-    ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => vm.$destroy))
+    ignoreErrors.call(asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vmUuid), vm => vm.$destroy))

-    this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
+    this._oldEntries = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vmUuid))

     if (settings.deleteFirst) {
       await this._deleteOldEntries()
@@ -79,20 +83,22 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
     return asyncMapSettled(this._oldEntries, vm => vm.$destroy())
   }

-  async _transfer({ timestamp, deltaExport, sizeContainers }) {
+  async _transfer({ timestamp, deltaExport, sizeContainers, vm }) {
+    const { _warmMigration } = this._settings
     const sr = this._sr
-    const { job, scheduleId, vm } = this._backup
+    const job = this._job
+    const scheduleId = this._scheduleId

     const { uuid: srUuid, $xapi: xapi } = sr

     let targetVmRef
     await Task.run({ name: 'transfer' }, async () => {
-      targetVmRef = await importDeltaVm(
+      targetVmRef = await importIncrementalVm(
         {
           __proto__: deltaExport,
           vm: {
             ...deltaExport.vm,
-            tags: [...deltaExport.vm.tags, 'Continuous Replication'],
+            tags: _warmMigration ? deltaExport.vm.tags : [...deltaExport.vm.tags, 'Continuous Replication'],
           },
         },
         sr
@@ -101,11 +107,13 @@ exports.DeltaReplicationWriter = class DeltaReplicationWriter extends MixinRepli
         size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
       }
     })

+    this._targetVmRef = targetVmRef
     const targetVm = await xapi.getRecord('VM', targetVmRef)

     await Promise.all([
-      targetVm.ha_restart_priority !== '' &&
+      // warm migration does not disable HA, since the goal is to start the new VM in production
+      !_warmMigration &&
+        targetVm.ha_restart_priority !== '' &&
         Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
       targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
       asyncMap(['start', 'start_on'], op =>
@@ -3,9 +3,9 @@
 const { AbstractWriter } = require('./_AbstractWriter.js')

 exports.AbstractFullWriter = class AbstractFullWriter extends AbstractWriter {
-  async run({ timestamp, sizeContainer, stream }) {
+  async run({ timestamp, sizeContainer, stream, vm, vmSnapshot }) {
     try {
-      return await this._run({ timestamp, sizeContainer, stream })
+      return await this._run({ timestamp, sizeContainer, stream, vm, vmSnapshot })
     } finally {
       // ensure stream is properly closed
       stream.destroy()
@@ -2,7 +2,7 @@

 const { AbstractWriter } = require('./_AbstractWriter.js')

-exports.AbstractDeltaWriter = class AbstractDeltaWriter extends AbstractWriter {
+exports.AbstractIncrementalWriter = class AbstractIncrementalWriter extends AbstractWriter {
   checkBaseVdis(baseUuidToSrcVdi, baseVm) {
     throw new Error('Not implemented')
   }
@@ -15,9 +15,9 @@ exports.AbstractDeltaWriter = class AbstractDeltaWriter extends AbstractWriter {
     throw new Error('Not implemented')
   }

-  async transfer({ timestamp, deltaExport, sizeContainers }) {
+  async transfer({ deltaExport, ...other }) {
     try {
-      return await this._transfer({ timestamp, deltaExport, sizeContainers })
+      return await this._transfer({ deltaExport, ...other })
     } finally {
       // ensure all streams are properly closed
       for (const stream of Object.values(deltaExport.streams)) {
31 @xen-orchestra/backups/_runners/_writers/_AbstractWriter.js Normal file
@@ -0,0 +1,31 @@
'use strict'

const { formatFilenameDate } = require('../../_filenameDate')
const { getVmBackupDir } = require('../../_getVmBackupDir')

exports.AbstractWriter = class AbstractWriter {
  constructor({ config, healthCheckSr, job, vmUuid, scheduleId, settings }) {
    this._config = config
    this._healthCheckSr = healthCheckSr
    this._job = job
    this._scheduleId = scheduleId
    this._settings = settings
    this._vmUuid = vmUuid
  }

  beforeBackup() {}

  afterBackup() {}

  healthCheck(sr) {}

  async _isAlreadyTransferred(timestamp) {
    const vmUuid = this._vmUuid
    const adapter = this._adapter
    const backupDir = getVmBackupDir(vmUuid)
    try {
      // readFile returns a promise, hence the await before parsing
      const actualMetadata = JSON.parse(await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`))
      return actualMetadata
    } catch (error) {}
  }
}
@@ -4,26 +4,26 @@ const { createLogger } = require('@xen-orchestra/log')
 const { join } = require('path')

 const assert = require('assert')
-const { formatFilenameDate } = require('../_filenameDate.js')
-const { getVmBackupDir } = require('../_getVmBackupDir.js')
-const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
-const { ImportVmBackup } = require('../ImportVmBackup.js')
-const { Task } = require('../Task.js')
-const MergeWorker = require('../merge-worker/index.js')
+const { formatFilenameDate } = require('../../_filenameDate.js')
+const { getVmBackupDir } = require('../../_getVmBackupDir.js')
+const { HealthCheckVmBackup } = require('../../HealthCheckVmBackup.js')
+const { ImportVmBackup } = require('../../ImportVmBackup.js')
+const { Task } = require('../../Task.js')
+const MergeWorker = require('../../merge-worker/index.js')

 const { info, warn } = createLogger('xo:backups:MixinBackupWriter')

-exports.MixinBackupWriter = (BaseClass = Object) =>
-  class MixinBackupWriter extends BaseClass {
+exports.MixinRemoteWriter = (BaseClass = Object) =>
+  class MixinRemoteWriter extends BaseClass {
     #lock

-    constructor({ remoteId, ...rest }) {
+    constructor({ remoteId, adapter, ...rest }) {
       super(rest)

-      this._adapter = rest.backup.remoteAdapters[remoteId]
+      this._adapter = adapter
       this._remoteId = remoteId

-      this._vmBackupDir = getVmBackupDir(this._backup.vm.uuid)
+      this._vmBackupDir = getVmBackupDir(rest.vmUuid)
     }

     async _cleanVm(options) {
@@ -38,7 +38,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
           Task.warning(message, data)
         },
         lock: false,
-        mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
+        mergeBlockConcurrency: this._config.mergeBlockConcurrency,
       })
     })
   } catch (error) {
@@ -55,10 +55,10 @@
     }

     async afterBackup() {
-      const { disableMergeWorker } = this._backup.config
+      const { disableMergeWorker } = this._config
       // merge worker only compatible with local remotes
       const { handler } = this._adapter
-      const willMergeInWorker = !disableMergeWorker && typeof handler._getRealPath === 'function'
+      const willMergeInWorker = !disableMergeWorker && typeof handler.getRealPath === 'function'

       const { merge } = await this._cleanVm({ remove: true, merge: !willMergeInWorker })
       await this.#lock.dispose()
@@ -70,17 +70,19 @@
         // add a random suffix to avoid collision in case multiple tasks are created at the same second
         Math.random().toString(36).slice(2)

-        await handler.outputFile(taskFile, this._backup.vm.uuid)
-        const remotePath = handler._getRealPath()
+        await handler.outputFile(taskFile, this._vmUuid)
+        const remotePath = handler.getRealPath()
         await MergeWorker.run(remotePath)
       }
     }

-    healthCheck(sr) {
+    healthCheck() {
+      const sr = this._healthCheckSr
       assert.notStrictEqual(sr, undefined, 'SR should be defined before making a health check')
       assert.notStrictEqual(
         this._metadataFileName,
         undefined,
-        'Metadata file name should be defined before making a healthcheck'
+        'Metadata file name should be defined before making a health check'
       )
       return Task.run(
         {
@@ -109,4 +111,16 @@
         }
       )
     }

+    async _isAlreadyTransferred(timestamp) {
+      const vmUuid = this._vmUuid
+      const adapter = this._adapter
+      const backupDir = getVmBackupDir(vmUuid)
+      try {
+        const actualMetadata = JSON.parse(
+          await adapter._handler.readFile(`${backupDir}/${formatFilenameDate(timestamp)}.json`)
+        )
+        return actualMetadata
+      } catch (error) {}
+    }
   }
46 @xen-orchestra/backups/_runners/_writers/_MixinXapiWriter.js Normal file
@@ -0,0 +1,46 @@
'use strict'

const { extractOpaqueRef } = require('@xen-orchestra/xapi')

const { Task } = require('../../Task')
const assert = require('node:assert/strict')
const { HealthCheckVmBackup } = require('../../HealthCheckVmBackup')

exports.MixinXapiWriter = (BaseClass = Object) =>
  class MixinXapiWriter extends BaseClass {
    constructor({ sr, ...rest }) {
      super(rest)

      this._sr = sr
    }

    healthCheck() {
      const sr = this._healthCheckSr
      assert.notStrictEqual(sr, undefined, 'SR should be defined before making a health check')
      assert.notEqual(this._targetVmRef, undefined, 'A VM should have been transferred to be health checked')
      // copy VM
      return Task.run(
        {
          name: 'health check',
        },
        async () => {
          const { $xapi: xapi } = sr
          let clonedVm
          try {
            const baseVm = xapi.getObject(this._targetVmRef) ?? (await xapi.waitObject(this._targetVmRef))
            const clonedRef = await xapi
              .callAsync('VM.clone', this._targetVmRef, `Health Check - ${baseVm.name_label}`)
              .then(extractOpaqueRef)
            clonedVm = xapi.getObject(clonedRef) ?? (await xapi.waitObject(clonedRef))

            await new HealthCheckVmBackup({
              restoredVm: clonedVm,
              xapi,
            }).run()
          } finally {
            clonedVm && (await xapi.VM_destroy(clonedVm.$ref))
          }
        }
      )
    }
  }
@@ -94,13 +94,13 @@ In case any incoherence is detected, the file is deleted so it will be fully gen
 job.start(data: { mode: Mode, reportWhen: ReportWhen })
 ├─ task.info(message: 'vms', data: { vms: string[] })
 ├─ task.warning(message: string)
-├─ task.start(data: { type: 'VM', id: string })
+├─ task.start(data: { type: 'VM', id: string, name_label?: string })
 │ ├─ task.warning(message: string)
 │ ├─ task.start(message: 'clean-vm')
 │ │ └─ task.end
 │ ├─ task.start(message: 'snapshot')
 │ │ └─ task.end
-│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
+│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, name_label?: string, isFull: boolean })
 │ │ ├─ task.warning(message: string)
 │ │ ├─ task.start(message: 'transfer')
 │ │ │ ├─ task.warning(message: string)
@@ -228,7 +228,7 @@ Settings are described in [`@xen-orchestra/backups/Backup.js](https://github.com
 - `prepare({ isFull })`
 - `transfer({ timestamp, deltaExport, sizeContainers })`
 - `cleanup()`
-- `healthCheck(sr)`
+- `healthCheck()` // not executed if there is no health check SR or if the VM tags don't match
 - **Full**
 - `run({ timestamp, sizeContainer, stream })`
 - `afterBackup()`
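To make the writer interface documented above concrete, a minimal sketch of a hypothetical no-op incremental writer honouring this lifecycle (names and bodies are illustrative only, not part of the codebase):

class NullIncrementalWriter {
  async beforeBackup() {} // acquire locks, open whatever is needed
  async checkBaseVdis(baseUuidToSrcVdi, baseVm) {} // remove unusable bases from the map
  async prepare({ isFull }) {} // delete-first retention handling, task setup
  async transfer({ timestamp, deltaExport, sizeContainers }) {
    // must consume every stream, even when skipping, so forks don't stall
  }
  async cleanup() {} // post-transfer merge/cleanup work
  async healthCheck() {} // only runs with a health check SR and matching tags
  async afterBackup() {} // release locks, kick the merge worker
}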
35 @xen-orchestra/backups/docs/healthcheck/example.sh Normal file
@@ -0,0 +1,35 @@
#!/bin/sh

# This script must be executed at the start of the machine.
#
# It must run as root to be able to use xenstore-read and xenstore-write

# fail in case of error or undefined variable
set -eu

# stop there if a health check is not in progress
if [ "$(xenstore-read vm-data/xo-backup-health-check 2>&1)" != planned ]
then
  exit
fi

# not necessary, but informs XO that this script has started, which helps diagnose issues
xenstore-write vm-data/xo-backup-health-check running

# put your test here
#
# in this example, the command `sqlite3` is used to validate the health of a database
# and its output is captured and passed to XO via the XenStore in case of error
if output=$(sqlite3 ~/my-database.sqlite3 .table 2>&1)
then
  # inform XO everything is ok
  xenstore-write vm-data/xo-backup-health-check success
else
  # inform XO there is an issue
  xenstore-write vm-data/xo-backup-health-check failure

  # more info about the issue can be written to `vm-data/xo-backup-health-check-error`
  #
  # it will be shown in XO
  xenstore-write vm-data/xo-backup-health-check-error "$output"
fi
@@ -8,31 +8,31 @@
     "type": "git",
     "url": "https://github.com/vatesfr/xen-orchestra.git"
   },
-  "version": "0.29.5",
+  "version": "0.38.2",
   "engines": {
     "node": ">=14.6"
   },
   "scripts": {
     "postversion": "npm publish --access public",
-    "test": "node--test"
+    "test-integration": "node--test *.integ.js"
   },
   "dependencies": {
     "@kldzj/stream-throttle": "^1.1.1",
     "@vates/async-each": "^1.0.0",
     "@vates/cached-dns.lookup": "^1.0.0",
     "@vates/compose": "^2.1.0",
     "@vates/decorate-with": "^2.0.0",
     "@vates/disposable": "^0.1.4",
     "@vates/fuse-vhd": "^1.0.0",
-    "@vates/nbd-client": "*",
+    "@vates/nbd-client": "^1.2.0",
     "@vates/parse-duration": "^0.1.1",
     "@xen-orchestra/async-map": "^0.1.2",
-    "@xen-orchestra/fs": "^3.3.1",
+    "@xen-orchestra/fs": "^4.0.0",
     "@xen-orchestra/log": "^0.6.0",
     "@xen-orchestra/template": "^0.1.0",
     "compare-versions": "^5.0.1",
     "d3-time-format": "^3.0.0",
     "decorator-synchronized": "^0.6.0",
     "end-of-stream": "^1.4.4",
     "fs-extra": "^11.1.0",
     "golike-defer": "^0.5.1",
     "limit-concurrency-decorator": "^0.5.0",
@@ -42,17 +42,17 @@
     "promise-toolbox": "^0.21.0",
     "proper-lockfile": "^4.1.2",
     "uuid": "^9.0.0",
-    "vhd-lib": "^4.2.1",
+    "vhd-lib": "^4.5.0",
     "yazl": "^2.5.1"
   },
   "devDependencies": {
-    "rimraf": "^4.1.1",
+    "rimraf": "^5.0.1",
     "sinon": "^15.0.1",
     "test": "^3.2.1",
     "tmp": "^0.2.1"
   },
   "peerDependencies": {
-    "@xen-orchestra/xapi": "^1.6.1"
+    "@xen-orchestra/xapi": "^2.2.1"
   },
   "license": "AGPL-3.0-or-later",
   "author": {
@@ -12,7 +12,7 @@ exports.runBackupWorker = function runBackupWorker(params, onLog) {
  return new Promise((resolve, reject) => {
    const worker = fork(PATH)

    worker.on('exit', code => reject(new Error(`worker exited with code ${code}`)))
    worker.on('exit', (code, signal) => reject(new Error(`worker exited with code ${code} and signal ${signal}`)))
    worker.on('error', reject)

    worker.on('message', message => {

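The new `exit` handler above also reports the termination signal. With `child_process.fork`, a worker killed by a signal exits with `code === null`, so without the signal the error message hides the actual cause (e.g. an OOM kill). A standalone sketch, assuming a `./worker.js` module exists:

```js
'use strict'
const { fork } = require('node:child_process')

// A worker killed by a signal reports code === null plus the signal name,
// which is why the error message above includes both values.
const worker = fork('./worker.js') // hypothetical worker module
worker.on('exit', (code, signal) => {
  console.log(`worker exited with code ${code} and signal ${signal}`)
})
worker.kill('SIGKILL') // prints: worker exited with code null and signal SIGKILL
```
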
@@ -1,14 +0,0 @@
'use strict'

exports.AbstractWriter = class AbstractWriter {
  constructor({ backup, settings }) {
    this._backup = backup
    this._settings = settings
  }

  beforeBackup() {}

  afterBackup() {}

  healthCheck(sr) {}
}
@@ -1,10 +0,0 @@
'use strict'

exports.MixinReplicationWriter = (BaseClass = Object) =>
  class MixinReplicationWriter extends BaseClass {
    constructor({ sr, ...rest }) {
      super(rest)

      this._sr = sr
    }
  }
@@ -18,7 +18,7 @@
  "preferGlobal": true,
  "dependencies": {
    "golike-defer": "^0.5.1",
    "xen-api": "^1.2.2"
    "xen-api": "^1.3.1"
  },
  "scripts": {
    "postversion": "npm publish"

@@ -1,7 +1,7 @@
{
  "private": false,
  "name": "@xen-orchestra/fs",
  "version": "3.3.1",
  "version": "4.0.0",
  "license": "AGPL-3.0-or-later",
  "description": "The File System for Xen Orchestra backups.",
  "homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -20,6 +20,7 @@
    "node": ">=14.13"
  },
  "dependencies": {
    "@aws-sdk/abort-controller": "^3.272.0",
    "@aws-sdk/client-s3": "^3.54.0",
    "@aws-sdk/lib-storage": "^3.54.0",
    "@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
@@ -28,7 +29,7 @@
    "@vates/async-each": "^1.0.0",
    "@vates/coalesce-calls": "^0.1.0",
    "@vates/decorate-with": "^2.0.0",
    "@vates/read-chunk": "^1.0.1",
    "@vates/read-chunk": "^1.1.1",
    "@xen-orchestra/async-map": "^0.1.2",
    "@xen-orchestra/log": "^0.6.0",
    "bind-property-descriptor": "^2.0.0",
@@ -50,10 +51,11 @@
    "@babel/plugin-proposal-decorators": "^7.1.6",
    "@babel/plugin-proposal-function-bind": "^7.0.0",
    "@babel/preset-env": "^7.8.0",
    "babel-plugin-lodash": "^3.3.2",
    "cross-env": "^7.0.2",
    "dotenv": "^16.0.0",
    "rimraf": "^4.1.1",
    "rimraf": "^5.0.1",
    "sinon": "^15.0.4",
    "test": "^3.3.0",
    "tmp": "^0.2.1"
  },
  "scripts": {
@@ -63,7 +65,9 @@
    "prebuild": "yarn run clean",
    "predev": "yarn run clean",
    "prepublishOnly": "yarn run build",
    "postversion": "npm publish"
    "pretest": "yarn run build",
    "postversion": "npm publish",
    "test": "node--test ./dist/"
  },
  "author": {
    "name": "Vates SAS",

@@ -1,4 +1,5 @@
/* eslint-env jest */
import { describe, it } from 'test'
import { strict as assert } from 'assert'

import { Readable } from 'readable-stream'
import copyStreamToBuffer from './_copyStreamToBuffer.js'
@@ -16,6 +17,6 @@ describe('copyStreamToBuffer', () => {

    await copyStreamToBuffer(stream, buffer)

    expect(buffer.toString()).toBe('hel')
    assert.equal(buffer.toString(), 'hel')
  })
})
@@ -1,4 +1,5 @@
/* eslint-env jest */
import { describe, it } from 'test'
import { strict as assert } from 'assert'

import { Readable } from 'readable-stream'
import createBufferFromStream from './_createBufferFromStream.js'
@@ -14,6 +15,6 @@ describe('createBufferFromStream', () => {

    const buffer = await createBufferFromStream(stream)

    expect(buffer.toString()).toBe('hello')
    assert.equal(buffer.toString(), 'hello')
  })
})
@@ -1,4 +1,6 @@
/* eslint-env jest */
import { describe, it } from 'test'
import { strict as assert } from 'assert'

import { Readable } from 'node:stream'
import { _getEncryptor } from './_encryptor'
import crypto from 'crypto'
@@ -25,13 +27,13 @@ algorithms.forEach(algorithm => {
  it('handle buffer', () => {
    const encrypted = encryptor.encryptData(buffer)
    if (algorithm !== 'none') {
      expect(encrypted.equals(buffer)).toEqual(false) // encrypted should be different
      assert.equal(encrypted.equals(buffer), false) // encrypted should be different
      // ivlength, auth tag, padding
      expect(encrypted.length).not.toEqual(buffer.length)
      assert.notEqual(encrypted.length, buffer.length)
    }

    const decrypted = encryptor.decryptData(encrypted)
    expect(decrypted.equals(buffer)).toEqual(true)
    assert.equal(decrypted.equals(buffer), true)
  })

  it('handle stream', async () => {
@@ -39,12 +41,12 @@ algorithms.forEach(algorithm => {
    stream.length = buffer.length
    const encrypted = encryptor.encryptStream(stream)
    if (algorithm !== 'none') {
      expect(encrypted.length).toEqual(undefined)
      assert.equal(encrypted.length, undefined)
    }

    const decrypted = encryptor.decryptStream(encrypted)
    const decryptedBuffer = await streamToBuffer(decrypted)
    expect(decryptedBuffer.equals(buffer)).toEqual(true)
    assert.equal(decryptedBuffer.equals(buffer), true)
  })
})
})
@@ -1,4 +1,5 @@
/* eslint-env jest */
import { describe, it } from 'test'
import { strict as assert } from 'assert'

import guessAwsRegion from './_guessAwsRegion.js'

@@ -6,12 +7,12 @@ describe('guessAwsRegion', () => {
  it('should return region from AWS URL', async () => {
    const region = guessAwsRegion('s3.test-region.amazonaws.com')

    expect(region).toBe('test-region')
    assert.equal(region, 'test-region')
  })

  it('should return default region if none is found in AWS URL', async () => {
    const region = guessAwsRegion('s3.amazonaws.com')

    expect(region).toBe('us-east-1')
    assert.equal(region, 'us-east-1')
  })
})
@@ -9,28 +9,32 @@ import LocalHandler from './local'
const sudoExeca = (command, args, opts) => execa('sudo', [command, ...args], opts)

export default class MountHandler extends LocalHandler {
  #execa
  #keeper
  #params
  #realPath

  constructor(remote, { mountsDir = join(tmpdir(), 'xo-fs-mounts'), useSudo = false, ...opts } = {}, params) {
    super(remote, opts)

    this._execa = useSudo ? sudoExeca : execa
    this._keeper = undefined
    this._params = {
    this.#execa = useSudo ? sudoExeca : execa
    this.#params = {
      ...params,
      options: [params.options, remote.options ?? params.defaultOptions].filter(_ => _ !== undefined).join(','),
    }
    this._realPath = join(mountsDir, remote.id || Math.random().toString(36).slice(2))
    this.#realPath = join(mountsDir, remote.id || Math.random().toString(36).slice(2))
  }

  async _forget() {
    const keeper = this._keeper
    const keeper = this.#keeper
    if (keeper === undefined) {
      return
    }
    this._keeper = undefined
    this.#keeper = undefined
    await fs.close(keeper)

    await ignoreErrors.call(
      this._execa('umount', [this._getRealPath()], {
      this.#execa('umount', [this.getRealPath()], {
        env: {
          LANG: 'C',
        },
@@ -38,30 +42,30 @@ export default class MountHandler extends LocalHandler {
    )
  }

  _getRealPath() {
    return this._realPath
  getRealPath() {
    return this.#realPath
  }

  async _sync() {
    // in case of multiple `sync`s, ensure we properly close previous keeper
    {
      const keeper = this._keeper
      const keeper = this.#keeper
      if (keeper !== undefined) {
        this._keeper = undefined
        this.#keeper = undefined
        ignoreErrors.call(fs.close(keeper))
      }
    }

    const realPath = this._getRealPath()
    const realPath = this.getRealPath()

    await fs.ensureDir(realPath)

    try {
      const { type, device, options, env } = this._params
      const { type, device, options, env } = this.#params

      // Linux mount is more flexible in which order the mount arguments appear.
      // But FreeBSD requires this order of the arguments.
      await this._execa('mount', ['-o', options, '-t', type, device, realPath], {
      await this.#execa('mount', ['-o', options, '-t', type, device, realPath], {
        env: {
          LANG: 'C',
          ...env,
@@ -71,7 +75,7 @@ export default class MountHandler extends LocalHandler {
      try {
        // the failure may mean it's already mounted, use `findmnt` to check
        // that's the case
        await this._execa('findmnt', [realPath], {
        await this.#execa('findmnt', [realPath], {
          stdio: 'ignore',
        })
      } catch (_) {
@@ -82,7 +86,7 @@ export default class MountHandler extends LocalHandler {
      // keep an open file on the mount to prevent it from being unmounted if used
      // by another handler/process
      const keeperPath = `${realPath}/.keeper_${Math.random().toString(36).slice(2)}`
      this._keeper = await fs.open(keeperPath, 'w')
      this.#keeper = await fs.open(keeperPath, 'w')
      ignoreErrors.call(fs.unlink(keeperPath))
    }
  }

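The `.keeper` logic above relies on a classic Unix trick: an open file descriptor on a mounted filesystem keeps it busy, so a concurrent `umount` fails, and unlinking the file right away hides it from listings while the descriptor stays valid. A standalone sketch, with a hypothetical mount path:

```js
import fs from 'fs-extra'

// Hold the mount busy: open a throwaway file on it, then unlink it right away.
const mountPoint = '/run/example-mount' // hypothetical
const keeperPath = `${mountPoint}/.keeper_${Math.random().toString(36).slice(2)}`
const keeper = await fs.open(keeperPath, 'w')
await fs.unlink(keeperPath) // invisible in listings, fd still pins the mount

// ... later, before unmounting, release it:
await fs.close(keeper)
```
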
@@ -37,8 +37,13 @@ const ignoreEnoent = error => {
const noop = Function.prototype

class PrefixWrapper {
  #prefix

  constructor(handler, prefix) {
    this._prefix = prefix
    this.#prefix = prefix

    // cannot be a private field because used by methods dynamically added
    // outside of the class
    this._handler = handler
  }

@@ -50,7 +55,7 @@ class PrefixWrapper {
  async list(dir, opts) {
    const entries = await this._handler.list(this._resolve(dir), opts)
    if (opts != null && opts.prependDir) {
      const n = this._prefix.length
      const n = this.#prefix.length
      entries.forEach((entry, i, entries) => {
        entries[i] = entry.slice(n)
      })
@@ -62,19 +67,21 @@ class PrefixWrapper {
    return this._handler.rename(this._resolve(oldPath), this._resolve(newPath))
  }

  // cannot be a private method because used by methods dynamically added
  // outside of the class
  _resolve(path) {
    return this._prefix + normalizePath(path)
    return this.#prefix + normalizePath(path)
  }
}

export default class RemoteHandlerAbstract {
  #encryptor
  #rawEncryptor

  get _encryptor() {
    if (this.#encryptor === undefined) {
  get #encryptor() {
    if (this.#rawEncryptor === undefined) {
      throw new Error(`Can't access to encryptor before remote synchronization`)
    }
    return this.#encryptor
    return this.#rawEncryptor
  }

  constructor(remote, options = {}) {
@@ -111,6 +118,10 @@ export default class RemoteHandlerAbstract {
  }

  // Public members
  //
  // Should not be called directly because:
  // - some concurrency limits may be applied which may lead to deadlocks
  // - some preprocessing may be applied on parameters that should not be done multiple times (e.g. prefixing paths)

  get type() {
    throw new Error('Not implemented')
@@ -121,10 +132,6 @@ export default class RemoteHandlerAbstract {
    return prefix === '/' ? this : new PrefixWrapper(this, prefix)
  }

  async closeFile(fd) {
    await this.__closeFile(fd)
  }

  async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
    if (options.end !== undefined || options.start !== undefined) {
      assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
@@ -157,7 +164,7 @@ export default class RemoteHandlerAbstract {
    }

    if (this.isEncrypted) {
      stream = this._encryptor.decryptStream(stream)
      stream = this.#encryptor.decryptStream(stream)
    } else {
      // try to add the length prop if missing and not a range stream
      if (stream.length === undefined && options.end === undefined && options.start === undefined) {
@@ -186,7 +193,7 @@ export default class RemoteHandlerAbstract {
    path = normalizePath(path)
    let checksumStream

    input = this._encryptor.encryptStream(input)
    input = this.#encryptor.encryptStream(input)
    if (checksum) {
      checksumStream = createChecksumStream()
      pipeline(input, checksumStream, noop)
@@ -224,10 +231,10 @@ export default class RemoteHandlerAbstract {
    assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)

    const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
    return size - this._encryptor.ivLength
    return size - this.#encryptor.ivLength
  }

  async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
  async __list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
    try {
      const virtualDir = normalizePath(dir)
      dir = normalizePath(dir)
@@ -257,20 +264,12 @@ export default class RemoteHandlerAbstract {
    return { dispose: await this._lock(path) }
  }

  async mkdir(dir, { mode } = {}) {
    await this.__mkdir(normalizePath(dir), { mode })
  }

  async mktree(dir, { mode } = {}) {
    await this._mktree(normalizePath(dir), { mode })
  }

  openFile(path, flags) {
    return this.__openFile(path, flags)
  }

  async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
    const encryptedData = this._encryptor.encryptData(data)
    const encryptedData = this.#encryptor.encryptData(data)
    await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
  }

@@ -279,9 +278,9 @@ export default class RemoteHandlerAbstract {
    return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
  }

  async readFile(file, { flags = 'r' } = {}) {
  async __readFile(file, { flags = 'r' } = {}) {
    const data = await this._readFile(normalizePath(file), { flags })
    return this._encryptor.decryptData(data)
    return this.#encryptor.decryptData(data)
  }

  async #rename(oldPath, newPath, { checksum }, createTree = true) {
@@ -301,11 +300,11 @@ export default class RemoteHandlerAbstract {
    }
  }

  rename(oldPath, newPath, { checksum = false } = {}) {
  __rename(oldPath, newPath, { checksum = false } = {}) {
    return this.#rename(normalizePath(oldPath), normalizePath(newPath), { checksum })
  }

  async copy(oldPath, newPath, { checksum = false } = {}) {
  async __copy(oldPath, newPath, { checksum = false } = {}) {
    oldPath = normalizePath(oldPath)
    newPath = normalizePath(newPath)

@@ -332,33 +331,33 @@ export default class RemoteHandlerAbstract {
  async sync() {
    await this._sync()
    try {
      await this._checkMetadata()
      await this.#checkMetadata()
    } catch (error) {
      await this._forget()
      throw error
    }
  }

  async _canWriteMetadata() {
    const list = await this.list('/', {
  async #canWriteMetadata() {
    const list = await this.__list('/', {
      filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
    })
    return list.length === 0
  }

  async _createMetadata() {
  async #createMetadata() {
    const encryptionAlgorithm = this._remote.encryptionKey === undefined ? 'none' : DEFAULT_ENCRYPTION_ALGORITHM
    this.#encryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
    this.#rawEncryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)

    await Promise.all([
      this._writeFile(normalizePath(ENCRYPTION_DESC_FILENAME), JSON.stringify({ algorithm: encryptionAlgorithm }), {
        flags: 'w',
      }), // not encrypted
      this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
      this.__writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
    ])
  }

  async _checkMetadata() {
  async #checkMetadata() {
    let encryptionAlgorithm = 'none'
    let data
    try {
@@ -374,18 +373,18 @@ export default class RemoteHandlerAbstract {
    }

    try {
      this.#encryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
      this.#rawEncryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
      // this file is encrypted
      const data = await this.readFile(ENCRYPTION_METADATA_FILENAME, 'utf-8')
      const data = await this.__readFile(ENCRYPTION_METADATA_FILENAME, 'utf-8')
      JSON.parse(data)
    } catch (error) {
      // can be ENOENT, bad algorithm, or broken JSON (bad key or algorithm)
      if (encryptionAlgorithm !== 'none') {
        if (await this._canWriteMetadata()) {
        if (await this.#canWriteMetadata()) {
          // any other error, but on an empty remote => update with remote settings

          info('will update metadata of this remote')
          return this._createMetadata()
          return this.#createMetadata()
        } else {
          warn(
            `The encryptionKey settings of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
@@ -438,7 +437,7 @@ export default class RemoteHandlerAbstract {
    await this._truncate(file, len)
  }

  async unlink(file, { checksum = true } = {}) {
  async __unlink(file, { checksum = true } = {}) {
    file = normalizePath(file)

    if (checksum) {
@@ -453,8 +452,8 @@ export default class RemoteHandlerAbstract {
    await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
  }

  async writeFile(file, data, { flags = 'wx' } = {}) {
    const encryptedData = this._encryptor.encryptData(data)
  async __writeFile(file, data, { flags = 'wx' } = {}) {
    const encryptedData = this.#encryptor.encryptData(data)
    await this._writeFile(normalizePath(file), encryptedData, { flags })
  }

@@ -465,6 +464,8 @@ export default class RemoteHandlerAbstract {
  }

  async __mkdir(dir, { mode } = {}) {
    dir = normalizePath(dir)

    try {
      await this._mkdir(dir, { mode })
    } catch (error) {
@@ -586,9 +587,9 @@ export default class RemoteHandlerAbstract {
      if (validator !== undefined) {
        await validator.call(this, tmpPath)
      }
      await this.rename(tmpPath, path)
      await this.__rename(tmpPath, path)
    } catch (error) {
      await this.unlink(tmpPath)
      await this.__unlink(tmpPath)
      throw error
    }
  }
@@ -665,7 +666,22 @@ export default class RemoteHandlerAbstract {
  }

  get isEncrypted() {
    return this._encryptor.id !== 'NULL_ENCRYPTOR'
    return this.#encryptor.id !== 'NULL_ENCRYPTOR'
  }
}

// from implementation methods, whose names start with `__`, create public
// accessors on which external behaviors can be added (e.g. concurrency limits, path rewriting)
{
  const proto = RemoteHandlerAbstract.prototype
  for (const method of Object.getOwnPropertyNames(proto)) {
    if (method.startsWith('__')) {
      const publicName = method.slice(2)

      assert(!Object.hasOwn(proto, publicName))

      Object.defineProperty(proto, publicName, Object.getOwnPropertyDescriptor(proto, method))
    }
  }
}

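The aliasing loop at the end of the diff above is the core of the new design: implementations keep their `__` prefix, while public names are generated from them so that external behaviors (concurrency limits, path prefixing) can later be layered onto the public accessors without touching the implementations. Reduced to its essentials:

```js
// Standalone reduction of the `__` -> public aliasing shown above.
class Example {
  __greet(name) {
    return `hello ${name}`
  }
}

{
  const proto = Example.prototype
  for (const method of Object.getOwnPropertyNames(proto)) {
    if (method.startsWith('__')) {
      const publicName = method.slice(2)
      Object.defineProperty(proto, publicName, Object.getOwnPropertyDescriptor(proto, method))
    }
  }
}

console.log(new Example().greet('world')) // hello world
```
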
@@ -1,11 +1,13 @@
/* eslint-env jest */
import { after, beforeEach, describe, it } from 'test'
import { strict as assert } from 'assert'
import sinon from 'sinon'

import { DEFAULT_ENCRYPTION_ALGORITHM, _getEncryptor } from './_encryptor'
import { Disposable, pFromCallback, TimeoutError } from 'promise-toolbox'
import { getSyncedHandler } from '.'
import { rimraf } from 'rimraf'
import AbstractHandler from './abstract'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'

const TIMEOUT = 10e3
@@ -24,7 +26,7 @@ class TestHandler extends AbstractHandler {

const noop = Function.prototype

jest.useFakeTimers()
const clock = sinon.useFakeTimers()

describe('closeFile()', () => {
  it(`throws in case of timeout`, async () => {
@@ -33,8 +35,8 @@ describe('closeFile()', () => {
    })

    const promise = testHandler.closeFile({ fd: undefined, path: '' })
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -45,8 +47,8 @@ describe('getInfo()', () => {
    })

    const promise = testHandler.getInfo()
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -57,8 +59,8 @@ describe('getSize()', () => {
    })

    const promise = testHandler.getSize('')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -69,8 +71,8 @@ describe('list()', () => {
    })

    const promise = testHandler.list('.')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -81,8 +83,8 @@ describe('openFile()', () => {
    })

    const promise = testHandler.openFile('path')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -93,8 +95,8 @@ describe('rename()', () => {
    })

    const promise = testHandler.rename('oldPath', 'newPath')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -105,8 +107,8 @@ describe('rmdir()', () => {
    })

    const promise = testHandler.rmdir('dir')
    jest.advanceTimersByTime(TIMEOUT)
    await expect(promise).rejects.toThrowError(TimeoutError)
    clock.tick(TIMEOUT)
    await assert.rejects(promise, TimeoutError)
  })
})

@@ -115,14 +117,14 @@ describe('encryption', () => {
  beforeEach(async () => {
    dir = await pFromCallback(cb => tmp.dir(cb))
  })
  afterAll(async () => {
  after(async () => {
    await rimraf(dir)
  })

  it('sync should NOT create metadata if missing (not encrypted)', async () => {
    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)

    expect(await fs.readdir(dir)).toEqual([])
    assert.deepEqual(await fs.readdir(dir), [])
  })

  it('sync should create metadata if missing (encrypted)', async () => {
@@ -131,12 +133,12 @@ describe('encryption', () => {
      noop
    )

    expect(await fs.readdir(dir)).toEqual(['encryption.json', 'metadata.json'])
    assert.deepEqual(await fs.readdir(dir), ['encryption.json', 'metadata.json'])

    const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
    expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
    assert.equal(encryption.algorithm, DEFAULT_ENCRYPTION_ALGORITHM)
    // encrypted, should not be parsable
    expect(async () => JSON.parse(await fs.readFile(`${dir}/metadata.json`))).rejects.toThrowError()
    assert.rejects(async () => JSON.parse(await fs.readFile(`${dir}/metadata.json`)))
  })

  it('sync should not modify existing metadata', async () => {
@@ -146,9 +148,9 @@ describe('encryption', () => {
    await Disposable.use(await getSyncedHandler({ url: `file://${dir}` }), noop)

    const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
    expect(encryption.algorithm).toEqual('none')
    assert.equal(encryption.algorithm, 'none')
    const metadata = JSON.parse(await fs.readFile(`${dir}/metadata.json`, 'utf-8'))
    expect(metadata.random).toEqual('NOTSORANDOM')
    assert.equal(metadata.random, 'NOTSORANDOM')
  })

  it('should modify metadata if empty', async () => {
@@ -160,11 +162,11 @@ describe('encryption', () => {
      noop
    )
    let encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
    expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
    assert.equal(encryption.algorithm, DEFAULT_ENCRYPTION_ALGORITHM)

    await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
    encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
    expect(encryption.algorithm).toEqual('none')
    assert.equal(encryption.algorithm, 'none')
  })

  it(
@@ -178,9 +180,9 @@ describe('encryption', () => {
      const handler = yield getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` })

      const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
      expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
      assert.equal(encryption.algorithm, DEFAULT_ENCRYPTION_ALGORITHM)
      const metadata = JSON.parse(await handler.readFile(`./metadata.json`))
      expect(metadata.random).toEqual('NOTSORANDOM')
      assert.equal(metadata.random, 'NOTSORANDOM')
    })
  )

@@ -198,9 +200,9 @@ describe('encryption', () => {

    // remote is now non-empty: can't modify key anymore
    await fs.writeFile(`${dir}/nonempty.json`, 'content')
    await expect(
    await assert.rejects(
      Disposable.use(getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd10"` }), noop)
    ).rejects.toThrowError()
    )
  })

  it('sync should fail when changing algorithm', async () => {
@@ -213,8 +215,8 @@ describe('encryption', () => {
    // remote is now non-empty: can't modify key anymore
    await fs.writeFile(`${dir}/nonempty.json`, 'content')

    await expect(
    await assert.rejects(
      Disposable.use(getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` }), noop)
    ).rejects.toThrowError()
    )
  })
})
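To summarize what these tests pin down: after `sync()`, an encrypted remote carries two marker files, `encryption.json` (clear text, records the algorithm) and `metadata.json` (encrypted, random content whose successful decryption proves the key is the one used at creation). A usage sketch under the same assumptions as the tests above (including that the package's public export matches the local `getSyncedHandler` import used there):

```js
import { Disposable } from 'promise-toolbox'
import { getSyncedHandler } from '@xen-orchestra/fs'

// Reading back the encrypted marker only works with the creation key;
// a wrong key on a non-empty remote makes sync() reject instead.
await Disposable.use(
  getSyncedHandler({ url: 'file:///path/to/remote?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"' }),
  async handler => {
    console.log(JSON.parse(await handler.readFile('./metadata.json'))) // { random: ... }
  }
)
```
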
@@ -1,7 +1,7 @@
import through2 from 'through2'
import { createHash } from 'crypto'
import { defer, fromEvent } from 'promise-toolbox'
import { invert } from 'lodash'
import invert from 'lodash/invert.js'

// Format: $<algorithm>$<salt>$<encrypted>
//

@@ -1,4 +1,5 @@
/* eslint-env jest */
import { after, afterEach, before, beforeEach, describe, it } from 'test'
import { strict as assert } from 'assert'

import 'dotenv/config'
import { forOwn, random } from 'lodash'
@@ -53,11 +54,11 @@ handlers.forEach(url => {
      })
    }

    beforeAll(async () => {
    before(async () => {
      handler = getHandler({ url }).addPrefix(`xo-fs-tests-${Date.now()}`)
      await handler.sync()
    })
    afterAll(async () => {
    after(async () => {
      await handler.forget()
      handler = undefined
    })
@@ -72,67 +73,63 @@ handlers.forEach(url => {

    describe('#type', () => {
      it('returns the type of the remote', () => {
        expect(typeof handler.type).toBe('string')
        assert.equal(typeof handler.type, 'string')
      })
    })

    describe('#getInfo()', () => {
      let info
      beforeAll(async () => {
      before(async () => {
        info = await handler.getInfo()
      })

      it('should return an object with info', async () => {
        expect(typeof info).toBe('object')
        assert.equal(typeof info, 'object')
      })

      it('should return correct type of attribute', async () => {
        if (info.size !== undefined) {
          expect(typeof info.size).toBe('number')
          assert.equal(typeof info.size, 'number')
        }
        if (info.used !== undefined) {
          expect(typeof info.used).toBe('number')
          assert.equal(typeof info.used, 'number')
        }
      })
    })

    describe('#getSize()', () => {
      beforeEach(() => handler.outputFile('file', TEST_DATA))
      before(() => handler.outputFile('file', TEST_DATA))

      testWithFileDescriptor('file', 'r', async () => {
        expect(await handler.getSize('file')).toEqual(TEST_DATA_LEN)
        assert.equal(await handler.getSize('file'), TEST_DATA_LEN)
      })
    })

    describe('#list()', () => {
      it(`should list the content of folder`, async () => {
        await handler.outputFile('file', TEST_DATA)
        await expect(await handler.list('.')).toEqual(['file'])
        assert.deepEqual(await handler.list('.'), ['file'])
      })

      it('can prepend the directory to entries', async () => {
        await handler.outputFile('dir/file', '')
        expect(await handler.list('dir', { prependDir: true })).toEqual(['/dir/file'])
        assert.deepEqual(await handler.list('dir', { prependDir: true }), ['/dir/file'])
      })

      it('throws ENOENT if no such directory', async () => {
        expect((await rejectionOf(handler.list('dir'))).code).toBe('ENOENT')
        await handler.rmtree('dir')
        assert.equal((await rejectionOf(handler.list('dir'))).code, 'ENOENT')
      })

      it('can return empty for missing directory', async () => {
        expect(await handler.list('dir', { ignoreMissing: true })).toEqual([])
        assert.deepEqual(await handler.list('dir', { ignoreMissing: true }), [])
      })
    })

    describe('#mkdir()', () => {
      it('creates a directory', async () => {
        await handler.mkdir('dir')
        await expect(await handler.list('.')).toEqual(['dir'])
        assert.deepEqual(await handler.list('.'), ['dir'])
      })

      it('does not throw on existing directory', async () => {
@@ -143,15 +140,15 @@ handlers.forEach(url => {
      it('throws ENOTDIR on existing file', async () => {
        await handler.outputFile('file', '')
        const error = await rejectionOf(handler.mkdir('file'))
        expect(error.code).toBe('ENOTDIR')
        assert.equal(error.code, 'ENOTDIR')
      })
    })

    describe('#mktree()', () => {
      it('creates a tree of directories', async () => {
        await handler.mktree('dir/dir')
        await expect(await handler.list('.')).toEqual(['dir'])
        await expect(await handler.list('dir')).toEqual(['dir'])
        assert.deepEqual(await handler.list('.'), ['dir'])
        assert.deepEqual(await handler.list('dir'), ['dir'])
      })

      it('does not throw on existing directory', async () => {
@@ -162,26 +159,27 @@ handlers.forEach(url => {
      it('throws ENOTDIR on existing file', async () => {
        await handler.outputFile('dir/file', '')
        const error = await rejectionOf(handler.mktree('dir/file'))
        expect(error.code).toBe('ENOTDIR')
        assert.equal(error.code, 'ENOTDIR')
      })

      it('throws ENOTDIR on existing file in path', async () => {
        await handler.outputFile('file', '')
        const error = await rejectionOf(handler.mktree('file/dir'))
        expect(error.code).toBe('ENOTDIR')
        assert.equal(error.code, 'ENOTDIR')
      })
    })

    describe('#outputFile()', () => {
      it('writes data to a file', async () => {
        await handler.outputFile('file', TEST_DATA)
        expect(await handler.readFile('file')).toEqual(TEST_DATA)
        assert.deepEqual(await handler.readFile('file'), TEST_DATA)
      })

      it('throws on existing files', async () => {
        await handler.unlink('file')
        await handler.outputFile('file', '')
        const error = await rejectionOf(handler.outputFile('file', ''))
        expect(error.code).toBe('EEXIST')
        assert.equal(error.code, 'EEXIST')
      })

      it("shouldn't timeout when respecting the parallel execution restriction", async () => {
@@ -192,7 +190,7 @@ handlers.forEach(url => {
      })

    describe('#read()', () => {
      beforeEach(() => handler.outputFile('file', TEST_DATA))
      before(() => handler.outputFile('file', TEST_DATA))

      const start = random(TEST_DATA_LEN)
      const size = random(TEST_DATA_LEN)
@@ -200,8 +198,8 @@ handlers.forEach(url => {
      testWithFileDescriptor('file', 'r', async ({ file }) => {
        const buffer = Buffer.alloc(size)
        const result = await handler.read(file, buffer, start)
        expect(result.buffer).toBe(buffer)
        expect(result).toEqual({
        assert.deepEqual(result.buffer, buffer)
        assert.deepEqual(result, {
          buffer,
          bytesRead: Math.min(size, TEST_DATA_LEN - start),
        })
@@ -211,12 +209,13 @@ handlers.forEach(url => {
    describe('#readFile', () => {
      it('returns a buffer containing the contents of the file', async () => {
        await handler.outputFile('file', TEST_DATA)
        expect(await handler.readFile('file')).toEqual(TEST_DATA)
        assert.deepEqual(await handler.readFile('file'), TEST_DATA)
      })

      it('throws on missing file', async () => {
        await handler.unlink('file')
        const error = await rejectionOf(handler.readFile('file'))
        expect(error.code).toBe('ENOENT')
        assert.equal(error.code, 'ENOENT')
      })
    })

@@ -225,19 +224,19 @@ handlers.forEach(url => {
        await handler.outputFile('file', TEST_DATA)
        await handler.rename('file', `file2`)

        expect(await handler.list('.')).toEqual(['file2'])
        expect(await handler.readFile(`file2`)).toEqual(TEST_DATA)
        assert.deepEqual(await handler.list('.'), ['file2'])
        assert.deepEqual(await handler.readFile(`file2`), TEST_DATA)
      })
      it(`should rename the file and create dest directory`, async () => {
        await handler.outputFile('file', TEST_DATA)
        await handler.rename('file', `sub/file2`)

        expect(await handler.list('sub')).toEqual(['file2'])
        expect(await handler.readFile(`sub/file2`)).toEqual(TEST_DATA)
        assert.deepEqual(await handler.list('sub'), ['file2'])
        assert.deepEqual(await handler.readFile(`sub/file2`), TEST_DATA)
      })
      it(`should fail with ENOENT if source file is missing`, async () => {
        const error = await rejectionOf(handler.rename('file', `sub/file2`))
        expect(error.code).toBe('ENOENT')
        assert.equal(error.code, 'ENOENT')
      })
    })

@@ -245,14 +244,15 @@ handlers.forEach(url => {
      it('should remove an empty directory', async () => {
        await handler.mkdir('dir')
        await handler.rmdir('dir')
        expect(await handler.list('.')).toEqual([])
        assert.deepEqual(await handler.list('.'), [])
      })

      it(`should throw on non-empty directory`, async () => {
        await handler.outputFile('dir/file', '')

        const error = await rejectionOf(handler.rmdir('.'))
        await expect(error.code).toEqual('ENOTEMPTY')
        assert.equal(error.code, 'ENOTEMPTY')
        await handler.unlink('dir/file')
      })

      it('does not throw on missing directory', async () => {
@@ -265,7 +265,7 @@ handlers.forEach(url => {
        await handler.outputFile('dir/file', '')
        await handler.rmtree('dir')

        expect(await handler.list('.')).toEqual([])
        assert.deepEqual(await handler.list('.'), [])
      })
    })

@@ -273,9 +273,9 @@ handlers.forEach(url => {
      it('tests the remote appears to be working', async () => {
        const answer = await handler.test()

        expect(answer.success).toBe(true)
        expect(typeof answer.writeRate).toBe('number')
        expect(typeof answer.readRate).toBe('number')
        assert.equal(answer.success, true)
        assert.equal(typeof answer.writeRate, 'number')
        assert.equal(typeof answer.readRate, 'number')
      })
    })

@@ -284,7 +284,7 @@ handlers.forEach(url => {
        await handler.outputFile('file', TEST_DATA)
        await handler.unlink('file')

        await expect(await handler.list('.')).toEqual([])
        assert.deepEqual(await handler.list('.'), [])
      })

      it('does not throw on missing file', async () => {
@@ -294,6 +294,7 @@ handlers.forEach(url => {

    describe('#write()', () => {
      beforeEach(() => handler.outputFile('file', TEST_DATA))
      afterEach(() => handler.unlink('file'))

      const PATCH_DATA_LEN = Math.ceil(TEST_DATA_LEN / 2)
      const PATCH_DATA = unsecureRandomBytes(PATCH_DATA_LEN)
@@ -322,7 +323,7 @@ handlers.forEach(url => {
        describe(title, () => {
          testWithFileDescriptor('file', 'r+', async ({ file }) => {
            await handler.write(file, PATCH_DATA, offset)
            await expect(await handler.readFile('file')).toEqual(expected)
            assert.deepEqual(await handler.readFile('file'), expected)
          })
        })
      }
@@ -330,6 +331,7 @@ handlers.forEach(url => {
    })

    describe('#truncate()', () => {
      afterEach(() => handler.unlink('file'))
      forOwn(
        {
          'shrinks file': (() => {
@@ -348,7 +350,7 @@ handlers.forEach(url => {
          it(title, async () => {
            await handler.outputFile('file', TEST_DATA)
            await handler.truncate('file', length)
            await expect(await handler.readFile('file')).toEqual(expected)
            assert.deepEqual(await handler.readFile('file'), expected)
          })
        }
      )
@@ -19,7 +19,12 @@ async function addSyncStackTrace(fn, ...args) {
  try {
    return await fn.apply(this, args)
  } catch (error) {
    error.syncStack = stackContainer.stack
    let { stack } = stackContainer

    // remove first line which does not contain stack information, simply `Error`
    stack = stack.slice(stack.indexOf('\n') + 1)

    error.stack = [error.stack, 'From:', stack].join('\n')
    throw error
  }
}
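The replacement above stitches the synchronous call-site stack directly onto whatever the async operation throws, instead of stashing it in a side property that most tooling never displays. A minimal standalone version of the same technique (the function name is hypothetical):

```js
// Capture the stack where the call was made, then graft it onto any rejection.
async function withSyncStackTrace(fn, ...args) {
  const { stack } = new Error('sync stack') // captured at call time
  try {
    return await fn(...args)
  } catch (error) {
    // drop the first line (`Error: sync stack`), keep only the frames
    error.stack = [error.stack, 'From:', stack.slice(stack.indexOf('\n') + 1)].join('\n')
    throw error
  }
}

// usage sketch: await withSyncStackTrace(fs.readFile, somePath)
```
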
@@ -29,11 +34,14 @@ function dontAddSyncStackTrace(fn, ...args) {
}

export default class LocalHandler extends RemoteHandlerAbstract {
  #addSyncStackTrace
  #retriesOnEagain

  constructor(remote, opts = {}) {
    super(remote)

    this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
    this._retriesOnEagain = {
    this.#addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
    this.#retriesOnEagain = {
      delay: 1e3,
      retries: 9,
      ...opts.retriesOnEagain,
@@ -46,26 +54,26 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    return 'file'
  }

  _getRealPath() {
  getRealPath() {
    return this._remote.path
  }

  _getFilePath(file) {
    return this._getRealPath() + file
  getFilePath(file) {
    return this.getRealPath() + file
  }

  async _closeFile(fd) {
    return this._addSyncStackTrace(fs.close, fd)
    return this.#addSyncStackTrace(fs.close, fd)
  }

  async _copy(oldPath, newPath) {
    return this._addSyncStackTrace(fs.copy, this._getFilePath(oldPath), this._getFilePath(newPath))
    return this.#addSyncStackTrace(fs.copy, this.getFilePath(oldPath), this.getFilePath(newPath))
  }

  async _createReadStream(file, options) {
    if (typeof file === 'string') {
      const stream = fs.createReadStream(this._getFilePath(file), options)
      await this._addSyncStackTrace(fromEvent, stream, 'open')
      const stream = fs.createReadStream(this.getFilePath(file), options)
      await this.#addSyncStackTrace(fromEvent, stream, 'open')
      return stream
    }
    return fs.createReadStream('', {
@@ -77,8 +85,8 @@ export default class LocalHandler extends RemoteHandlerAbstract {

  async _createWriteStream(file, options) {
    if (typeof file === 'string') {
      const stream = fs.createWriteStream(this._getFilePath(file), options)
      await this._addSyncStackTrace(fromEvent, stream, 'open')
      const stream = fs.createWriteStream(this.getFilePath(file), options)
      await this.#addSyncStackTrace(fromEvent, stream, 'open')
      return stream
    }
    return fs.createWriteStream('', {
@@ -93,7 +101,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
    // filesystem, type, size, used, available, capacity and mountpoint.
    // size, used, available and capacity may be `NaN` so we remove any `NaN`
    // value from the object.
    const info = await df.file(this._getFilePath('/'))
    const info = await df.file(this.getFilePath('/'))
    Object.keys(info).forEach(key => {
      if (Number.isNaN(info[key])) {
        delete info[key]
@@ -104,16 +112,16 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  async _getSize(file) {
    const stats = await this._addSyncStackTrace(fs.stat, this._getFilePath(typeof file === 'string' ? file : file.path))
    const stats = await this.#addSyncStackTrace(fs.stat, this.getFilePath(typeof file === 'string' ? file : file.path))
    return stats.size
  }

  async _list(dir) {
    return this._addSyncStackTrace(fs.readdir, this._getFilePath(dir))
    return this.#addSyncStackTrace(fs.readdir, this.getFilePath(dir))
  }

  async _lock(path) {
    const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
    const acquire = lockfile.lock.bind(undefined, this.getFilePath(path), {
      async onCompromised(error) {
        warn('lock compromised', { error })
        try {
@@ -125,11 +133,11 @@ export default class LocalHandler extends RemoteHandlerAbstract {
      },
    })

    let release = await this._addSyncStackTrace(acquire)
    let release = await this.#addSyncStackTrace(acquire)

    return async () => {
      try {
        await this._addSyncStackTrace(release)
        await this.#addSyncStackTrace(release)
      } catch (error) {
        warn('lock could not be released', { error })
      }
@@ -137,18 +145,18 @@ export default class LocalHandler extends RemoteHandlerAbstract {
  }

  _mkdir(dir, { mode }) {
    return this._addSyncStackTrace(fs.mkdir, this._getFilePath(dir), { mode })
    return this.#addSyncStackTrace(fs.mkdir, this.getFilePath(dir), { mode })
  }

  async _openFile(path, flags) {
    return this._addSyncStackTrace(fs.open, this._getFilePath(path), flags)
    return this.#addSyncStackTrace(fs.open, this.getFilePath(path), flags)
  }

  async _read(file, buffer, position) {
    const needsClose = typeof file === 'string'
    file = needsClose ? await this._addSyncStackTrace(fs.open, this._getFilePath(file), 'r') : file.fd
    file = needsClose ? await this.#addSyncStackTrace(fs.open, this.getFilePath(file), 'r') : file.fd
    try {
      return await this._addSyncStackTrace(
      return await this.#addSyncStackTrace(
        fs.read,
        file,
        buffer,
@@ -158,44 +166,44 @@ export default class LocalHandler extends RemoteHandlerAbstract {
      )
    } finally {
      if (needsClose) {
        await this._addSyncStackTrace(fs.close, file)
        await this.#addSyncStackTrace(fs.close, file)
      }
    }
  }

  async _readFile(file, options) {
    const filePath = this._getFilePath(file)
    return await this._addSyncStackTrace(retry, () => fs.readFile(filePath, options), this._retriesOnEagain)
    const filePath = this.getFilePath(file)
    return await this.#addSyncStackTrace(retry, () => fs.readFile(filePath, options), this.#retriesOnEagain)
  }

  async _rename(oldPath, newPath) {
    return this._addSyncStackTrace(fs.rename, this._getFilePath(oldPath), this._getFilePath(newPath))
    return this.#addSyncStackTrace(fs.rename, this.getFilePath(oldPath), this.getFilePath(newPath))
  }

  async _rmdir(dir) {
    return this._addSyncStackTrace(fs.rmdir, this._getFilePath(dir))
    return this.#addSyncStackTrace(fs.rmdir, this.getFilePath(dir))
  }

  async _sync() {
    const path = this._getRealPath('/')
    await this._addSyncStackTrace(fs.ensureDir, path)
    await this._addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
    const path = this.getRealPath('/')
    await this.#addSyncStackTrace(fs.ensureDir, path)
    await this.#addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
  }

  _truncate(file, len) {
    return this._addSyncStackTrace(fs.truncate, this._getFilePath(file), len)
    return this.#addSyncStackTrace(fs.truncate, this.getFilePath(file), len)
  }

  async _unlink(file) {
    const filePath = this._getFilePath(file)
    return await this._addSyncStackTrace(retry, () => fs.unlink(filePath), this._retriesOnEagain)
    const filePath = this.getFilePath(file)
    return await this.#addSyncStackTrace(retry, () => fs.unlink(filePath), this.#retriesOnEagain)
  }

  _writeFd(file, buffer, position) {
    return this._addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
    return this.#addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
  }

  _writeFile(file, data, { flags }) {
    return this._addSyncStackTrace(fs.writeFile, this._getFilePath(file), data, { flag: flags })
    return this.#addSyncStackTrace(fs.writeFile, this.getFilePath(file), data, { flag: flags })
  }
}

@@ -9,13 +9,11 @@ import {
  ListObjectsV2Command,
  PutObjectCommand,
  S3Client,
  UploadPartCommand,
  UploadPartCopyCommand,
} from '@aws-sdk/client-s3'
import { Upload } from '@aws-sdk/lib-storage'
import { NodeHttpHandler } from '@aws-sdk/node-http-handler'
import { getApplyMd5BodyChecksumPlugin } from '@aws-sdk/middleware-apply-body-checksum'
import assert from 'assert'
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import pRetry from 'promise-toolbox/retry'
@@ -24,7 +22,6 @@ import { decorateWith } from '@vates/decorate-with'
import { PassThrough, pipeline } from 'stream'
import { parse } from 'xo-remote-parser'
import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './path'
@@ -33,15 +30,14 @@ import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html

// limits: https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
const MIN_PART_SIZE = 1024 * 1024 * 5 // 5MB
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB

const { warn } = createLogger('xo:fs:s3')

export default class S3Handler extends RemoteHandlerAbstract {
  #bucket
  #dir
  #s3

  constructor(remote, _opts) {
    super(remote)
    const {
@@ -54,7 +50,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
      region = guessAwsRegion(host),
    } = parse(remote.url)

    this._s3 = new S3Client({
    this.#s3 = new S3Client({
      apiVersion: '2006-03-01',
      endpoint: `${protocol}://${host}`,
      forcePathStyle: true,
@@ -77,27 +73,27 @@ export default class S3Handler extends RemoteHandlerAbstract {
    })

    // Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
    this._s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this._s3.config))
    this.#s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this.#s3.config))

    const parts = split(path)
    this._bucket = parts.shift()
    this._dir = join(...parts)
    this.#bucket = parts.shift()
    this.#dir = join(...parts)
  }

  get type() {
    return 's3'
  }

  _makeCopySource(path) {
    return join(this._bucket, this._dir, path)
  #makeCopySource(path) {
    return join(this.#bucket, this.#dir, path)
  }

  _makeKey(file) {
    return join(this._dir, file)
  #makeKey(file) {
    return join(this.#dir, file)
  }

  _makePrefix(dir) {
    const prefix = join(this._dir, dir, '/')
  #makePrefix(dir) {
    const prefix = join(this.#dir, dir, '/')

    // no prefix for root
    if (prefix !== './') {
@@ -105,20 +101,20 @@ export default class S3Handler extends RemoteHandlerAbstract {
    }
  }

  _createParams(file) {
    return { Bucket: this._bucket, Key: this._makeKey(file) }
  #createParams(file) {
    return { Bucket: this.#bucket, Key: this.#makeKey(file) }
  }

  async _multipartCopy(oldPath, newPath) {
  async #multipartCopy(oldPath, newPath) {
    const size = await this._getSize(oldPath)
    const CopySource = this._makeCopySource(oldPath)
    const multipartParams = await this._s3.send(new CreateMultipartUploadCommand({ ...this._createParams(newPath) }))
    const CopySource = this.#makeCopySource(oldPath)
    const multipartParams = await this.#s3.send(new CreateMultipartUploadCommand({ ...this.#createParams(newPath) }))
    try {
      const parts = []
      let start = 0
      while (start < size) {
        const partNumber = parts.length + 1
        const upload = await this._s3.send(
        const upload = await this.#s3.send(
          new UploadPartCopyCommand({
            ...multipartParams,
            CopySource,
@@ -129,31 +125,31 @@ export default class S3Handler extends RemoteHandlerAbstract {
        parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partNumber })
        start += MAX_PART_SIZE
      }
      await this._s3.send(
      await this.#s3.send(
        new CompleteMultipartUploadCommand({
          ...multipartParams,
          MultipartUpload: { Parts: parts },
        })
      )
    } catch (e) {
      await this._s3.send(new AbortMultipartUploadCommand(multipartParams))
      await this.#s3.send(new AbortMultipartUploadCommand(multipartParams))
      throw e
    }
  }

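A quick sanity check of the constants driving this copy loop, using only the values defined at the top of the file: a part-by-part copy of the largest allowed object stays far below the 10000-part limit, and the upload fragment size is derived from that same limit.

```js
const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5 GiB, max size of a copied part
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5 TiB

// copying the biggest possible object in 5 GiB steps:
console.log(MAX_OBJECT_SIZE / MAX_PART_SIZE) // 1024 parts, well under 10000

// smallest upload fragment that still fits 5 TiB into 10000 parts:
console.log(Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT)) // 549755814 bytes ≈ 524 MiB
```
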
async _copy(oldPath, newPath) {
|
||||
const CopySource = this._makeCopySource(oldPath)
|
||||
const CopySource = this.#makeCopySource(oldPath)
|
||||
try {
|
||||
await this._s3.send(
|
||||
await this.#s3.send(
|
||||
new CopyObjectCommand({
|
||||
...this._createParams(newPath),
|
||||
...this.#createParams(newPath),
|
||||
CopySource,
|
||||
})
|
||||
)
|
||||
} catch (e) {
|
||||
// object > 5GB must be copied part by part
|
||||
if (e.name === 'EntityTooLarge') {
|
||||
return this._multipartCopy(oldPath, newPath)
|
||||
return this.#multipartCopy(oldPath, newPath)
|
||||
}
|
||||
// normalize this error code
|
||||
if (e.name === 'NoSuchKey') {
|
||||
@@ -167,20 +163,20 @@ export default class S3Handler extends RemoteHandlerAbstract {
|
||||
}
|
||||
}
|
||||
|
||||
async _isNotEmptyDir(path) {
|
||||
const result = await this._s3.send(
|
||||
async #isNotEmptyDir(path) {
|
||||
const result = await this.#s3.send(
|
||||
new ListObjectsV2Command({
|
||||
Bucket: this._bucket,
|
||||
Bucket: this.#bucket,
|
||||
MaxKeys: 1,
|
||||
Prefix: this._makePrefix(path),
|
||||
Prefix: this.#makePrefix(path),
|
||||
})
|
||||
)
|
||||
return result.Contents?.length > 0
|
||||
}
|
||||
|
||||
async _isFile(path) {
|
||||
async #isFile(path) {
|
||||
try {
|
||||
await this._s3.send(new HeadObjectCommand(this._createParams(path)))
|
||||
await this.#s3.send(new HeadObjectCommand(this.#createParams(path)))
|
||||
return true
|
||||
} catch (error) {
|
||||
if (error.name === 'NotFound') {
|
||||
@@ -197,11 +193,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
|
||||
pipeline(input, Body, () => {})
|
||||
|
||||
const upload = new Upload({
|
||||
client: this._s3,
|
||||
queueSize: 1,
|
||||
partSize: IDEAL_FRAGMENT_SIZE,
|
||||
client: this.#s3,
|
||||
params: {
|
||||
...this._createParams(path),
|
||||
...this.#createParams(path),
|
||||
Body,
|
||||
},
|
||||
})
|
||||
@@ -212,7 +206,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
|
||||
try {
|
||||
await validator.call(this, path)
|
||||
} catch (error) {
|
||||
await this.unlink(path)
|
||||
await this.__unlink(path)
|
||||
throw error
|
||||
}
|
||||
}
|
||||
@@ -234,9 +228,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
     },
   })

   async _writeFile(file, data, options) {
-    return this._s3.send(
+    return this.#s3.send(
       new PutObjectCommand({
-        ...this._createParams(file),
+        ...this.#createParams(file),
         Body: data,
       })
     )
@@ -244,7 +238,7 @@ export default class S3Handler extends RemoteHandlerAbstract {

   async _createReadStream(path, options) {
     try {
-      return (await this._s3.send(new GetObjectCommand(this._createParams(path)))).Body
+      return (await this.#s3.send(new GetObjectCommand(this.#createParams(path)))).Body
     } catch (e) {
       if (e.name === 'NoSuchKey') {
         const error = new Error(`ENOENT: no such file '${path}'`)
@@ -257,9 +251,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
   }

   async _unlink(path) {
-    await this._s3.send(new DeleteObjectCommand(this._createParams(path)))
+    await this.#s3.send(new DeleteObjectCommand(this.#createParams(path)))

-    if (await this._isNotEmptyDir(path)) {
+    if (await this.#isNotEmptyDir(path)) {
       const error = new Error(`EISDIR: illegal operation on a directory, unlink '${path}'`)
       error.code = 'EISDIR'
       error.path = path
@@ -270,12 +264,12 @@ export default class S3Handler extends RemoteHandlerAbstract {
   async _list(dir) {
     let NextContinuationToken
     const uniq = new Set()
-    const Prefix = this._makePrefix(dir)
+    const Prefix = this.#makePrefix(dir)

     do {
-      const result = await this._s3.send(
+      const result = await this.#s3.send(
         new ListObjectsV2Command({
-          Bucket: this._bucket,
+          Bucket: this.#bucket,
           Prefix,
           Delimiter: '/',
           // will only return paths up to the delimiter
@@ -305,7 +299,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
   }

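Editor's note: the `_list` loop combines two `ListObjectsV2` features: `Delimiter: '/'` groups keys into `CommonPrefixes` so only one "directory level" comes back, and continuation tokens page through results 1000 keys at a time. A hedged standalone sketch (names are illustrative):

```js
// Hedged sketch: list one "directory level" of an S3 prefix, following
// continuation tokens until the listing is exhausted.
import { ListObjectsV2Command } from '@aws-sdk/client-s3'

async function listDir(s3, Bucket, Prefix) {
  const names = new Set()
  let ContinuationToken
  do {
    const result = await s3.send(
      new ListObjectsV2Command({ Bucket, Prefix, Delimiter: '/', ContinuationToken })
    )
    // files directly under the prefix
    for (const { Key } of result.Contents ?? []) {
      names.add(Key.slice(Prefix.length))
    }
    // "subdirectories", grouped by the delimiter
    for (const { Prefix: sub } of result.CommonPrefixes ?? []) {
      names.add(sub.slice(Prefix.length))
    }
    ContinuationToken = result.NextContinuationToken
  } while (ContinuationToken !== undefined)
  return [...names]
}
```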
   async _mkdir(path) {
-    if (await this._isFile(path)) {
+    if (await this.#isFile(path)) {
       const error = new Error(`ENOTDIR: file already exists, mkdir '${path}'`)
       error.code = 'ENOTDIR'
       error.path = path
@@ -316,15 +310,15 @@ export default class S3Handler extends RemoteHandlerAbstract {

   // s3 doesn't have a rename operation, so copy + delete source
   async _rename(oldPath, newPath) {
-    await this.copy(oldPath, newPath)
-    await this._s3.send(new DeleteObjectCommand(this._createParams(oldPath)))
+    await this.__copy(oldPath, newPath)
+    await this.#s3.send(new DeleteObjectCommand(this.#createParams(oldPath)))
   }

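Editor's note: since S3 has no native rename, the handler emulates it as a server-side copy followed by a delete of the source. This is not atomic: a crash between the two requests can leave both objects behind. A hedged sketch of the same emulation (names are illustrative):

```js
// Hedged sketch: emulate rename on S3 as copy + delete. Not atomic:
// a failure between the two requests leaves the source object in place.
import { CopyObjectCommand, DeleteObjectCommand } from '@aws-sdk/client-s3'

async function rename(s3, Bucket, oldKey, newKey) {
  await s3.send(new CopyObjectCommand({ Bucket, Key: newKey, CopySource: `${Bucket}/${oldKey}` }))
  await s3.send(new DeleteObjectCommand({ Bucket, Key: oldKey }))
}
```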
   async _getSize(file) {
     if (typeof file !== 'string') {
       file = file.fd
     }
-    const result = await this._s3.send(new HeadObjectCommand(this._createParams(file)))
+    const result = await this.#s3.send(new HeadObjectCommand(this.#createParams(file)))
     return +result.ContentLength
   }

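Editor's note: HEAD is the cheap way to size an object, since it returns only the metadata, including `ContentLength`, without transferring the body. A hedged sketch:

```js
// Hedged sketch: fetch an object's size via a HEAD request only.
import { HeadObjectCommand } from '@aws-sdk/client-s3'

async function getSize(s3, Bucket, Key) {
  const { ContentLength } = await s3.send(new HeadObjectCommand({ Bucket, Key }))
  return Number(ContentLength) // coerce, mirroring the handler's `+result.ContentLength`
}
```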
@@ -332,15 +326,15 @@ export default class S3Handler extends RemoteHandlerAbstract {
     if (typeof file !== 'string') {
       file = file.fd
     }
-    const params = this._createParams(file)
+    const params = this.#createParams(file)
     params.Range = `bytes=${position}-${position + buffer.length - 1}`
     try {
-      const result = await this._s3.send(new GetObjectCommand(params))
+      const result = await this.#s3.send(new GetObjectCommand(params))
       const bytesRead = await copyStreamToBuffer(result.Body, buffer)
       return { bytesRead, buffer }
     } catch (e) {
       if (e.name === 'NoSuchKey') {
-        if (await this._isNotEmptyDir(file)) {
+        if (await this.#isNotEmptyDir(file)) {
           const error = new Error(`${file} is a directory`)
           error.code = 'EISDIR'
           error.path = file
@@ -352,7 +346,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
   }

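Editor's note: the `Range` header turns `GetObject` into a positional read. The range is inclusive on both ends, so a request for `buffer.length` bytes starting at `position` ends at `position + buffer.length - 1`. A hedged sketch of such a read into a caller-supplied buffer (the inline stream-to-buffer loop stands in for the handler's `copyStreamToBuffer` helper):

```js
// Hedged sketch: positional read via an HTTP Range request.
import { GetObjectCommand } from '@aws-sdk/client-s3'

async function readAt(s3, Bucket, Key, buffer, position) {
  const result = await s3.send(
    new GetObjectCommand({
      Bucket,
      Key,
      // inclusive range: asking for buffer.length bytes starting at `position`
      Range: `bytes=${position}-${position + buffer.length - 1}`,
    })
  )
  let offset = 0
  for await (const chunk of result.Body) {
    offset += chunk.copy(buffer, offset) // copy each chunk into the target buffer
  }
  return { bytesRead: offset, buffer }
}
```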
   async _rmdir(path) {
-    if (await this._isNotEmptyDir(path)) {
+    if (await this.#isNotEmptyDir(path)) {
       const error = new Error(`ENOTEMPTY: directory not empty, rmdir '${path}'`)
       error.code = 'ENOTEMPTY'
       error.path = path
@@ -366,11 +360,11 @@ export default class S3Handler extends RemoteHandlerAbstract {
   // @todo: use parallel processing for unlink
   async _rmtree(path) {
     let NextContinuationToken
-    const Prefix = this._makePrefix(path)
+    const Prefix = this.#makePrefix(path)
     do {
-      const result = await this._s3.send(
+      const result = await this.#s3.send(
         new ListObjectsV2Command({
-          Bucket: this._bucket,
+          Bucket: this.#bucket,
           Prefix,
           ContinuationToken: NextContinuationToken,
         })
@@ -382,9 +376,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
       async ({ Key }) => {
         // _unlink would add the prefix, but Key already contains everything
         // no need to check whether we are deleting a directory: the listing only returns files
-        await this._s3.send(
+        await this.#s3.send(
           new DeleteObjectCommand({
-            Bucket: this._bucket,
+            Bucket: this.#bucket,
             Key,
           })
         )
@@ -396,138 +390,6 @@ export default class S3Handler extends RemoteHandlerAbstract {
     } while (NextContinuationToken !== undefined)
   }

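Editor's note: the `@todo` above flags that deletions run one `DeleteObject` per key. S3 also offers a batch primitive, `DeleteObjects`, which removes up to 1000 keys per request. A hedged sketch of a batched rmtree (this is the alternative the `@todo` hints at, not what the commit implements):

```js
// Hedged sketch: recursive delete using the DeleteObjects batch API
// (up to 1000 keys per request) instead of one DeleteObject per key.
import { ListObjectsV2Command, DeleteObjectsCommand } from '@aws-sdk/client-s3'

async function rmtree(s3, Bucket, Prefix) {
  let ContinuationToken
  do {
    const result = await s3.send(new ListObjectsV2Command({ Bucket, Prefix, ContinuationToken }))
    const objects = (result.Contents ?? []).map(({ Key }) => ({ Key }))
    if (objects.length > 0) {
      // ListObjectsV2 returns at most 1000 keys per page, which matches
      // the DeleteObjects batch limit, so one delete call per page suffices
      await s3.send(new DeleteObjectsCommand({ Bucket, Delete: { Objects: objects } }))
    }
    ContinuationToken = result.NextContinuationToken
  } while (ContinuationToken !== undefined)
}
```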
-  async _write(file, buffer, position) {
-    if (typeof file !== 'string') {
-      file = file.fd
-    }
-    const uploadParams = this._createParams(file)
-    let fileSize
-    try {
-      fileSize = +(await this._s3.send(new HeadObjectCommand(uploadParams))).ContentLength
-    } catch (e) {
-      if (e.name === 'NotFound') {
-        fileSize = 0
-      } else {
-        throw e
-      }
-    }
-    if (fileSize < MIN_PART_SIZE) {
-      const resultBuffer = Buffer.alloc(Math.max(fileSize, position + buffer.length))
-      if (fileSize !== 0) {
-        const result = await this._s3.send(new GetObjectCommand(uploadParams))
-        await copyStreamToBuffer(result.Body, resultBuffer)
-      } else {
-        Buffer.alloc(0).copy(resultBuffer)
-      }
-      buffer.copy(resultBuffer, position)
-      await this._s3.send(
-        new PutObjectCommand({
-          ...uploadParams,
-          Body: resultBuffer,
-        })
-      )
-      return { buffer, bytesWritten: buffer.length }
-    } else {
-      // using this trick: https://stackoverflow.com/a/38089437/72637
-      // multipart fragments have a minimum size of 5 MB and a maximum of 5 GB, unless they are last
-      // splitting the file in 3 parts: [prefix, edit, suffix]
-      // if `prefix` is bigger than 5 MB, it will be sourced from uploadPartCopy()
-      // otherwise it will be downloaded and concatenated to `edit`
-      // `edit` will always be an uploaded part
-      // `suffix` will always be sourced from uploadPartCopy()
-      // then everything is sliced into parts of at most 5 GB before getting uploaded
-      const multipartParams = await this._s3.send(new CreateMultipartUploadCommand(uploadParams))
-      const copyMultipartParams = {
-        ...multipartParams,
-        CopySource: this._makeCopySource(file),
-      }
-      try {
-        const parts = []
-        const prefixSize = position
-        let suffixOffset = prefixSize + buffer.length
-        let suffixSize = Math.max(0, fileSize - suffixOffset)
-        let hasSuffix = suffixSize > 0
-        let editBuffer = buffer
-        let editBufferOffset = position
-        let partNumber = 1
-        let prefixPosition = 0
-        // use floor() so that the last fragment is handled in the if below
-        let fragmentsCount = Math.floor(prefixSize / MAX_PART_SIZE)
-        const prefixFragmentSize = MAX_PART_SIZE
-        let prefixLastFragmentSize = prefixSize - prefixFragmentSize * fragmentsCount
-        if (prefixLastFragmentSize >= MIN_PART_SIZE) {
-          // the last fragment of the prefix is smaller than MAX_PART_SIZE, but bigger than the minimum
-          // so we can copy it too
-          fragmentsCount++
-          prefixLastFragmentSize = 0
-        }
-        for (let i = 0; i < fragmentsCount; i++) {
-          const fragmentEnd = Math.min(prefixPosition + prefixFragmentSize, prefixSize)
-          assert.strictEqual(fragmentEnd - prefixPosition <= MAX_PART_SIZE, true)
-          const range = `bytes=${prefixPosition}-${fragmentEnd - 1}`
-          const copyPrefixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: range }
-          const part = await this._s3.send(new UploadPartCopyCommand(copyPrefixParams))
-          parts.push({ ETag: part.CopyPartResult.ETag, PartNumber: copyPrefixParams.PartNumber })
-          prefixPosition += prefixFragmentSize
-        }
-        if (prefixLastFragmentSize) {
-          // grab everything from the prefix that was too small to be copied, download it and merge it into the edit buffer
-          const downloadParams = { ...uploadParams, Range: `bytes=${prefixPosition}-${prefixSize - 1}` }
-          let prefixBuffer
-          if (prefixSize > 0) {
-            const result = await this._s3.send(new GetObjectCommand(downloadParams))
-            prefixBuffer = await createBufferFromStream(result.Body)
-          } else {
-            prefixBuffer = Buffer.alloc(0)
-          }
-          editBuffer = Buffer.concat([prefixBuffer, buffer])
-          editBufferOffset -= prefixLastFragmentSize
-        }
-        if (hasSuffix && editBuffer.length < MIN_PART_SIZE) {
-          // the edit fragment is too short and is not the last fragment
-          // let's steal from the suffix fragment to reach the minimum size
-          // the suffix itself might be too short and be entirely absorbed into the edit fragment, making it the last one
-          const complementSize = Math.min(MIN_PART_SIZE - editBuffer.length, suffixSize)
-          const complementOffset = editBufferOffset + editBuffer.length
-          suffixOffset += complementSize
-          suffixSize -= complementSize
-          hasSuffix = suffixSize > 0
-          const prefixRange = `bytes=${complementOffset}-${complementOffset + complementSize - 1}`
-          const downloadParams = { ...uploadParams, Range: prefixRange }
-          const result = await this._s3.send(new GetObjectCommand(downloadParams))
-          const complementBuffer = await createBufferFromStream(result.Body)
-          editBuffer = Buffer.concat([editBuffer, complementBuffer])
-        }
-        const editParams = { ...multipartParams, Body: editBuffer, PartNumber: partNumber++ }
-        const editPart = await this._s3.send(new UploadPartCommand(editParams))
-        parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
-        if (hasSuffix) {
-          // use ceil() because the last fragment can be arbitrarily small
-          const suffixFragments = Math.ceil(suffixSize / MAX_PART_SIZE)
-          let suffixFragmentOffset = suffixOffset
-          for (let i = 0; i < suffixFragments; i++) {
-            const fragmentEnd = suffixFragmentOffset + MAX_PART_SIZE
-            assert.strictEqual(Math.min(fileSize, fragmentEnd) - suffixFragmentOffset <= MAX_PART_SIZE, true)
-            const suffixRange = `bytes=${suffixFragmentOffset}-${Math.min(fileSize, fragmentEnd) - 1}`
-            const copySuffixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: suffixRange }
-            const suffixPart = (await this._s3.send(new UploadPartCopyCommand(copySuffixParams))).CopyPartResult
-            parts.push({ ETag: suffixPart.ETag, PartNumber: copySuffixParams.PartNumber })
-            suffixFragmentOffset = fragmentEnd
-          }
-        }
-        await this._s3.send(
-          new CompleteMultipartUploadCommand({
-            ...multipartParams,
-            MultipartUpload: { Parts: parts },
-          })
-        )
-      } catch (e) {
-        await this._s3.send(new AbortMultipartUploadCommand(multipartParams))
-        throw e
-      }
-    }
-  }

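Editor's note: as a sanity check on the partitioning logic of the `_write` method removed above, here is a hedged worked example of its prefix/edit/suffix arithmetic, using the 5 MB minimum and 5 GB maximum stated in the original comments and made-up sizes:

```js
// Hedged worked example of the removed method's partitioning, assuming
// MIN_PART_SIZE = 5 MiB and MAX_PART_SIZE = 5 GiB as in the original file.
const MiB = 1024 * 1024
const MIN_PART_SIZE = 5 * MiB
const MAX_PART_SIZE = 5 * 1024 * MiB

// Writing 1 MiB at position 8 MiB into an existing 20 MiB object:
const fileSize = 20 * MiB
const position = 8 * MiB
const editLength = 1 * MiB

const prefixSize = position // 8 MiB before the edit
let suffixSize = fileSize - (position + editLength) // 11 MiB after it

// floor() leaves the prefix remainder to the check below
let prefixFragments = Math.floor(prefixSize / MAX_PART_SIZE) // 0
const remainder = prefixSize - prefixFragments * MAX_PART_SIZE // 8 MiB
if (remainder >= MIN_PART_SIZE) prefixFragments++ // 8 MiB >= 5 MiB: server-side copy it as part 1

// the 1 MiB edit is under the 5 MiB minimum and not the last part,
// so it steals 4 MiB from the suffix to reach MIN_PART_SIZE
const stolen = Math.min(MIN_PART_SIZE - editLength, suffixSize) // 4 MiB
suffixSize -= stolen // 7 MiB left, server-side copied as part 3

console.log({ prefixFragments, editPartSize: editLength + stolen, suffixSize })
// parts: 8 MiB copy + 5 MiB upload + 7 MiB copy = 20 MiB, the full object
```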
   async _openFile(path, flags) {
     return path
   }

@@ -11,6 +11,12 @@
- Display network throughput chart in pool dashboard (PR [#6610](https://github.com/vatesfr/xen-orchestra/pull/6610))
- Display RAM usage chart in pool dashboard (PR [#6604](https://github.com/vatesfr/xen-orchestra/pull/6604))
- Ability to change the state of a VM (PRs [#6571](https://github.com/vatesfr/xen-orchestra/pull/6571) [#6608](https://github.com/vatesfr/xen-orchestra/pull/6608))
- Display CPU provisioning in pool dashboard (PR [#6601](https://github.com/vatesfr/xen-orchestra/pull/6601))
- Add a star icon near the pool master (PR [#6712](https://github.com/vatesfr/xen-orchestra/pull/6712))
- Display an error message if the data cannot be fetched (PR [#6525](https://github.com/vatesfr/xen-orchestra/pull/6525))
- Add "Under Construction" views (PR [#6673](https://github.com/vatesfr/xen-orchestra/pull/6673))
- Ability to change the state of selected VMs from the pool's list of VMs (PR [#6782](https://github.com/vatesfr/xen-orchestra/pull/6782))
- Ability to copy selected VMs from the pool's list of VMs (PR [#6847](https://github.com/vatesfr/xen-orchestra/pull/6847))

## **0.1.0**