Compare commits
484 commits
xen-api-ge...feat_add_c
| Author | SHA1 | Date |
| --- | --- | --- |

*(commit rows omitted: only bare SHA1 hashes survived the page export, with no author, date, or message information)*
20
.eslintrc.js
20
.eslintrc.js
@@ -1,7 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
module.exports = {
|
||||
extends: ['plugin:eslint-comments/recommended', 'standard', 'standard-jsx', 'prettier'],
|
||||
extends: ['plugin:eslint-comments/recommended', 'plugin:n/recommended', 'standard', 'standard-jsx', 'prettier'],
|
||||
globals: {
|
||||
__DEV__: true,
|
||||
$Dict: true,
|
||||
@@ -17,6 +17,7 @@ module.exports = {
|
||||
{
|
||||
files: ['cli.{,c,m}js', '*-cli.{,c,m}js', '**/*cli*/**/*.{,c,m}js'],
|
||||
rules: {
|
||||
'n/no-process-exit': 'off',
|
||||
'no-console': 'off',
|
||||
},
|
||||
},
|
||||
@@ -26,6 +27,23 @@ module.exports = {
|
||||
sourceType: 'module',
|
||||
},
|
||||
},
|
||||
{
|
||||
files: ['*.spec.{,c,m}js'],
|
||||
rules: {
|
||||
'n/no-unsupported-features/node-builtins': [
|
||||
'error',
|
||||
{
|
||||
version: '>=16',
|
||||
},
|
||||
],
|
||||
'n/no-unsupported-features/es-syntax': [
|
||||
'error',
|
||||
{
|
||||
version: '>=16',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
|
||||
parserOptions: {
|
||||
|
||||
16
.flowconfig
16
.flowconfig
@@ -1,16 +0,0 @@
|
||||
[ignore]
|
||||
<PROJECT_ROOT>/node_modules/.*
|
||||
|
||||
[include]
|
||||
|
||||
[libs]
|
||||
|
||||
[lints]
|
||||
|
||||
[options]
|
||||
esproposal.decorators=ignore
|
||||
esproposal.optional_chaining=enable
|
||||
include_warnings=true
|
||||
module.use_strict=true
|
||||
|
||||
[strict]
|
||||
14
.github/ISSUE_TEMPLATE/bug_report.md
vendored
14
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -6,6 +6,18 @@ labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
**XOA or XO from the sources?**
|
||||
|
||||
If XOA:
|
||||
|
||||
- which release channel? (`stable` vs `latest`)
|
||||
- please consider creating a support ticket in [your dedicated support area](https://xen-orchestra.com/#!/member/support)
|
||||
|
||||
If XO from the sources:
|
||||
|
||||
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
|
||||
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
@@ -23,7 +35,7 @@ A clear and concise description of what you expected to happen.
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Desktop (please complete the following information):**
|
||||
**Environment (please provide the following information):**
|
||||
|
||||
- Node: [e.g. 16.12.1]
|
||||
- xo-server: [e.g. 5.82.3]
|
||||
|
||||
1
.github/ISSUE_TEMPLATE/feature_request.md
vendored
1
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -4,7 +4,6 @@ about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
|
||||
13
.github/workflows/push.yml
vendored
Normal file
13
.github/workflows/push.yml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
name: CI
|
||||
on: [push]
|
||||
jobs:
|
||||
build:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
- run: docker-compose -f docker/docker-compose.dev.yml build
|
||||
- run: docker-compose -f docker/docker-compose.dev.yml up
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -1,5 +1,4 @@
|
||||
/_book/
|
||||
/coverage/
|
||||
/node_modules/
|
||||
/lerna-debug.log
|
||||
/lerna-debug.log.*
|
||||
@@ -11,10 +10,6 @@
|
||||
/packages/*/dist/
|
||||
/packages/*/node_modules/
|
||||
|
||||
/@xen-orchestra/proxy/src/app/mixins/index.mjs
|
||||
|
||||
/packages/vhd-cli/src/commands/index.js
|
||||
|
||||
/packages/xen-api/examples/node_modules/
|
||||
/packages/xen-api/plot.dat
|
||||
|
||||
@@ -36,5 +31,6 @@ yarn-error.log
|
||||
yarn-error.log.*
|
||||
.env
|
||||
|
||||
# nyc test coverage
|
||||
.nyc_output
|
||||
# code coverage
|
||||
.nyc_output/
|
||||
coverage/
|
||||
|
||||
23
.travis.yml
23
.travis.yml
@@ -1,23 +0,0 @@
|
||||
language: node_js
|
||||
node_js:
|
||||
- 14
|
||||
|
||||
# Use containers.
|
||||
# http://docs.travis-ci.com/user/workers/container-based-infrastructure/
|
||||
sudo: false
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- qemu-utils
|
||||
- blktap-utils
|
||||
- vmdk-stream-converter
|
||||
|
||||
before_install:
|
||||
- curl -o- -L https://yarnpkg.com/install.sh | bash
|
||||
- export PATH="$HOME/.yarn/bin:$PATH"
|
||||
|
||||
cache:
|
||||
yarn: true
|
||||
|
||||
script:
|
||||
- yarn run travis-tests
|
||||
@@ -14,7 +14,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promise

`opts` is an object that can contain the following options:

- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

@@ -32,7 +32,7 @@ Returns a promise which rejects as soon as a call to `iteratee` throws or a promise

`opts` is an object that can contain the following options:

- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
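Illustrative sketch (not taken from the repository diff) of how the options described above combine; `asyncEach`, `concurrency`, `signal`, and `stopOnError` are the documented API, while the item list and simulated work are made up:

```js
import { asyncEach } from '@vates/async-each'

const controller = new AbortController()
const items = ['a', 'b', 'c', 'd']

await asyncEach(
  items,
  async (item, index) => {
    // simulate some asynchronous work per item
    await new Promise(resolve => setTimeout(resolve, 10))
    console.log(index, item)
  },
  {
    concurrency: 2, // at most 2 calls to the iteratee in flight
    signal: controller.signal, // calling controller.abort() stops the iteration
    stopOnError: false, // wait for all calls, then throw an AggregateError if any failed
  }
)
```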
@@ -9,7 +9,16 @@ class AggregateError extends Error {
|
||||
}
|
||||
}
|
||||
|
||||
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
|
||||
/**
|
||||
* @template Item
|
||||
* @param {Iterable<Item>} iterable
|
||||
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
|
||||
if (concurrency === 0) {
|
||||
concurrency = Infinity
|
||||
}
|
||||
return new Promise((resolve, reject) => {
|
||||
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
|
||||
const errors = []
|
||||
|
||||
@@ -36,7 +36,7 @@ describe('asyncEach', () => {
|
||||
it('works', async () => {
|
||||
const iteratee = jest.fn(async () => {})
|
||||
|
||||
await asyncEach.call(thisArg, iterable, iteratee)
|
||||
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
|
||||
|
||||
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
|
||||
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
|
||||
@@ -66,7 +66,7 @@ describe('asyncEach', () => {
|
||||
}
|
||||
})
|
||||
|
||||
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
|
||||
expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
|
||||
expect(iteratee).toHaveBeenCalledTimes(2)
|
||||
})
|
||||
|
||||
@@ -91,7 +91,9 @@ describe('asyncEach', () => {
|
||||
}
|
||||
})
|
||||
|
||||
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
|
||||
await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
|
||||
'asyncEach aborted'
|
||||
)
|
||||
expect(iteratee).toHaveBeenCalledTimes(2)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "0.1.0",
|
||||
"version": "1.0.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
|
||||
30 @vates/cached-dns.lookup/.USAGE.md (Normal file)
@@ -0,0 +1,30 @@
Node does not cache queries to `dns.lookup`, which can lead applications that make a lot of connections to have performance issues and to saturate Node's thread pool.

This library attempts to mitigate these problems by providing a version of this function with a short cache, applied to both errors and results.

> Limitation: the `verbatim: false` option is not supported.

It has exactly the same API as the native method and can be used directly:

```js
import { createCachedLookup } from '@vates/cached-dns.lookup'

const lookup = createCachedLookup()

lookup('example.net', { all: true, family: 0 }, (error, result) => {
  if (error != null) {
    return console.warn(error)
  }
  console.log(result)
})
```

Or it can be used to replace the native implementation and speed up the whole app:

```js
// assign our cached implementation to dns.lookup
const restore = createCachedLookup().patchGlobal()

// to restore the previous implementation
restore()
```
1
@vates/cached-dns.lookup/.npmignore
Symbolic link
1
@vates/cached-dns.lookup/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
63
@vates/cached-dns.lookup/README.md
Normal file
63
@vates/cached-dns.lookup/README.md
Normal file
@@ -0,0 +1,63 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/cached-dns.lookup
|
||||
|
||||
[](https://npmjs.org/package/@vates/cached-dns.lookup)  [](https://bundlephobia.com/result?p=@vates/cached-dns.lookup) [](https://npmjs.org/package/@vates/cached-dns.lookup)
|
||||
|
||||
> Cached implementation of dns.lookup
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/cached-dns.lookup):
|
||||
|
||||
```
|
||||
> npm install --save @vates/cached-dns.lookup
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Node does not cache queries to `dns.lookup`, which can lead applications that make a lot of connections to have performance issues and to saturate Node's thread pool.
|
||||
|
||||
This library attempts to mitigate these problems by providing a version of this function with a short cache, applied to both errors and results.
|
||||
|
||||
> Limitation: `verbatim: false` option is not supported.
|
||||
|
||||
It has exactly the same API as the native method and can be used directly:
|
||||
|
||||
```js
|
||||
import { createCachedLookup } from '@vates/cached-dns.lookup'
|
||||
|
||||
const lookup = createCachedLookup()
|
||||
|
||||
lookup('example.net', { all: true, family: 0 }, (error, result) => {
|
||||
if (error != null) {
|
||||
return console.warn(error)
|
||||
}
|
||||
console.log(result)
|
||||
})
|
||||
```
|
||||
|
||||
Or it can be used to replace the native implementation and speed up the whole app:
|
||||
|
||||
```js
|
||||
// assign our cached implementation to dns.lookup
|
||||
const restore = createCachedLookup().patchGlobal()
|
||||
|
||||
// to restore the previous implementation
|
||||
restore()
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
72
@vates/cached-dns.lookup/index.js
Normal file
72
@vates/cached-dns.lookup/index.js
Normal file
@@ -0,0 +1,72 @@
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const dns = require('dns')
|
||||
const LRU = require('lru-cache')
|
||||
|
||||
function reportResults(all, results, callback) {
|
||||
if (all) {
|
||||
callback(null, results)
|
||||
} else {
|
||||
const first = results[0]
|
||||
callback(null, first.address, first.family)
|
||||
}
|
||||
}
|
||||
|
||||
exports.createCachedLookup = function createCachedLookup({ lookup = dns.lookup } = {}) {
|
||||
const cache = new LRU({
|
||||
max: 500,
|
||||
|
||||
// 1 minute: long enough to be effective, short enough so there is no need to bother with DNS TTLs
|
||||
ttl: 60e3,
|
||||
})
|
||||
|
||||
function cachedLookup(hostname, options, callback) {
|
||||
let all = false
|
||||
let family = 0
|
||||
if (typeof options === 'function') {
|
||||
callback = options
|
||||
} else if (typeof options === 'number') {
|
||||
family = options
|
||||
} else if (options != null) {
|
||||
assert.notStrictEqual(options.verbatim, false, 'not supported by this implementation')
|
||||
;({ all = all, family = family } = options)
|
||||
}
|
||||
|
||||
// cache by family option because there will be an error if there are no
// entries for the requested family, so we cannot easily cache all families
// and filter on reporting back
|
||||
const key = hostname + '/' + family
|
||||
|
||||
const result = cache.get(key)
|
||||
if (result !== undefined) {
|
||||
setImmediate(reportResults, all, result, callback)
|
||||
} else {
|
||||
lookup(hostname, { all: true, family, verbatim: true }, function onLookup(error, results) {
|
||||
// errors are not cached because that would delay recovery after DNS/network issues
//
// there is no reliable way to detect whether the error is real or simply
// that there are no results for the requested hostname
//
// there should be far fewer errors than successes, therefore it should
// not be a big deal to not cache them
|
||||
if (error != null) {
|
||||
return callback(error)
|
||||
}
|
||||
|
||||
cache.set(key, results)
|
||||
reportResults(all, results, callback)
|
||||
})
|
||||
}
|
||||
}
|
||||
cachedLookup.patchGlobal = function patchGlobal() {
|
||||
const previous = dns.lookup
|
||||
dns.lookup = cachedLookup
|
||||
return function restoreGlobal() {
|
||||
assert.strictEqual(dns.lookup, cachedLookup)
|
||||
dns.lookup = previous
|
||||
}
|
||||
}
|
||||
|
||||
return cachedLookup
|
||||
}
|
||||
32
@vates/cached-dns.lookup/package.json
Normal file
32
@vates/cached-dns.lookup/package.json
Normal file
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"dependencies": {
|
||||
"lru-cache": "^7.0.4"
|
||||
},
|
||||
"private": false,
|
||||
"name": "@vates/cached-dns.lookup",
|
||||
"description": "Cached implementation of dns.lookup",
|
||||
"keywords": [
|
||||
"cache",
|
||||
"dns",
|
||||
"lookup"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/cached-dns.lookup",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/cached-dns.lookup",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "1.0.0",
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
}
|
||||
}
|
||||
@@ -13,15 +13,19 @@ class Foo {
|
||||
}
|
||||
```
|
||||
|
||||
### `decorateMethodsWith(class, map)`
|
||||
### `decorateClass(class, map)`
|
||||
|
||||
Decorates a number of methods directly, without using the decorator syntax:
|
||||
Decorates a number of accessors and methods directly, without using the decorator syntax:
|
||||
|
||||
```js
|
||||
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||
import { decorateClass } from '@vates/decorate-with'
|
||||
|
||||
class Foo {
|
||||
bar() {
|
||||
get bar() {
|
||||
// body
|
||||
}
|
||||
|
||||
set bar(value) {
|
||||
// body
|
||||
}
|
||||
|
||||
@@ -30,22 +34,28 @@ class Foo {
|
||||
}
|
||||
}
|
||||
|
||||
decorateMethodsWith(Foo, {
|
||||
// without arguments
|
||||
bar: lodash.curry,
|
||||
decorateClass(Foo, {
|
||||
// getter and/or setter
|
||||
bar: {
|
||||
// without arguments
|
||||
get: lodash.memoize,
|
||||
|
||||
// with arguments
|
||||
baz: [lodash.debounce, 150],
|
||||
// with arguments
|
||||
set: [lodash.debounce, 150],
|
||||
},
|
||||
|
||||
// method (with or without arguments)
|
||||
baz: lodash.curry,
|
||||
})
|
||||
```
|
||||
|
||||
The decorated class is returned, so you can export it directly.
|
||||
|
||||
To apply multiple transforms to a method, you can either call `decorateMethodsWith` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
|
||||
To apply multiple transforms to an accessor/method, you can either call `decorateClass` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
|
||||
|
||||
```js
|
||||
decorateMethodsWith(Foo, {
|
||||
bar: compose([
|
||||
decorateClass(Foo, {
|
||||
baz: compose([
|
||||
[lodash.debounce, 150]
|
||||
lodash.curry,
|
||||
])
|
||||
@@ -69,4 +79,8 @@ class Foo {
|
||||
}
|
||||
```
|
||||
|
||||
Because it's a normal function, it can also be used with `decorateMethodsWith`, with `compose` or even by itself.
|
||||
Because it's a normal function, it can also be used with `decorateClass`, with `compose` or even by itself.
|
||||
|
||||
### `decorateMethodsWith(class, map)`
|
||||
|
||||
> Deprecated alias for [`decorateClass(class, map)`](#decorateclassclass-map).
|
||||
|
||||
@@ -31,15 +31,19 @@ class Foo {
|
||||
}
|
||||
```
|
||||
|
||||
### `decorateMethodsWith(class, map)`
|
||||
### `decorateClass(class, map)`
|
||||
|
||||
Decorates a number of methods directly, without using the decorator syntax:
|
||||
Decorates a number of accessors and methods directly, without using the decorator syntax:
|
||||
|
||||
```js
|
||||
import { decorateMethodsWith } from '@vates/decorate-with'
|
||||
import { decorateClass } from '@vates/decorate-with'
|
||||
|
||||
class Foo {
|
||||
bar() {
|
||||
get bar() {
|
||||
// body
|
||||
}
|
||||
|
||||
set bar(value) {
|
||||
// body
|
||||
}
|
||||
|
||||
@@ -48,22 +52,28 @@ class Foo {
|
||||
}
|
||||
}
|
||||
|
||||
decorateMethodsWith(Foo, {
|
||||
// without arguments
|
||||
bar: lodash.curry,
|
||||
decorateClass(Foo, {
|
||||
// getter and/or setter
|
||||
bar: {
|
||||
// without arguments
|
||||
get: lodash.memoize,
|
||||
|
||||
// with arguments
|
||||
baz: [lodash.debounce, 150],
|
||||
// with arguments
|
||||
set: [lodash.debounce, 150],
|
||||
},
|
||||
|
||||
// method (with or without arguments)
|
||||
baz: lodash.curry,
|
||||
})
|
||||
```
|
||||
|
||||
The decorated class is returned, so you can export it directly.
|
||||
|
||||
To apply multiple transforms to a method, you can either call `decorateMethodsWith` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
|
||||
To apply multiple transforms to an accessor/method, you can either call `decorateClass` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
|
||||
|
||||
```js
|
||||
decorateMethodsWith(Foo, {
|
||||
bar: compose([
|
||||
decorateClass(Foo, {
|
||||
baz: compose([
|
||||
[lodash.debounce, 150]
|
||||
lodash.curry,
|
||||
])
|
||||
@@ -87,7 +97,11 @@ class Foo {
|
||||
}
|
||||
```
|
||||
|
||||
Because it's a normal function, it can also be used with `decorateMethodsWith`, with `compose` or even by itself.
|
||||
Because it's a normal function, it can also be used with `decorateClass`, with `compose` or even by itself.
|
||||
|
||||
### `decorateMethodsWith(class, map)`
|
||||
|
||||
> Deprecated alias for [`decorateClass(class, map)`](#decorateclassclass-map).
|
||||
|
||||
## Contributions
|
||||
|
||||
|
||||
@@ -9,14 +9,27 @@ exports.decorateWith = function decorateWith(fn, ...args) {
|
||||
|
||||
const { getOwnPropertyDescriptor, defineProperty } = Object
|
||||
|
||||
exports.decorateMethodsWith = function decorateMethodsWith(klass, map) {
|
||||
function applyDecorator(decorator, value) {
|
||||
return typeof decorator === 'function' ? decorator(value) : decorator[0](value, ...decorator.slice(1))
|
||||
}
|
||||
|
||||
exports.decorateClass = exports.decorateMethodsWith = function decorateClass(klass, map) {
|
||||
const { prototype } = klass
|
||||
for (const name of Object.keys(map)) {
|
||||
const descriptor = getOwnPropertyDescriptor(prototype, name)
|
||||
const { value } = descriptor
|
||||
|
||||
const decorator = map[name]
|
||||
descriptor.value = typeof decorator === 'function' ? decorator(value) : decorator[0](value, ...decorator.slice(1))
|
||||
const descriptor = getOwnPropertyDescriptor(prototype, name)
|
||||
if (typeof decorator === 'function' || Array.isArray(decorator)) {
|
||||
descriptor.value = applyDecorator(decorator, descriptor.value)
|
||||
} else {
|
||||
const { get, set } = decorator
|
||||
if (get !== undefined) {
|
||||
descriptor.get = applyDecorator(get, descriptor.get)
|
||||
}
|
||||
if (set !== undefined) {
|
||||
descriptor.set = applyDecorator(set, descriptor.set)
|
||||
}
|
||||
}
|
||||
|
||||
defineProperty(prototype, name, descriptor)
|
||||
}
|
||||
return klass
|
||||
|
||||
@@ -3,7 +3,9 @@
|
||||
const assert = require('assert')
|
||||
const { describe, it } = require('tap').mocha
|
||||
|
||||
const { decorateWith, decorateMethodsWith, perInstance } = require('./')
|
||||
const { decorateClass, decorateWith, decorateMethodsWith, perInstance } = require('./')
|
||||
|
||||
const identity = _ => _
|
||||
|
||||
describe('decorateWith', () => {
|
||||
it('works', () => {
|
||||
@@ -31,11 +33,14 @@ describe('decorateWith', () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('decorateMethodsWith', () => {
|
||||
describe('decorateClass', () => {
|
||||
it('works', () => {
|
||||
class C {
|
||||
foo() {}
|
||||
bar() {}
|
||||
get baz() {}
|
||||
// eslint-disable-next-line accessor-pairs
|
||||
set qux(_) {}
|
||||
}
|
||||
|
||||
const expectedArgs = [Math.random(), Math.random()]
|
||||
@@ -45,27 +50,74 @@ describe('decorateMethodsWith', () => {
|
||||
|
||||
const newFoo = () => {}
|
||||
const newBar = () => {}
|
||||
const newGetBaz = () => {}
|
||||
const newSetQux = _ => {}
|
||||
|
||||
decorateMethodsWith(C, {
|
||||
foo(method) {
|
||||
decorateClass(C, {
|
||||
foo(fn) {
|
||||
assert.strictEqual(arguments.length, 1)
|
||||
assert.strictEqual(method, P.foo)
|
||||
assert.strictEqual(fn, P.foo)
|
||||
return newFoo
|
||||
},
|
||||
bar: [
|
||||
function (method, ...args) {
|
||||
assert.strictEqual(method, P.bar)
|
||||
function (fn, ...args) {
|
||||
assert.strictEqual(fn, P.bar)
|
||||
assert.deepStrictEqual(args, expectedArgs)
|
||||
return newBar
|
||||
},
|
||||
...expectedArgs,
|
||||
],
|
||||
baz: {
|
||||
get(fn) {
|
||||
assert.strictEqual(arguments.length, 1)
|
||||
assert.strictEqual(fn, descriptors.baz.get)
|
||||
return newGetBaz
|
||||
},
|
||||
},
|
||||
qux: {
|
||||
set: [
|
||||
function (fn, ...args) {
|
||||
assert.strictEqual(fn, descriptors.qux.set)
|
||||
assert.deepStrictEqual(args, expectedArgs)
|
||||
return newSetQux
|
||||
},
|
||||
...expectedArgs,
|
||||
],
|
||||
},
|
||||
})
|
||||
|
||||
const newDescriptors = Object.getOwnPropertyDescriptors(P)
|
||||
assert.deepStrictEqual(newDescriptors.foo, { ...descriptors.foo, value: newFoo })
|
||||
assert.deepStrictEqual(newDescriptors.bar, { ...descriptors.bar, value: newBar })
|
||||
assert.deepStrictEqual(newDescriptors.baz, { ...descriptors.baz, get: newGetBaz })
|
||||
assert.deepStrictEqual(newDescriptors.qux, { ...descriptors.qux, set: newSetQux })
|
||||
})
|
||||
|
||||
it('throws if using an accessor decorator for a method', function () {
|
||||
assert.throws(() =>
|
||||
decorateClass(
|
||||
class {
|
||||
foo() {}
|
||||
},
|
||||
{ foo: { get: identity, set: identity } }
|
||||
)
|
||||
)
|
||||
})
|
||||
|
||||
it('throws if using a method decorator for an accessor', function () {
|
||||
assert.throws(() =>
|
||||
decorateClass(
|
||||
class {
|
||||
get foo() {}
|
||||
},
|
||||
{ foo: identity }
|
||||
)
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
it('decorateMethodsWith is an alias of decorateClass', function () {
|
||||
assert.strictEqual(decorateMethodsWith, decorateClass)
|
||||
})
|
||||
|
||||
describe('perInstance', () => {
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "1.0.0",
|
||||
"version": "2.0.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
@@ -29,6 +29,6 @@
|
||||
"test": "tap"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^15.1.6"
|
||||
"tap": "^16.0.1"
|
||||
}
|
||||
}
|
||||
|
||||
50 @vates/event-listeners-manager/.USAGE.md (Normal file)
@@ -0,0 +1,50 @@
> This library is compatible with Node's `EventEmitter` and web browsers' `EventTarget` APIs.

### API

```js
import { EventListenersManager } from '@vates/event-listeners-manager'

const events = new EventListenersManager(emitter)

// adding listeners
events.add('foo', onFoo).add('bar', onBar).add('baz', onBaz)

// removing a specific listener
events.remove('foo', onFoo)

// removing all listeners for a specific event
events.removeAll('foo')

// removing all listeners
events.removeAll()
```

### Typical use case

> Removing all listeners when no longer necessary.

Manually:

```js
const onFoo = () => {}
const onBar = () => {}
const onBaz = () => {}
emitter.on('foo', onFoo).on('bar', onBar).on('baz', onBaz)

// CODE LOGIC

emitter.off('foo', onFoo).off('bar', onBar).off('baz', onBaz)
```

With this library:

```js
const events = new EventListenersManager(emitter)

events.add('foo', () => {}).add('bar', () => {}).add('baz', () => {})

// CODE LOGIC

events.removeAll()
```
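Illustrative sketch (not taken from the repository diff) of the `EventTarget` side of the compatibility claim above; the `AbortController` is just an example target, since its signal exposes `addEventListener`/`removeEventListener`:

```js
import { EventListenersManager } from '@vates/event-listeners-manager'

// an AbortSignal is an EventTarget, so the same manager works unchanged
const controller = new AbortController()
const events = new EventListenersManager(controller.signal)

events.add('abort', () => console.log('operation aborted'))

// CODE LOGIC

// detach every listener registered through the manager
events.removeAll()
```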
1
@vates/event-listeners-manager/.npmignore
Symbolic link
1
@vates/event-listeners-manager/.npmignore
Symbolic link
@@ -0,0 +1 @@
|
||||
../../scripts/npmignore
|
||||
81
@vates/event-listeners-manager/README.md
Normal file
81
@vates/event-listeners-manager/README.md
Normal file
@@ -0,0 +1,81 @@
|
||||
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
|
||||
|
||||
# @vates/event-listeners-manager
|
||||
|
||||
[](https://npmjs.org/package/@vates/event-listeners-manager)  [](https://bundlephobia.com/result?p=@vates/event-listeners-manager) [](https://npmjs.org/package/@vates/event-listeners-manager)
|
||||
|
||||
## Install
|
||||
|
||||
Installation of the [npm package](https://npmjs.org/package/@vates/event-listeners-manager):
|
||||
|
||||
```
|
||||
> npm install --save @vates/event-listeners-manager
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
> This library is compatible with Node's `EventEmitter` and web browsers' `EventTarget` APIs.
|
||||
|
||||
### API
|
||||
|
||||
```js
|
||||
import { EventListenersManager } from '@vates/event-listeners-manager'
|
||||
|
||||
const events = new EventListenersManager(emitter)
|
||||
|
||||
// adding listeners
|
||||
events.add('foo', onFoo).add('bar', onBar).on('baz', onBaz)
|
||||
|
||||
// removing a specific listener
|
||||
events.remove('foo', onFoo)
|
||||
|
||||
// removing all listeners for a specific event
|
||||
events.removeAll('foo')
|
||||
|
||||
// removing all listeners
|
||||
events.removeAll()
|
||||
```
|
||||
|
||||
### Typical use case
|
||||
|
||||
> Removing all listeners when no longer necessary.
|
||||
|
||||
Manually:
|
||||
|
||||
```js
|
||||
const onFoo = () => {}
|
||||
const onBar = () => {}
|
||||
const onBaz = () => {}
|
||||
emitter.on('foo', onFoo).on('bar', onBar).on('baz', onBaz)
|
||||
|
||||
// CODE LOGIC
|
||||
|
||||
emitter.off('foo', onFoo).off('bar', onBar).off('baz', onBaz)
|
||||
```
|
||||
|
||||
With this library:
|
||||
|
||||
```js
|
||||
const events = new EventListenersManager(emitter)
|
||||
|
||||
events.add('foo', () => {})).add('bar', () => {})).add('baz', () => {}))
|
||||
|
||||
// CODE LOGIC
|
||||
|
||||
events.removeAll()
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
the code.
|
||||
|
||||
You may:
|
||||
|
||||
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
|
||||
you've encountered;
|
||||
- fork and create a pull request.
|
||||
|
||||
## License
|
||||
|
||||
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)
|
||||
56
@vates/event-listeners-manager/index.js
Normal file
56
@vates/event-listeners-manager/index.js
Normal file
@@ -0,0 +1,56 @@
|
||||
'use strict'
|
||||
|
||||
exports.EventListenersManager = class EventListenersManager {
|
||||
constructor(emitter) {
|
||||
this._listeners = new Map()
|
||||
|
||||
this._add = (emitter.addListener || emitter.addEventListener).bind(emitter)
|
||||
this._remove = (emitter.removeListener || emitter.removeEventListener).bind(emitter)
|
||||
}
|
||||
|
||||
add(type, listener) {
|
||||
let listeners = this._listeners.get(type)
|
||||
if (listeners === undefined) {
|
||||
listeners = new Set()
|
||||
this._listeners.set(type, listeners)
|
||||
}
|
||||
|
||||
// don't add the same listener multiple times (allowed on Node.js)
|
||||
if (!listeners.has(listener)) {
|
||||
listeners.add(listener)
|
||||
this._add(type, listener)
|
||||
}
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
remove(type, listener) {
|
||||
const allListeners = this._listeners
|
||||
const listeners = allListeners.get(type)
|
||||
if (listeners !== undefined && listeners.delete(listener)) {
|
||||
this._remove(type, listener)
|
||||
if (listeners.size === 0) {
|
||||
allListeners.delete(type)
|
||||
}
|
||||
}
|
||||
|
||||
return this
|
||||
}
|
||||
|
||||
removeAll(type) {
|
||||
const allListeners = this._listeners
|
||||
const remove = this._remove
|
||||
const types = type !== undefined ? [type] : allListeners.keys()
|
||||
for (const type of types) {
|
||||
const listeners = allListeners.get(type)
|
||||
if (listeners !== undefined) {
|
||||
allListeners.delete(type)
|
||||
for (const listener of listeners) {
|
||||
remove(type, listener)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return this
|
||||
}
|
||||
}
|
||||
67
@vates/event-listeners-manager/index.spec.js
Normal file
67
@vates/event-listeners-manager/index.spec.js
Normal file
@@ -0,0 +1,67 @@
|
||||
'use strict'
|
||||
|
||||
const t = require('tap')
|
||||
const { EventEmitter } = require('events')
|
||||
|
||||
const { EventListenersManager } = require('./')
|
||||
|
||||
const noop = Function.prototype
|
||||
|
||||
// function spy (impl = Function.prototype) {
|
||||
// function spy() {
|
||||
// spy.calls.push([Array.from(arguments), this])
|
||||
// }
|
||||
// spy.calls = []
|
||||
// return spy
|
||||
// }
|
||||
|
||||
function assertListeners(t, event, listeners) {
|
||||
t.strictSame(t.context.ee.listeners(event), listeners)
|
||||
}
|
||||
|
||||
t.beforeEach(function (t) {
|
||||
t.context.ee = new EventEmitter()
|
||||
t.context.em = new EventListenersManager(t.context.ee)
|
||||
})
|
||||
|
||||
t.test('.add adds a listener', function (t) {
|
||||
t.context.em.add('foo', noop)
|
||||
|
||||
assertListeners(t, 'foo', [noop])
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
t.test('.add does not add a duplicate listener', function (t) {
|
||||
t.context.em.add('foo', noop).add('foo', noop)
|
||||
|
||||
assertListeners(t, 'foo', [noop])
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
t.test('.remove removes a listener', function (t) {
|
||||
t.context.em.add('foo', noop).remove('foo', noop)
|
||||
|
||||
assertListeners(t, 'foo', [])
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
t.test('.removeAll removes all listeners of a given type', function (t) {
|
||||
t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
|
||||
|
||||
assertListeners(t, 'foo', [])
|
||||
assertListeners(t, 'bar', [noop])
|
||||
|
||||
t.end()
|
||||
})
|
||||
|
||||
t.test('.removeAll removes all listeners', function (t) {
|
||||
t.context.em.add('foo', noop).add('bar', noop).removeAll()
|
||||
|
||||
assertListeners(t, 'foo', [])
|
||||
assertListeners(t, 'bar', [])
|
||||
|
||||
t.end()
|
||||
})
|
||||
46
@vates/event-listeners-manager/package.json
Normal file
46
@vates/event-listeners-manager/package.json
Normal file
@@ -0,0 +1,46 @@
|
||||
{
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
},
|
||||
"private": false,
|
||||
"name": "@vates/event-listeners-manager",
|
||||
"descriptions": "Easy way to clean up event listeners",
|
||||
"keywords": [
|
||||
"add",
|
||||
"addEventListener",
|
||||
"addListener",
|
||||
"browser",
|
||||
"clear",
|
||||
"DOM",
|
||||
"emitter",
|
||||
"event",
|
||||
"EventEmitter",
|
||||
"EventTarget",
|
||||
"management",
|
||||
"manager",
|
||||
"node",
|
||||
"remove",
|
||||
"removeEventListener",
|
||||
"removeListener"
|
||||
],
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/event-listeners-manager",
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"repository": {
|
||||
"directory": "@vates/event-listeners-manager",
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"license": "ISC",
|
||||
"version": "1.0.1",
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public",
|
||||
"test": "tap --branches=72"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^16.2.0"
|
||||
}
|
||||
}
|
||||
@@ -35,6 +35,6 @@
|
||||
"test": "tap"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^15.1.6"
|
||||
"tap": "^16.0.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
### `readChunk(stream, [size])`

- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read

```js
import { readChunk } from '@vates/read-chunk'
@@ -11,3 +14,13 @@ import { readChunk } from '@vates/read-chunk'
  }
})()
```

### `readChunkStrict(stream, [size])`

Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.

```js
import { readChunkStrict } from '@vates/read-chunk'

const chunk = await readChunkStrict(stream, 1024)
```
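Illustrative sketch (not taken from the repository diff): when `readChunkStrict` rejects because the stream ended with not enough data, the partial data is attached to the error as `error.chunk`, as shown by the implementation and tests further down; `readHeader` is a made-up helper name:

```js
import { readChunkStrict } from '@vates/read-chunk'

async function readHeader(stream) {
  try {
    // throws if the stream ends before 16 bytes could be read
    return await readChunkStrict(stream, 16)
  } catch (error) {
    // the partial data, if any, is exposed on the error
    console.warn('short read:', error.chunk?.length ?? 0, 'bytes')
    throw error
  }
}
```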
@@ -16,9 +16,12 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
|
||||
|
||||
## Usage
|
||||
|
||||
### `readChunk(stream, [size])`
|
||||
|
||||
- returns the next available chunk of data
|
||||
- like `stream.read()`, a number of bytes can be specified
|
||||
- returns `null` if the stream has ended
|
||||
- returns with less data than expected if stream has ended
|
||||
- returns `null` if the stream has ended and no data has been read
|
||||
|
||||
```js
|
||||
import { readChunk } from '@vates/read-chunk'
|
||||
@@ -30,6 +33,16 @@ import { readChunk } from '@vates/read-chunk'
|
||||
})()
|
||||
```
|
||||
|
||||
### `readChunkStrict(stream, [size])`
|
||||
|
||||
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
|
||||
|
||||
```js
|
||||
import { readChunkStrict } from '@vates/read-chunk'
|
||||
|
||||
const chunk = await readChunkStrict(stream, 1024)
|
||||
```
|
||||
|
||||
## Contributions
|
||||
|
||||
Contributions are _very_ welcomed, either on the documentation or on
|
||||
|
||||
@@ -30,3 +30,22 @@ const readChunk = (stream, size) =>
|
||||
onReadable()
|
||||
})
|
||||
exports.readChunk = readChunk
|
||||
|
||||
exports.readChunkStrict = async function readChunkStrict(stream, size) {
|
||||
const chunk = await readChunk(stream, size)
|
||||
if (chunk === null) {
|
||||
throw new Error('stream has ended without data')
|
||||
}
|
||||
|
||||
if (size !== undefined && chunk.length !== size) {
|
||||
const error = new Error('stream has ended with not enough data')
|
||||
Object.defineProperties(error, {
|
||||
chunk: {
|
||||
value: chunk,
|
||||
},
|
||||
})
|
||||
throw error
|
||||
}
|
||||
|
||||
return chunk
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
const { Readable } = require('stream')
|
||||
|
||||
const { readChunk } = require('./')
|
||||
const { readChunk, readChunkStrict } = require('./')
|
||||
|
||||
const makeStream = it => Readable.from(it, { objectMode: false })
|
||||
makeStream.obj = Readable.from
|
||||
@@ -43,3 +43,27 @@ describe('readChunk', () => {
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
const rejectionOf = promise =>
|
||||
promise.then(
|
||||
value => {
|
||||
throw value
|
||||
},
|
||||
error => error
|
||||
)
|
||||
|
||||
describe('readChunkStrict', function () {
|
||||
it('throws if stream is empty', async () => {
|
||||
const error = await rejectionOf(readChunkStrict(makeStream([])))
|
||||
expect(error).toBeInstanceOf(Error)
|
||||
expect(error.message).toBe('stream has ended without data')
|
||||
expect(error.chunk).toEqual(undefined)
|
||||
})
|
||||
|
||||
it('throws if stream ends with not enough data', async () => {
|
||||
const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
|
||||
expect(error).toBeInstanceOf(Error)
|
||||
expect(error.message).toBe('stream has ended with not enough data')
|
||||
expect(error.chunk).toEqual(Buffer.from('foobar'))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.1.2",
|
||||
"version": "1.0.0",
|
||||
"engines": {
|
||||
"node": ">=8.10"
|
||||
},
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
|
||||
@@ -1 +0,0 @@
|
||||
../../scripts/babel-eslintrc.js
|
||||
@@ -1,12 +1,14 @@
|
||||
import assert from 'assert'
|
||||
import hash from 'object-hash'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { decorateWith } from '@vates/decorate-with'
|
||||
import { defer } from 'golike-defer'
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const hash = require('object-hash')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { decorateClass } = require('@vates/decorate-with')
|
||||
const { defer } = require('golike-defer')
|
||||
|
||||
const log = createLogger('xo:audit-core')
|
||||
|
||||
export class Storage {
|
||||
exports.Storage = class Storage {
|
||||
constructor() {
|
||||
this._lock = Promise.resolve()
|
||||
}
|
||||
@@ -29,7 +31,7 @@ const ID_TO_ALGORITHM = {
|
||||
5: 'sha256',
|
||||
}
|
||||
|
||||
export class AlteredRecordError extends Error {
|
||||
class AlteredRecordError extends Error {
|
||||
constructor(id, nValid, record) {
|
||||
super('altered record')
|
||||
|
||||
@@ -38,8 +40,9 @@ export class AlteredRecordError extends Error {
|
||||
this.record = record
|
||||
}
|
||||
}
|
||||
exports.AlteredRecordError = AlteredRecordError
|
||||
|
||||
export class MissingRecordError extends Error {
|
||||
class MissingRecordError extends Error {
|
||||
constructor(id, nValid) {
|
||||
super('missing record')
|
||||
|
||||
@@ -47,8 +50,10 @@ export class MissingRecordError extends Error {
|
||||
this.nValid = nValid
|
||||
}
|
||||
}
|
||||
exports.MissingRecordError = MissingRecordError
|
||||
|
||||
export const NULL_ID = 'nullId'
|
||||
const NULL_ID = 'nullId'
|
||||
exports.NULL_ID = NULL_ID
|
||||
|
||||
const HASH_ALGORITHM_ID = '5'
|
||||
const createHash = (data, algorithmId = HASH_ALGORITHM_ID) =>
|
||||
@@ -57,13 +62,12 @@ const createHash = (data, algorithmId = HASH_ALGORITHM_ID) =>
|
||||
excludeKeys: key => key === 'id',
|
||||
})}`
|
||||
|
||||
export class AuditCore {
|
||||
class AuditCore {
|
||||
constructor(storage) {
|
||||
assert.notStrictEqual(storage, undefined)
|
||||
this._storage = storage
|
||||
}
|
||||
|
||||
@decorateWith(defer)
|
||||
async add($defer, subject, event, data) {
|
||||
const time = Date.now()
|
||||
$defer(await this._storage.acquireLock())
|
||||
@@ -148,7 +152,6 @@ export class AuditCore {
|
||||
}
|
||||
}
|
||||
|
||||
@decorateWith(defer)
|
||||
async deleteRangeAndRewrite($defer, newest, oldest) {
|
||||
assert.notStrictEqual(newest, undefined)
|
||||
assert.notStrictEqual(oldest, undefined)
|
||||
@@ -189,3 +192,9 @@ export class AuditCore {
|
||||
}
|
||||
}
|
||||
}
|
||||
exports.AuditCore = AuditCore
|
||||
|
||||
decorateClass(AuditCore, {
|
||||
add: defer,
|
||||
deleteRangeAndRewrite: defer,
|
||||
})
|
||||
@@ -1,6 +1,9 @@
|
||||
/* eslint-env jest */
|
||||
'use strict'
|
||||
|
||||
import { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } from '.'
|
||||
const assert = require('assert/strict')
|
||||
const { afterEach, describe, it } = require('tap').mocha
|
||||
|
||||
const { AlteredRecordError, AuditCore, MissingRecordError, NULL_ID, Storage } = require('.')
|
||||
|
||||
const asyncIteratorToArray = async asyncIterator => {
|
||||
const array = []
|
||||
@@ -72,7 +75,7 @@ const auditCore = new AuditCore(db)
|
||||
const storeAuditRecords = async () => {
|
||||
await Promise.all(DATA.map(data => auditCore.add(...data)))
|
||||
const records = await asyncIteratorToArray(auditCore.getFrom())
|
||||
expect(records.length).toBe(DATA.length)
|
||||
assert.equal(records.length, DATA.length)
|
||||
return records
|
||||
}
|
||||
|
||||
@@ -83,10 +86,11 @@ describe('auditCore', () => {
|
||||
const [newestRecord, deletedRecord] = await storeAuditRecords()
|
||||
|
||||
const nValidRecords = await auditCore.checkIntegrity(NULL_ID, newestRecord.id)
|
||||
expect(nValidRecords).toBe(DATA.length)
|
||||
assert.equal(nValidRecords, DATA.length)
|
||||
|
||||
await db.del(deletedRecord.id)
|
||||
await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
|
||||
await assert.rejects(
|
||||
auditCore.checkIntegrity(NULL_ID, newestRecord.id),
|
||||
new MissingRecordError(deletedRecord.id, 1)
|
||||
)
|
||||
})
|
||||
@@ -97,7 +101,8 @@ describe('auditCore', () => {
|
||||
alteredRecord.event = ''
|
||||
await db.put(alteredRecord)
|
||||
|
||||
await expect(auditCore.checkIntegrity(NULL_ID, newestRecord.id)).rejects.toEqual(
|
||||
await assert.rejects(
|
||||
auditCore.checkIntegrity(NULL_ID, newestRecord.id),
|
||||
new AlteredRecordError(alteredRecord.id, 1, alteredRecord)
|
||||
)
|
||||
})
|
||||
@@ -107,8 +112,8 @@ describe('auditCore', () => {
|
||||
|
||||
await auditCore.deleteFrom(secondRecord.id)
|
||||
|
||||
expect(await db.get(firstRecord.id)).toBe(undefined)
|
||||
expect(await db.get(secondRecord.id)).toBe(undefined)
|
||||
assert.equal(await db.get(firstRecord.id), undefined)
|
||||
assert.equal(await db.get(secondRecord.id), undefined)
|
||||
|
||||
await auditCore.checkIntegrity(secondRecord.id, thirdRecord.id)
|
||||
})
|
||||
@@ -9,28 +9,14 @@
|
||||
},
|
||||
"version": "0.2.0",
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
"node": ">=14"
|
||||
},
|
||||
"main": "dist/",
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"postversion": "npm publish --access public",
|
||||
"prebuild": "rimraf dist/",
|
||||
"predev": "yarn run prebuild",
|
||||
"prepublishOnly": "yarn run build"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.7.4",
|
||||
"@babel/core": "^7.7.4",
|
||||
"@babel/plugin-proposal-decorators": "^7.8.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.0",
|
||||
"@babel/preset-env": "^7.7.4",
|
||||
"cross-env": "^7.0.2",
|
||||
"rimraf": "^3.0.0"
|
||||
"test": "tap --lines 67 --functions 92 --branches 52 --statements 67"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/decorate-with": "^1.0.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@xen-orchestra/log": "^0.3.0",
|
||||
"golike-defer": "^0.5.1",
|
||||
"object-hash": "^2.0.1"
|
||||
@@ -40,5 +26,8 @@
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"devDependencies": {
|
||||
"tap": "^16.0.1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
"node": ">=8.3"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
'use strict'
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
@@ -28,7 +26,13 @@ module.exports = async function main(args) {
|
||||
await asyncMap(_, async vmDir => {
|
||||
vmDir = resolve(vmDir)
|
||||
try {
|
||||
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
|
||||
await adapter.cleanVm(vmDir, {
|
||||
fixMetadata: fix,
|
||||
remove,
|
||||
merge,
|
||||
logInfo: (...args) => console.log(...args),
|
||||
logWarn: (...args) => console.warn(...args),
|
||||
})
|
||||
} catch (error) {
|
||||
console.error('adapter.cleanVm', vmDir, error)
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@
|
||||
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
|
||||
"dependencies": {
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/backups": "^0.20.0",
|
||||
"@xen-orchestra/fs": "^0.20.0",
|
||||
"@xen-orchestra/backups": "^0.27.4",
|
||||
"@xen-orchestra/fs": "^3.0.0",
|
||||
"filenamify": "^4.1.0",
|
||||
"getopts": "^2.2.5",
|
||||
"lodash": "^4.17.15",
|
||||
@@ -27,7 +27,7 @@
|
||||
"scripts": {
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"version": "0.7.0",
|
||||
"version": "0.7.7",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
|
||||
@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { compileTemplate } = require('@xen-orchestra/template')
|
||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
||||
|
||||
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
|
||||
const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
|
||||
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { VmBackup } = require('./_VmBackup.js')
|
||||
@@ -24,6 +24,34 @@ const getAdaptersByRemote = adapters => {
|
||||
|
||||
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
|
||||
|
||||
const DEFAULT_SETTINGS = {
|
||||
reportWhen: 'failure',
|
||||
}
|
||||
|
||||
const DEFAULT_VM_SETTINGS = {
|
||||
bypassVdiChainsCheck: false,
|
||||
checkpointSnapshot: false,
|
||||
concurrency: 2,
|
||||
copyRetention: 0,
|
||||
deleteFirst: false,
|
||||
exportRetention: 0,
|
||||
fullInterval: 0,
|
||||
healthCheckSr: undefined,
|
||||
healthCheckVmsWithTags: [],
|
||||
maxMergedDeltasPerRun: 2,
|
||||
offlineBackup: false,
|
||||
offlineSnapshot: false,
|
||||
snapshotRetention: 0,
|
||||
timeout: 0,
|
||||
unconditionalSnapshot: false,
|
||||
vmTimeout: 0,
|
||||
}
|
||||
|
||||
const DEFAULT_METADATA_SETTINGS = {
|
||||
retentionPoolMetadata: 0,
|
||||
retentionXoMetadata: 0,
|
||||
}
|
||||
|
||||
exports.Backup = class Backup {
|
||||
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
|
||||
this._config = config
|
||||
@@ -42,17 +70,22 @@ exports.Backup = class Backup {
|
||||
'{job.name}': job.name,
|
||||
'{vm.name_label}': vm => vm.name_label,
|
||||
})
|
||||
}
|
||||
|
||||
run() {
|
||||
const type = this._job.type
|
||||
const { type } = job
|
||||
const baseSettings = { ...DEFAULT_SETTINGS }
|
||||
if (type === 'backup') {
|
||||
return this._runVmBackup()
|
||||
Object.assign(baseSettings, DEFAULT_VM_SETTINGS, config.defaultSettings, config.vm?.defaultSettings)
|
||||
this.run = this._runVmBackup
|
||||
} else if (type === 'metadataBackup') {
|
||||
return this._runMetadataBackup()
|
||||
Object.assign(baseSettings, DEFAULT_METADATA_SETTINGS, config.defaultSettings, config.metadata?.defaultSettings)
|
||||
this.run = this._runMetadataBackup
|
||||
} else {
|
||||
throw new Error(`No runner for the backup type ${type}`)
|
||||
}
|
||||
Object.assign(baseSettings, job.settings[''])
|
||||
|
||||
this._baseSettings = baseSettings
|
||||
this._settings = { ...baseSettings, ...job.settings[schedule.id] }
|
||||
}
|
||||
|
||||
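The new DEFAULT_* constants and the rewritten run() make the settings precedence explicit: built-in defaults, then config defaults, then per-mode config defaults, then job-wide settings, then per-schedule settings (per-VM overrides come last, in _runVmBackup). A small self-contained sketch of that layering, with made-up values:

'use strict'

// Illustrative values only, the real ones come from the XO config and the job document.
const DEFAULT_SETTINGS = { reportWhen: 'failure' }
const DEFAULT_VM_SETTINGS = { concurrency: 2, snapshotRetention: 0 }

const config = { defaultSettings: {}, vm: { defaultSettings: { concurrency: 4 } } }
const job = { settings: { '': { snapshotRetention: 3 }, 'schedule-1': { concurrency: 1 } } }
const schedule = { id: 'schedule-1' }

// same layering as Backup#run() above, later sources win
const baseSettings = Object.assign(
  { ...DEFAULT_SETTINGS },
  DEFAULT_VM_SETTINGS,
  config.defaultSettings,
  config.vm?.defaultSettings,
  job.settings['']
)
const settings = { ...baseSettings, ...job.settings[schedule.id] }
// per-VM overrides are applied even later, in _runVmBackup: { ...settings, ...job.settings[vmUuid] }

console.log(settings) // { reportWhen: 'failure', concurrency: 1, snapshotRetention: 3 }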
async _runMetadataBackup() {
|
||||
@@ -64,13 +97,6 @@ exports.Backup = class Backup {
|
||||
}
|
||||
|
||||
const config = this._config
|
||||
const settings = {
|
||||
...config.defaultSettings,
|
||||
...config.metadata.defaultSettings,
|
||||
...job.settings[''],
|
||||
...job.settings[schedule.id],
|
||||
}
|
||||
|
||||
const poolIds = extractIdsFromSimplePattern(job.pools)
|
||||
const isEmptyPools = poolIds.length === 0
|
||||
const isXoMetadata = job.xoMetadata !== undefined
|
||||
@@ -78,6 +104,8 @@ exports.Backup = class Backup {
|
||||
throw new Error('no metadata mode found')
|
||||
}
|
||||
|
||||
const settings = this._settings
|
||||
|
||||
const { retentionPoolMetadata, retentionXoMetadata } = settings
|
||||
|
||||
if (
|
||||
@@ -189,14 +217,7 @@ exports.Backup = class Backup {
|
||||
const schedule = this._schedule
|
||||
|
||||
const config = this._config
|
||||
const { settings } = job
|
||||
const scheduleSettings = {
|
||||
...config.defaultSettings,
|
||||
...config.vm.defaultSettings,
|
||||
...settings[''],
|
||||
...settings[schedule.id],
|
||||
}
|
||||
|
||||
const settings = this._settings
|
||||
await Disposable.use(
|
||||
Disposable.all(
|
||||
extractIdsFromSimplePattern(job.srs).map(id =>
|
||||
@@ -224,14 +245,15 @@ exports.Backup = class Backup {
|
||||
})
|
||||
)
|
||||
),
|
||||
async (srs, remoteAdapters) => {
|
||||
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
|
||||
async (srs, remoteAdapters, healthCheckSr) => {
|
||||
// remove adapters that failed (already handled)
|
||||
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
|
||||
|
||||
// remove srs that failed (already handled)
|
||||
srs = srs.filter(_ => _ !== undefined)
|
||||
|
||||
if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
|
||||
if (remoteAdapters.length === 0 && srs.length === 0 && settings.snapshotRetention === 0) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -241,23 +263,27 @@ exports.Backup = class Backup {
|
||||
|
||||
remoteAdapters = getAdaptersByRemote(remoteAdapters)
|
||||
|
||||
const allSettings = this._job.settings
|
||||
const baseSettings = this._baseSettings
|
||||
|
||||
const handleVm = vmUuid =>
|
||||
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
|
||||
Disposable.use(this._getRecord('VM', vmUuid), vm =>
|
||||
new VmBackup({
|
||||
baseSettings,
|
||||
config,
|
||||
getSnapshotNameLabel,
|
||||
healthCheckSr,
|
||||
job,
|
||||
// remotes,
|
||||
remoteAdapters,
|
||||
schedule,
|
||||
settings: { ...scheduleSettings, ...settings[vmUuid] },
|
||||
settings: { ...settings, ...allSettings[vm.uuid] },
|
||||
srs,
|
||||
vm,
|
||||
}).run()
|
||||
)
|
||||
)
|
||||
const { concurrency } = scheduleSettings
|
||||
const { concurrency } = settings
|
||||
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
|
||||
}
|
||||
)
|
||||
|
||||
@xen-orchestra/backups/HealthCheckVmBackup.js (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
'use strict'
|
||||
|
||||
const { Task } = require('./Task')
|
||||
|
||||
exports.HealthCheckVmBackup = class HealthCheckVmBackup {
|
||||
#xapi
|
||||
#restoredVm
|
||||
|
||||
constructor({ restoredVm, xapi }) {
|
||||
this.#restoredVm = restoredVm
|
||||
this.#xapi = xapi
|
||||
}
|
||||
|
||||
async run() {
|
||||
return Task.run(
|
||||
{
|
||||
name: 'vmstart',
|
||||
},
|
||||
async () => {
|
||||
let restoredVm = this.#restoredVm
|
||||
const xapi = this.#xapi
|
||||
const restoredId = restoredVm.uuid
|
||||
|
||||
// remove vifs
|
||||
await Promise.all(restoredVm.$VIFs.map(vif => xapi.callAsync('VIF.destroy', vif.$ref)))
|
||||
|
||||
const start = new Date()
|
||||
// start Vm
|
||||
|
||||
await xapi.callAsync(
|
||||
'VM.start',
|
||||
restoredVm.$ref,
|
||||
false, // Start paused?
|
||||
false // Skip pre-boot checks?
|
||||
)
|
||||
const started = new Date()
|
||||
const timeout = 10 * 60 * 1000
|
||||
const startDuration = started - start
|
||||
|
||||
let remainingTimeout = timeout - startDuration
|
||||
|
||||
if (remainingTimeout < 0) {
|
||||
throw new Error(`VM ${restoredId} not started after ${timeout / 1000} seconds`)
|
||||
}
|
||||
|
||||
// wait for the 'Running' event to be really stored in local xapi object cache
|
||||
restoredVm = await xapi.waitObjectState(restoredVm.$ref, vm => vm.power_state === 'Running', {
|
||||
timeout: remainingTimeout,
|
||||
})
|
||||
|
||||
const running = new Date()
|
||||
remainingTimeout -= running - started
|
||||
|
||||
if (remainingTimeout < 0) {
|
||||
throw new Error(`local xapi did not get the Running state for VM ${restoredId} after ${timeout / 1000} seconds`)
|
||||
}
|
||||
// wait for the guest tool version to be defined
|
||||
await xapi.waitObjectState(restoredVm.guest_metrics, gm => gm?.PV_drivers_version?.major !== undefined, {
|
||||
timeout: remainingTimeout,
|
||||
})
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
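The new HealthCheckVmBackup class strips the VIFs from a restored copy, boots it and waits, within a 10 minute budget, for it to reach Running and to report guest tools. A rough sketch of how a caller might drive it; the require path, the restore step and the cleanup are assumptions, only the constructor arguments and run() come from the file above:

'use strict'

const { HealthCheckVmBackup } = require('@xen-orchestra/backups/HealthCheckVmBackup.js')

// Hypothetical driver: the restored VM is disposable, so always clean it up
// once the check has passed or failed.
async function runHealthCheck({ xapi, restoredVm, destroyRestoredVm }) {
  try {
    await new HealthCheckVmBackup({ restoredVm, xapi }).run()
    console.log('health check passed', restoredVm.uuid)
  } finally {
    await destroyRestoredVm(restoredVm)
  }
}

module.exports = runHealthCheck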
@@ -8,7 +8,7 @@ const { Task } = require('./Task.js')
|
||||
const { watchStreamSize } = require('./_watchStreamSize.js')
|
||||
|
||||
exports.ImportVmBackup = class ImportVmBackup {
|
||||
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs } = {} }) {
|
||||
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs = {} } = {} }) {
|
||||
this._adapter = adapter
|
||||
this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
|
||||
this._metadata = metadata
|
||||
@@ -30,7 +30,12 @@ exports.ImportVmBackup = class ImportVmBackup {
|
||||
} else {
|
||||
assert.strictEqual(metadata.mode, 'delta')
|
||||
|
||||
backup = await adapter.readDeltaVmBackup(metadata)
|
||||
const ignoredVdis = new Set(
|
||||
Object.entries(this._importDeltaVmSettings.mapVdisSrs)
|
||||
.filter(([_, srUuid]) => srUuid === null)
|
||||
.map(([vdiUuid]) => vdiUuid)
|
||||
)
|
||||
backup = await adapter.readDeltaVmBackup(metadata, ignoredVdis)
|
||||
Object.values(backup.streams).forEach(stream => watchStreamSize(stream, sizeContainer))
|
||||
}
|
||||
|
||||
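In this hunk a null entry in mapVdisSrs now means "do not restore this VDI": the derived ignoredVdis set is handed to readDeltaVmBackup so the matching streams are never opened. The filtering on its own, with made-up UUIDs:

'use strict'

// Hypothetical restore settings: vdi-b is remapped to another SR, vdi-c is explicitly skipped.
const mapVdisSrs = {
  'vdi-b-uuid': 'sr-slow-uuid',
  'vdi-c-uuid': null,
}

// same filter as ImportVmBackup above: keep only the VDIs mapped to null
const ignoredVdis = new Set(
  Object.entries(mapVdisSrs)
    .filter(([, srUuid]) => srUuid === null)
    .map(([vdiUuid]) => vdiUuid)
)

console.log(ignoredVdis.has('vdi-c-uuid')) // true, this disk will not be restored
console.log(ignoredVdis.has('vdi-b-uuid')) // false, restored, but on sr-slow-uuid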
|
||||
@@ -1,21 +1,24 @@
|
||||
'use strict'
|
||||
|
||||
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
|
||||
const { synchronized } = require('decorator-synchronized')
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const fromCallback = require('promise-toolbox/fromCallback')
|
||||
const fromEvent = require('promise-toolbox/fromEvent')
|
||||
const pDefer = require('promise-toolbox/defer')
|
||||
const groupBy = require('lodash/groupBy.js')
|
||||
const pickBy = require('lodash/pickBy.js')
|
||||
const { dirname, join, normalize, resolve } = require('path')
|
||||
const { createLogger } = require('@xen-orchestra/log')
|
||||
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
|
||||
const { createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
|
||||
const { deduped } = require('@vates/disposable/deduped.js')
|
||||
const { decorateMethodsWith } = require('@vates/decorate-with')
|
||||
const { compose } = require('@vates/compose')
|
||||
const { execFile } = require('child_process')
|
||||
const { readdir, stat } = require('fs-extra')
|
||||
const { readdir, lstat } = require('fs-extra')
|
||||
const { v4: uuidv4 } = require('uuid')
|
||||
const { ZipFile } = require('yazl')
|
||||
const zlib = require('zlib')
|
||||
|
||||
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
|
||||
const { cleanVm } = require('./_cleanVm.js')
|
||||
@@ -44,13 +47,12 @@ const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
|
||||
const RE_VHDI = /^vhdi(\d+)$/
|
||||
|
||||
async function addDirectory(files, realPath, metadataPath) {
|
||||
try {
|
||||
const subFiles = await readdir(realPath)
|
||||
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOTDIR') {
|
||||
throw error
|
||||
}
|
||||
const stats = await lstat(realPath)
|
||||
if (stats.isDirectory()) {
|
||||
await asyncMap(await readdir(realPath), file =>
|
||||
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
|
||||
)
|
||||
} else if (stats.isFile()) {
|
||||
files.push({
|
||||
realPath,
|
||||
metadataPath,
|
||||
@@ -77,6 +79,7 @@ class RemoteAdapter {
|
||||
this._dirMode = dirMode
|
||||
this._handler = handler
|
||||
this._vhdDirectoryCompression = vhdDirectoryCompression
|
||||
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
|
||||
}
|
||||
|
||||
get handler() {
|
||||
@@ -260,7 +263,8 @@ class RemoteAdapter {
|
||||
}
|
||||
|
||||
async deleteVmBackups(files) {
|
||||
const { delta, full, ...others } = groupBy(await asyncMap(files, file => this.readVmBackupMetadata(file)), 'mode')
|
||||
const metadatas = await asyncMap(files, file => this.readVmBackupMetadata(file))
|
||||
const { delta, full, ...others } = groupBy(metadatas, 'mode')
|
||||
|
||||
const unsupportedModes = Object.keys(others)
|
||||
if (unsupportedModes.length !== 0) {
|
||||
@@ -275,8 +279,11 @@ class RemoteAdapter {
|
||||
const dirs = new Set(files.map(file => dirname(file)))
|
||||
for (const dir of dirs) {
|
||||
// don't merge in main process, unused VHDs will be merged in the next backup run
|
||||
await this.cleanVm(dir, { remove: true, onLog: warn })
|
||||
await this.cleanVm(dir, { remove: true, logWarn: warn })
|
||||
}
|
||||
|
||||
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
|
||||
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
|
||||
}
|
||||
|
||||
#getCompressionType() {
|
||||
@@ -284,7 +291,7 @@ class RemoteAdapter {
|
||||
}
|
||||
|
||||
#useVhdDirectory() {
|
||||
return this.handler.type === 's3'
|
||||
return this.handler.useVhdDirectory()
|
||||
}
|
||||
|
||||
#useAlias() {
|
||||
@@ -375,8 +382,12 @@ class RemoteAdapter {
|
||||
const entriesMap = {}
|
||||
await asyncMap(await readdir(path), async name => {
|
||||
try {
|
||||
const stats = await stat(`${path}/${name}`)
|
||||
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
|
||||
const stats = await lstat(`${path}/${name}`)
|
||||
if (stats.isDirectory()) {
|
||||
entriesMap[name + '/'] = {}
|
||||
} else if (stats.isFile()) {
|
||||
entriesMap[name] = {}
|
||||
}
|
||||
} catch (error) {
|
||||
if (error == null || error.code !== 'ENOENT') {
|
||||
throw error
|
||||
@@ -447,34 +458,94 @@ class RemoteAdapter {
|
||||
return backupsByPool
|
||||
}
|
||||
|
||||
async listVmBackups(vmUuid, predicate) {
|
||||
async invalidateVmBackupListCache(vmUuid) {
|
||||
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
|
||||
}
|
||||
|
||||
async #getCachabledDataListVmBackups(dir) {
|
||||
const handler = this._handler
|
||||
const backups = []
|
||||
const backups = {}
|
||||
|
||||
try {
|
||||
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
|
||||
const files = await handler.list(dir, {
|
||||
filter: isMetadataFile,
|
||||
prependDir: true,
|
||||
})
|
||||
await asyncMap(files, async file => {
|
||||
try {
|
||||
const metadata = await this.readVmBackupMetadata(file)
|
||||
if (predicate === undefined || predicate(metadata)) {
|
||||
// inject an id usable by importVmBackupNg()
|
||||
metadata.id = metadata._filename
|
||||
|
||||
backups.push(metadata)
|
||||
}
|
||||
// inject an id usable by importVmBackupNg()
|
||||
metadata.id = metadata._filename
|
||||
backups[file] = metadata
|
||||
} catch (error) {
|
||||
warn(`listVmBackups ${file}`, { error })
|
||||
warn(`can't read vm backup metadata`, { error, file, dir })
|
||||
}
|
||||
})
|
||||
return backups
|
||||
} catch (error) {
|
||||
let code
|
||||
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use _ to mark this method as private by convention
// since we decorate it with synchronized.withKey in the constructor
// and # methods are not writable.
//
// read the list of backups of a VM from the cache
// if the cache is missing or broken => regenerate it and return
|
||||
async _readCacheListVmBackups(vmUuid) {
|
||||
const dir = `${BACKUP_DIR}/${vmUuid}`
|
||||
const path = `${dir}/cache.json.gz`
|
||||
|
||||
try {
|
||||
const gzipped = await this.handler.readFile(path)
|
||||
const text = await fromCallback(zlib.gunzip, gzipped)
|
||||
return JSON.parse(text)
|
||||
} catch (error) {
|
||||
if (error.code !== 'ENOENT') {
|
||||
warn('Cache file was unreadable', { vmUuid, error })
|
||||
}
|
||||
}
|
||||
|
||||
// nothing cached, or cache unreadable => regenerate it
|
||||
const backups = await this.#getCachabledDataListVmBackups(dir)
|
||||
if (backups === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// detached async action, will not reject
|
||||
this.#writeVmBackupsCache(path, backups)
|
||||
|
||||
return backups
|
||||
}
|
||||
|
||||
async #writeVmBackupsCache(cacheFile, backups) {
|
||||
try {
|
||||
const text = JSON.stringify(backups)
|
||||
const zipped = await fromCallback(zlib.gzip, text)
|
||||
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
|
||||
} catch (error) {
|
||||
warn('writeVmBackupsCache', { cacheFile, error })
|
||||
}
|
||||
}
|
||||
|
||||
async listVmBackups(vmUuid, predicate) {
|
||||
const backups = []
|
||||
const cached = await this._readCacheListVmBackups(vmUuid)
|
||||
|
||||
if (cached === undefined) {
|
||||
return []
|
||||
}
|
||||
|
||||
Object.values(cached).forEach(metadata => {
|
||||
if (predicate === undefined || predicate(metadata)) {
|
||||
backups.push(metadata)
|
||||
}
|
||||
})
|
||||
|
||||
return backups.sort(compareTimestamp)
|
||||
}
|
||||
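The new cache keeps the per-VM backup list as gzipped JSON (cache.json.gz) inside the VM's backup directory and silently regenerates it when missing or unreadable. A standalone sketch of the same round trip against the local filesystem; the real code goes through the remote handler and promise-toolbox's fromCallback instead of fs and util.promisify:

'use strict'

const fs = require('fs/promises')
const zlib = require('zlib')
const { promisify } = require('util')

const gzip = promisify(zlib.gzip)
const gunzip = promisify(zlib.gunzip)

async function writeCache(path, backups) {
  // the cache is only an accelerator, a failed write can be ignored by the caller
  await fs.writeFile(path, await gzip(JSON.stringify(backups)))
}

async function readCache(path) {
  try {
    return JSON.parse(await gunzip(await fs.readFile(path)))
  } catch (error) {
    if (error.code !== 'ENOENT') {
      console.warn('cache unreadable, will regenerate', { path, error })
    }
    return undefined // caller regenerates from the metadata files
  }
}

module.exports = { readCache, writeCache }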
@@ -530,60 +601,42 @@ class RemoteAdapter {
|
||||
})
|
||||
}
|
||||
|
||||
async _createSyntheticStream(handler, paths) {
|
||||
let disposableVhds = []
|
||||
|
||||
// if it's a path : open all hierarchy of parent
|
||||
if (typeof paths === 'string') {
|
||||
let vhd
|
||||
let vhdPath = paths
|
||||
do {
|
||||
const disposable = await openVhd(handler, vhdPath)
|
||||
vhd = disposable.value
|
||||
disposableVhds.push(disposable)
|
||||
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
|
||||
} while (vhd.footer.diskType !== Constants.DISK_TYPES.DYNAMIC)
|
||||
} else {
|
||||
// only open the list of path given
|
||||
disposableVhds = paths.map(path => openVhd(handler, path))
|
||||
}
|
||||
|
||||
// open the hierarchy of ancestors until we find a full one
|
||||
async _createSyntheticStream(handler, path) {
|
||||
const disposableSynthetic = await VhdSynthetic.fromVhdChain(handler, path)
|
||||
// I don't want the vhds to be disposed on return
|
||||
// but only when the stream is done ( or failed )
|
||||
const disposables = await Disposable.all(disposableVhds)
|
||||
const vhds = disposables.value
|
||||
|
||||
let disposed = false
|
||||
const disposeOnce = async () => {
|
||||
if (!disposed) {
|
||||
disposed = true
|
||||
|
||||
try {
|
||||
await disposables.dispose()
|
||||
await disposableSynthetic.dispose()
|
||||
} catch (error) {
|
||||
warn('_createSyntheticStream: failed to dispose VHDs', { error })
|
||||
warn('openVhd: failed to dispose VHDs', { error })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const synthetic = new VhdSynthetic(vhds)
|
||||
await synthetic.readHeaderAndFooter()
|
||||
const synthetic = disposableSynthetic.value
|
||||
await synthetic.readBlockAllocationTable()
|
||||
const stream = await synthetic.stream()
|
||||
|
||||
stream.on('end', disposeOnce)
|
||||
stream.on('close', disposeOnce)
|
||||
stream.on('error', disposeOnce)
|
||||
return stream
|
||||
}
|
||||
|
||||
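The rewritten _createSyntheticStream delegates chain resolution to VhdSynthetic.fromVhdChain and only disposes the underlying VHDs once the outgoing stream is finished. The dispose-once guard is generic enough to sketch on its own; the helper name is mine, the events mirror the hunk above:

'use strict'

// Generic helper: run `dispose` exactly once, whichever of the stream's
// terminal events ('end', 'close', 'error') fires first.
function disposeOnStreamEnd(stream, dispose) {
  let disposed = false
  const disposeOnce = async () => {
    if (!disposed) {
      disposed = true
      try {
        await dispose()
      } catch (error) {
        console.warn('failed to dispose resources backing the stream', { error })
      }
    }
  }
  stream.on('end', disposeOnce)
  stream.on('close', disposeOnce)
  stream.on('error', disposeOnce)
  return stream
}

module.exports = disposeOnStreamEnd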
async readDeltaVmBackup(metadata) {
|
||||
async readDeltaVmBackup(metadata, ignoredVdis) {
|
||||
const handler = this._handler
|
||||
const { vbds, vdis, vhds, vifs, vm } = metadata
|
||||
const { vbds, vhds, vifs, vm } = metadata
|
||||
const dir = dirname(metadata._filename)
|
||||
const vdis = ignoredVdis === undefined ? metadata.vdis : pickBy(metadata.vdis, vdi => !ignoredVdis.has(vdi.uuid))
|
||||
|
||||
const streams = {}
|
||||
await asyncMapSettled(Object.keys(vdis), async id => {
|
||||
streams[`${id}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[id]))
|
||||
await asyncMapSettled(Object.keys(vdis), async ref => {
|
||||
streams[`${ref}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[ref]))
|
||||
})
|
||||
|
||||
return {
|
||||
@@ -601,7 +654,10 @@ class RemoteAdapter {
|
||||
}
|
||||
|
||||
async readVmBackupMetadata(path) {
|
||||
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
|
||||
// _filename is a private field used to compute the backup id
|
||||
//
|
||||
// it's enumerable to make it cacheable
|
||||
return { ...JSON.parse(await this._handler.readFile(path)), _filename: path }
|
||||
}
|
||||
}
|
||||
|
||||
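The comment explains why _filename moved from Object.defineProperty to a plain spread: a non-enumerable property is invisible to JSON.stringify, so it could not survive the new on-disk cache. A two-case illustration with a hypothetical path:

'use strict'

const metadata = { mode: 'delta', timestamp: 1650000000000 }
const path = '/xo-vm-backups/some-vm-uuid/20220101T000000Z.json' // hypothetical

// before: non-enumerable, dropped by JSON.stringify, so not cacheable
const before = Object.defineProperty({ ...metadata }, '_filename', { value: path })
console.log(JSON.stringify(before)) // {"mode":"delta","timestamp":1650000000000}

// after: plain enumerable property, survives the gzipped JSON cache
const after = { ...metadata, _filename: path }
console.log(JSON.stringify(after)) // now includes "_filename"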
|
||||
@@ -3,8 +3,10 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')

const logAfterEnd = () => {
throw new Error('task has already ended')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
}

const noop = Function.prototype
|
||||
@@ -45,7 +45,18 @@ const forkDeltaExport = deltaExport =>
|
||||
})
|
||||
|
||||
class VmBackup {
|
||||
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
|
||||
constructor({
|
||||
config,
|
||||
getSnapshotNameLabel,
|
||||
healthCheckSr,
|
||||
job,
|
||||
remoteAdapters,
|
||||
remotes,
|
||||
schedule,
|
||||
settings,
|
||||
srs,
|
||||
vm,
|
||||
}) {
|
||||
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
|
||||
// don't match replicated VMs created by this very job otherwise they
|
||||
// will be replicated again and again
|
||||
@@ -55,7 +66,6 @@ class VmBackup {
|
||||
this.config = config
|
||||
this.job = job
|
||||
this.remoteAdapters = remoteAdapters
|
||||
this.remotes = remotes
|
||||
this.scheduleId = schedule.id
|
||||
this.timestamp = undefined
|
||||
|
||||
@@ -69,6 +79,7 @@ class VmBackup {
|
||||
this._fullVdisRequired = undefined
|
||||
this._getSnapshotNameLabel = getSnapshotNameLabel
|
||||
this._isDelta = job.mode === 'delta'
|
||||
this._healthCheckSr = healthCheckSr
|
||||
this._jobId = job.id
|
||||
this._jobSnapshots = undefined
|
||||
this._xapi = vm.$xapi
|
||||
@@ -95,7 +106,6 @@ class VmBackup {
|
||||
: [FullBackupWriter, FullReplicationWriter]
|
||||
|
||||
const allSettings = job.settings
|
||||
|
||||
Object.keys(remoteAdapters).forEach(remoteId => {
|
||||
const targetSettings = {
|
||||
...settings,
|
||||
@@ -118,35 +128,49 @@ class VmBackup {
|
||||
}
|
||||
|
||||
// calls fn for each writer, warns of any errors, and throws only if there are no writers left
|
||||
async _callWriters(fn, warnMessage, parallel = true) {
|
||||
async _callWriters(fn, step, parallel = true) {
|
||||
const writers = this._writers
|
||||
const n = writers.size
|
||||
if (n === 0) {
|
||||
return
|
||||
}
|
||||
if (n === 1) {
|
||||
const [writer] = writers
|
||||
|
||||
async function callWriter(writer) {
|
||||
const { name } = writer.constructor
|
||||
try {
|
||||
debug('writer step starting', { step, writer: name })
|
||||
await fn(writer)
|
||||
debug('writer step succeeded', { duration: step, writer: name })
|
||||
} catch (error) {
|
||||
writers.delete(writer)
|
||||
|
||||
warn('writer step failed', { error, step, writer: name })
|
||||
|
||||
// these two steps are the only ones that are not already in their own sub-tasks
|
||||
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
|
||||
Task.warning(
|
||||
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
|
||||
)
|
||||
}
|
||||
|
||||
throw error
|
||||
}
|
||||
return
|
||||
}
|
||||
if (n === 1) {
|
||||
const [writer] = writers
|
||||
return callWriter(writer)
|
||||
}
|
||||
|
||||
const errors = []
|
||||
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
|
||||
try {
|
||||
await fn(writer)
|
||||
await callWriter(writer)
|
||||
} catch (error) {
|
||||
errors.push(error)
|
||||
this.delete(writer)
|
||||
warn(warnMessage, { error, writer: writer.constructor.name })
|
||||
}
|
||||
})
|
||||
if (writers.size === 0) {
|
||||
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
|
||||
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
|
||||
}
|
||||
}
|
||||
|
||||
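_callWriters now logs each step per writer, drops a writer as soon as it fails (with a Task.warning for the two steps that have no sub-task of their own) and only rejects once every writer is gone. The core policy in isolation; this simplified version always runs the writers in parallel and leaves the Task.warning part out:

'use strict'

// Minimal stand-alone version of the "drop failing writers, fail only when
// none are left" policy used above. `writers` is a Set of objects on which
// `fn` performs one backup step.
async function callWriters(writers, fn, step) {
  const errors = []
  await Promise.all(
    [...writers].map(async writer => {
      try {
        await fn(writer)
      } catch (error) {
        writers.delete(writer) // this writer is out for the rest of the run
        errors.push(error)
        console.warn('writer step failed', { step, writer: writer.constructor.name, error })
      }
    })
  )
  if (writers.size === 0) {
    throw new AggregateError(errors, `all targets have failed, step: ${step}`)
  }
}

// usage sketch
// await callWriters(writers, w => w.beforeBackup(), 'writer.beforeBackup()')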
@@ -173,7 +197,10 @@ class VmBackup {
|
||||
const settings = this._settings
|
||||
|
||||
const doSnapshot =
|
||||
this._isDelta || (!settings.offlineBackup && vm.power_state === 'Running') || settings.snapshotRetention !== 0
|
||||
settings.unconditionalSnapshot ||
|
||||
this._isDelta ||
|
||||
(!settings.offlineBackup && vm.power_state === 'Running') ||
|
||||
settings.snapshotRetention !== 0
|
||||
if (doSnapshot) {
|
||||
await Task.run({ name: 'snapshot' }, async () => {
|
||||
if (!settings.bypassVdiChainsCheck) {
|
||||
@@ -181,7 +208,9 @@ class VmBackup {
|
||||
}
|
||||
|
||||
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot']({
|
||||
ignoreNobakVdis: true,
|
||||
name_label: this._getSnapshotNameLabel(vm),
|
||||
unplugVusbs: true,
|
||||
})
|
||||
this.timestamp = Date.now()
|
||||
|
||||
@@ -303,22 +332,17 @@ class VmBackup {
|
||||
}
|
||||
|
||||
async _removeUnusedSnapshots() {
|
||||
const jobSettings = this.job.settings
|
||||
const allSettings = this.job.settings
|
||||
const baseSettings = this._baseSettings
|
||||
const baseVmRef = this._baseVm?.$ref
|
||||
const { config } = this
|
||||
const baseSettings = {
|
||||
...config.defaultSettings,
|
||||
...config.metadata.defaultSettings,
|
||||
...jobSettings[''],
|
||||
}
|
||||
|
||||
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
|
||||
const xapi = this._xapi
|
||||
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
|
||||
const settings = {
|
||||
...baseSettings,
|
||||
...jobSettings[scheduleId],
|
||||
...jobSettings[this.vm.uuid],
|
||||
...allSettings[scheduleId],
|
||||
...allSettings[this.vm.uuid],
|
||||
}
|
||||
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
|
||||
if ($ref !== baseVmRef) {
|
||||
@@ -397,6 +421,24 @@ class VmBackup {
|
||||
this._fullVdisRequired = fullVdisRequired
|
||||
}
|
||||
|
||||
async _healthCheck() {
|
||||
const settings = this._settings
|
||||
|
||||
if (this._healthCheckSr === undefined) {
|
||||
return
|
||||
}
|
||||
|
||||
// check if current VM has tags
|
||||
const { tags } = this.vm
|
||||
const intersect = settings.healthCheckVmsWithTags.some(t => tags.includes(t))
|
||||
|
||||
if (settings.healthCheckVmsWithTags.length !== 0 && !intersect) {
|
||||
return
|
||||
}
|
||||
|
||||
await this._callWriters(writer => writer.healthCheck(this._healthCheckSr), 'writer.healthCheck()')
|
||||
}
|
||||
|
||||
async run($defer) {
|
||||
const settings = this._settings
|
||||
assert(
|
||||
@@ -406,7 +448,9 @@ class VmBackup {
|
||||
|
||||
await this._callWriters(async writer => {
|
||||
await writer.beforeBackup()
|
||||
$defer(() => writer.afterBackup())
|
||||
$defer(async () => {
|
||||
await writer.afterBackup()
|
||||
})
|
||||
}, 'writer.beforeBackup()')
|
||||
|
||||
await this._fetchJobSnapshots()
|
||||
@@ -442,6 +486,7 @@ class VmBackup {
|
||||
await this._fetchJobSnapshots()
|
||||
await this._removeUnusedSnapshots()
|
||||
}
|
||||
await this._healthCheck()
|
||||
}
|
||||
}
|
||||
exports.VmBackup = VmBackup
|
||||
|
||||
@@ -3,4 +3,4 @@
exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')
exports.isXvaSumFile = filename => filename.endsWith('.xva.cheksum')
exports.isXvaSumFile = filename => filename.endsWith('.xva.checksum')

@@ -4,6 +4,8 @@ require('@xen-orchestra/log/configure.js').catchGlobalErrors(
|
||||
require('@xen-orchestra/log').createLogger('xo:backups:worker')
|
||||
)
|
||||
|
||||
require('@vates/cached-dns.lookup').createCachedLookup().patchGlobal()
|
||||
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const ignoreErrors = require('promise-toolbox/ignoreErrors')
|
||||
const { compose } = require('@vates/compose')
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
const rimraf = require('rimraf')
|
||||
const tmp = require('tmp')
|
||||
const fs = require('fs-extra')
|
||||
const uuid = require('uuid')
|
||||
const { getHandler } = require('@xen-orchestra/fs')
|
||||
const { pFromCallback } = require('promise-toolbox')
|
||||
const crypto = require('crypto')
|
||||
const { RemoteAdapter } = require('./RemoteAdapter')
|
||||
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
|
||||
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
|
||||
@@ -34,7 +34,8 @@ afterEach(async () => {
|
||||
await handler.forget()
|
||||
})
|
||||
|
||||
const uniqueId = () => crypto.randomBytes(16).toString('hex')
|
||||
const uniqueId = () => uuid.v1()
|
||||
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
|
||||
|
||||
async function generateVhd(path, opts = {}) {
|
||||
let vhd
|
||||
@@ -53,10 +54,9 @@ async function generateVhd(path, opts = {}) {
|
||||
}
|
||||
|
||||
vhd.header = { ...VHDHEADER, ...opts.header }
|
||||
vhd.footer = { ...VHDFOOTER, ...opts.footer }
|
||||
vhd.footer.uuid = Buffer.from(crypto.randomBytes(16))
|
||||
vhd.footer = { ...VHDFOOTER, ...opts.footer, uuid: uniqueIdBuffer() }
|
||||
|
||||
if (vhd.header.parentUnicodeName) {
|
||||
if (vhd.header.parentUuid) {
|
||||
vhd.footer.diskType = Constants.DISK_TYPES.DIFFERENCING
|
||||
} else {
|
||||
vhd.footer.diskType = Constants.DISK_TYPES.DYNAMIC
|
||||
@@ -78,48 +78,53 @@ test('It remove broken vhd', async () => {
|
||||
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
|
||||
expect((await handler.list(basePath)).length).toEqual(1)
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
const logInfo = message => {
|
||||
loggued += message
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: false, onLog })
|
||||
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
|
||||
await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
|
||||
expect(loggued).toEqual(`VHD check error`)
|
||||
// not removed
|
||||
expect((await handler.list(basePath)).length).toEqual(1)
|
||||
// really remove it
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
|
||||
expect((await handler.list(basePath)).length).toEqual(0)
|
||||
})
|
||||
|
||||
test('it remove vhd with missing or multiple ancestors', async () => {
|
||||
// one with a broken parent
|
||||
// one with a broken parent, should be deleted
|
||||
await generateVhd(`${basePath}/abandonned.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'gone.vhd',
|
||||
parentUid: Buffer.from(crypto.randomBytes(16)),
|
||||
parentUuid: uniqueIdBuffer(),
|
||||
},
|
||||
})
|
||||
|
||||
// one orphan, which is a full vhd, no parent
|
||||
// one orphan, which is a full vhd, no parent : should stay
|
||||
const orphan = await generateVhd(`${basePath}/orphan.vhd`)
|
||||
// a child to the orphan
|
||||
// a child to the orphan in the metadata : should stay
|
||||
await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
parentUuid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
await handler.writeFile(
|
||||
`metadata.json`,
|
||||
JSON.stringify({
|
||||
mode: 'delta',
|
||||
vhds: [`${basePath}/child.vhd`, `${basePath}/abandonned.vhd`],
|
||||
}),
|
||||
{ flags: 'w' }
|
||||
)
|
||||
// clean
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
const logInfo = message => {
|
||||
loggued += message + '\n'
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
|
||||
|
||||
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
|
||||
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
|
||||
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
|
||||
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
|
||||
|
||||
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
|
||||
})
|
||||
@@ -147,19 +152,17 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
|
||||
await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
parentUuid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
let loggued = ''
|
||||
const onLog = message => {
|
||||
const logInfo = message => {
|
||||
loggued += message + '\n'
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
let matched = loggued.match(/deleting unused VHD /g) || []
|
||||
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
|
||||
let matched = loggued.match(/deleting unused VHD/g) || []
|
||||
expect(matched.length).toEqual(1) // only one vhd should have been deleted
|
||||
matched = loggued.match(/abandonned.vhd is unused/g) || []
|
||||
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
|
||||
|
||||
// a missing vhd cause clean to remove all vhds
|
||||
await handler.writeFile(
|
||||
@@ -176,8 +179,8 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
|
||||
{ flags: 'w' }
|
||||
)
|
||||
loggued = ''
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
matched = loggued.match(/deleting unused VHD /g) || []
|
||||
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
|
||||
matched = loggued.match(/deleting unused VHD/g) || []
|
||||
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
|
||||
})
|
||||
|
||||
@@ -201,30 +204,28 @@ test('it merges delta of non destroyed chain', async () => {
|
||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
parentUuid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
// a grand child
|
||||
await generateVhd(`${basePath}/grandchild.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'child.vhd',
|
||||
parentUid: child.footer.uuid,
|
||||
parentUuid: child.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
let loggued = []
|
||||
const onLog = message => {
|
||||
const logInfo = message => {
|
||||
loggued.push(message)
|
||||
}
|
||||
await adapter.cleanVm('/', { remove: true, onLog })
|
||||
expect(loggued[0]).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
|
||||
expect(loggued[1]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
|
||||
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
|
||||
expect(loggued[0]).toEqual(`incorrect backup size in metadata`)
|
||||
|
||||
loggued = []
|
||||
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
|
||||
const [unused, merging] = loggued
|
||||
expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
|
||||
expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
|
||||
await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
|
||||
const [merging] = loggued
|
||||
expect(merging).toEqual(`merging VHD chain`)
|
||||
|
||||
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
|
||||
// size should be the size of children + grand children after the merge
|
||||
@@ -254,7 +255,7 @@ test('it finish unterminated merge ', async () => {
|
||||
const child = await generateVhd(`${basePath}/child.vhd`, {
|
||||
header: {
|
||||
parentUnicodeName: 'orphan.vhd',
|
||||
parentUid: orphan.footer.uuid,
|
||||
parentUuid: orphan.footer.uuid,
|
||||
},
|
||||
})
|
||||
// a merge in progress file
|
||||
@@ -270,7 +271,7 @@ test('it finish unterminated merge ', async () => {
|
||||
})
|
||||
)
|
||||
|
||||
await adapter.cleanVm('/', { remove: true, merge: true })
|
||||
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
|
||||
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12:12)
|
||||
|
||||
// only check deletion
|
||||
@@ -310,7 +311,7 @@ describe('tests multiple combination ', () => {
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'gone.vhd',
|
||||
parentUid: crypto.randomBytes(16),
|
||||
parentUuid: uniqueIdBuffer(),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -324,7 +325,7 @@ describe('tests multiple combination ', () => {
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUid: ancestor.footer.uuid,
|
||||
parentUuid: ancestor.footer.uuid,
|
||||
},
|
||||
})
|
||||
// a grand child vhd in metadata
|
||||
@@ -333,7 +334,7 @@ describe('tests multiple combination ', () => {
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUid: child.footer.uuid,
|
||||
parentUuid: child.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -348,7 +349,7 @@ describe('tests multiple combination ', () => {
|
||||
mode: vhdMode,
|
||||
header: {
|
||||
parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
|
||||
parentUid: cleanAncestor.footer.uuid,
|
||||
parentUuid: cleanAncestor.footer.uuid,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -377,7 +378,7 @@ describe('tests multiple combination ', () => {
|
||||
})
|
||||
)
|
||||
|
||||
await adapter.cleanVm('/', { remove: true, merge: true })
|
||||
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
|
||||
|
||||
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
|
||||
// size should be the size of children + grand children + clean after the merge
|
||||
@@ -413,7 +414,7 @@ describe('tests multiple combination ', () => {
|
||||
test('it cleans orphan merge states ', async () => {
|
||||
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
|
||||
|
||||
await adapter.cleanVm('/', { remove: true })
|
||||
await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })
|
||||
|
||||
expect(await handler.list(basePath)).toEqual([])
|
||||
})
|
||||
@@ -428,7 +429,11 @@ test('check Aliases should work alone', async () => {
|
||||
|
||||
await generateVhd(`vhds/data/missingalias.vhd`)
|
||||
|
||||
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
|
||||
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
|
||||
remove: true,
|
||||
handler,
|
||||
logWarn: () => {},
|
||||
})
|
||||
|
||||
// only ok has survived
|
||||
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
|
||||
|
||||
@@ -1,22 +1,27 @@
|
||||
'use strict'
|
||||
|
||||
const assert = require('assert')
|
||||
const sum = require('lodash/sum')
|
||||
const UUID = require('uuid')
|
||||
const { asyncMap } = require('@xen-orchestra/async-map')
|
||||
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
|
||||
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
|
||||
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
|
||||
const { dirname, resolve } = require('path')
|
||||
const { DISK_TYPES } = Constants
|
||||
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
|
||||
const { limitConcurrency } = require('limit-concurrency-decorator')
|
||||
const { mergeVhdChain } = require('vhd-lib/merge')
|
||||
|
||||
const { Task } = require('./Task.js')
|
||||
const { Disposable } = require('promise-toolbox')
|
||||
const handlerPath = require('@xen-orchestra/fs/path')
|
||||
|
||||
// checking the size of a vhd directory is costly
|
||||
// 1 Http Query per 1000 blocks
|
||||
// we only check the size if all the VHDs are VhdFiles
|
||||
function shouldComputeVhdsSize(vhds) {
|
||||
function shouldComputeVhdsSize(handler, vhds) {
|
||||
if (handler.isEncrypted) {
|
||||
return false
|
||||
}
|
||||
return vhds.every(vhd => vhd instanceof VhdFile)
|
||||
}
|
||||
|
||||
@@ -24,86 +29,48 @@ const computeVhdsSize = (handler, vhdPaths) =>
|
||||
Disposable.use(
|
||||
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
|
||||
async vhds => {
|
||||
if (shouldComputeVhdsSize(vhds)) {
|
||||
if (shouldComputeVhdsSize(handler, vhds)) {
|
||||
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
|
||||
return sum(sizes)
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
// chain is an array of VHDs from child to parent
|
||||
//
|
||||
// the whole chain will be merged into parent, parent will be renamed to child
|
||||
// and all the others will deleted
|
||||
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
|
||||
assert(chain.length >= 2)
|
||||
|
||||
let child = chain[0]
|
||||
const parent = chain[chain.length - 1]
|
||||
const children = chain.slice(0, -1).reverse()
|
||||
|
||||
chain
|
||||
.slice(1)
|
||||
.reverse()
|
||||
.forEach(parent => {
|
||||
onLog(`the parent ${parent} of the child ${child} is unused`)
|
||||
})
|
||||
|
||||
// chain is [ ancestor, child_1, ..., child_n ]
|
||||
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge }) {
|
||||
if (merge) {
|
||||
// `mergeVhd` does not work with a stream, either
|
||||
// - make it accept a stream
|
||||
// - or create synthetic VHD which is not a stream
|
||||
if (children.length !== 1) {
|
||||
// TODO: implement merging multiple children
|
||||
children.length = 1
|
||||
child = children[0]
|
||||
}
|
||||
|
||||
onLog(`merging ${child} into ${parent}`)
|
||||
logInfo(`merging VHD chain`, { chain })
|
||||
|
||||
let done, total
|
||||
const handle = setInterval(() => {
|
||||
if (done !== undefined) {
|
||||
onLog(`merging ${child}: ${done}/${total}`)
|
||||
logInfo('merge in progress', {
|
||||
done,
|
||||
parent: chain[0],
|
||||
progress: Math.round((100 * done) / total),
|
||||
total,
|
||||
})
|
||||
}
|
||||
}, 10e3)
|
||||
|
||||
const mergedSize = await mergeVhd(
|
||||
handler,
|
||||
parent,
|
||||
handler,
|
||||
child,
|
||||
// children.length === 1
|
||||
// ? child
|
||||
// : await createSyntheticStream(handler, children),
|
||||
{
|
||||
try {
|
||||
return await mergeVhdChain(handler, chain, {
|
||||
logInfo,
|
||||
onProgress({ done: d, total: t }) {
|
||||
done = d
|
||||
total = t
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
clearInterval(handle)
|
||||
await Promise.all([
|
||||
VhdAbstract.rename(handler, parent, child),
|
||||
asyncMap(children.slice(0, -1), child => {
|
||||
onLog(`the VHD ${child} is unused`)
|
||||
if (remove) {
|
||||
onLog(`deleting unused VHD ${child}`)
|
||||
return VhdAbstract.unlink(handler, child)
|
||||
}
|
||||
}),
|
||||
])
|
||||
|
||||
return mergedSize
|
||||
removeUnused: remove,
|
||||
})
|
||||
} finally {
|
||||
clearInterval(handle)
|
||||
}
|
||||
}
|
||||
}
|
||||
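_mergeVhdChain now hands the merge itself to vhd-lib's mergeVhdChain and keeps only the periodic progress logging, with the interval cleared in a finally so it cannot leak when the merge throws. The reporting pattern detached from VHDs; doWork and its onProgress option are placeholders:

'use strict'

// Report progress every 10 seconds while a long asynchronous job runs,
// and always stop the timer, even if the job throws.
async function withProgressLog(doWork, logInfo) {
  let done, total
  const handle = setInterval(() => {
    if (done !== undefined) {
      logInfo('merge in progress', { done, total, progress: Math.round((100 * done) / total) })
    }
  }, 10e3)
  try {
    return await doWork({
      onProgress({ done: d, total: t }) {
        done = d
        total = t
      },
    })
  } finally {
    clearInterval(handle)
  }
}

module.exports = withProgressLog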
|
||||
const noop = Function.prototype
|
||||
|
||||
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
|
||||
const listVhds = async (handler, vmDir) => {
|
||||
const listVhds = async (handler, vmDir, logWarn) => {
|
||||
const vhds = new Set()
|
||||
const aliases = {}
|
||||
const interruptedVhds = new Map()
|
||||
@@ -123,12 +90,23 @@ const listVhds = async (handler, vmDir) => {
|
||||
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
|
||||
})
|
||||
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
|
||||
list.forEach(file => {
|
||||
|
||||
await asyncMap(list, async file => {
|
||||
const res = INTERRUPTED_VHDS_REG.exec(file)
|
||||
if (res === null) {
|
||||
vhds.add(`${vdiDir}/${file}`)
|
||||
} else {
|
||||
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
|
||||
try {
|
||||
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
|
||||
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
|
||||
statePath: `${vdiDir}/${file}`,
|
||||
chain: mergeState.chain,
|
||||
})
|
||||
} catch (error) {
|
||||
// fall back to a non-resuming merge
|
||||
vhds.add(`${vdiDir}/${file}`)
|
||||
logWarn('failed to read existing merge state', { path: file, error })
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -138,16 +116,21 @@ const listVhds = async (handler, vmDir) => {
|
||||
return { vhds, interruptedVhds, aliases }
|
||||
}
|
||||
|
||||
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
|
||||
async function checkAliases(
|
||||
aliasPaths,
|
||||
targetDataRepository,
|
||||
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
|
||||
) {
|
||||
const aliasFound = []
|
||||
for (const path of aliasPaths) {
|
||||
const target = await resolveVhdAlias(handler, path)
|
||||
for (const alias of aliasPaths) {
|
||||
const target = await resolveVhdAlias(handler, alias)
|
||||
|
||||
if (!isVhdFile(target)) {
|
||||
onLog(`Alias ${path} references a non vhd target: ${target}`)
|
||||
logWarn('alias references non VHD target', { alias, target })
|
||||
if (remove) {
|
||||
logInfo('removing alias and non VHD target', { alias, target })
|
||||
await handler.unlink(target)
|
||||
await handler.unlink(path)
|
||||
await handler.unlink(alias)
|
||||
}
|
||||
continue
|
||||
}
|
||||
@@ -160,13 +143,13 @@ async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog =
|
||||
// error during dispose should not trigger a deletion
|
||||
}
|
||||
} catch (error) {
|
||||
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
|
||||
logWarn('missing or broken alias target', { alias, target, error })
|
||||
if (remove) {
|
||||
try {
|
||||
await VhdAbstract.unlink(handler, path)
|
||||
} catch (e) {
|
||||
if (e.code !== 'ENOENT') {
|
||||
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
|
||||
await VhdAbstract.unlink(handler, alias)
|
||||
} catch (error) {
|
||||
if (error.code !== 'ENOENT') {
|
||||
logWarn('error deleting alias target', { alias, target, error })
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -176,42 +159,45 @@ async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog =
|
||||
aliasFound.push(resolve('/', target))
|
||||
}
|
||||
|
||||
const entries = await handler.list(targetDataRepository, {
|
||||
const vhds = await handler.list(targetDataRepository, {
|
||||
ignoreMissing: true,
|
||||
prependDir: true,
|
||||
})
|
||||
|
||||
entries.forEach(async entry => {
|
||||
if (!aliasFound.includes(entry)) {
|
||||
onLog(`the Vhd ${entry} is not referenced by a an alias`)
|
||||
await asyncMap(vhds, async path => {
|
||||
if (!aliasFound.includes(path)) {
|
||||
logWarn('no alias references VHD', { path })
|
||||
if (remove) {
|
||||
await VhdAbstract.unlink(handler, entry)
|
||||
logInfo('deleting unused VHD', { path })
|
||||
await VhdAbstract.unlink(handler, path)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
exports.checkAliases = checkAliases
|
||||
|
||||
const defaultMergeLimiter = limitConcurrency(1)
|
||||
|
||||
exports.cleanVm = async function cleanVm(
|
||||
vmDir,
|
||||
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
|
||||
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
|
||||
) {
|
||||
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
|
||||
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
|
||||
|
||||
const handler = this._handler
|
||||
|
||||
const vhdsToJSons = new Set()
|
||||
const vhdById = new Map()
|
||||
const vhdParents = { __proto__: null }
|
||||
const vhdChildren = { __proto__: null }
|
||||
|
||||
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
|
||||
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
|
||||
|
||||
// remove broken VHDs
|
||||
await asyncMap(vhds, async path => {
|
||||
try {
|
||||
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), vhd => {
|
||||
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), async vhd => {
|
||||
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
|
||||
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
|
||||
vhdParents[path] = parent
|
||||
@@ -224,12 +210,32 @@ exports.cleanVm = async function cleanVm(
|
||||
}
|
||||
vhdChildren[parent] = path
|
||||
}
|
||||
// Detect VHDs with the same UUIDs
|
||||
//
|
||||
// Due to a bug introduced in a1bcd35e2
|
||||
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
|
||||
let vhdKept = vhd
|
||||
if (duplicate !== undefined) {
|
||||
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
|
||||
if (duplicate.containsAllDataOf(vhd)) {
|
||||
logWarn(`should delete ${path}`)
|
||||
vhdKept = duplicate
|
||||
vhds.delete(path)
|
||||
} else if (vhd.containsAllDataOf(duplicate)) {
|
||||
logWarn(`should delete ${duplicate._path}`)
|
||||
vhds.delete(duplicate._path)
|
||||
} else {
|
||||
logWarn('same ids but different content')
|
||||
}
|
||||
}
|
||||
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
|
||||
await vhd.check()
|
||||
})
|
||||
} catch (error) {
|
||||
vhds.delete(path)
|
||||
onLog(`error while checking the VHD with path ${path}`, { error })
|
||||
logWarn('VHD check error', { path, error })
|
||||
if (error?.code === 'ERR_ASSERTION' && remove) {
|
||||
onLog(`deleting broken ${path}`)
|
||||
logInfo('deleting broken VHD', { path })
|
||||
return VhdAbstract.unlink(handler, path)
|
||||
}
|
||||
}
|
||||
@@ -238,15 +244,15 @@ exports.cleanVm = async function cleanVm(
|
||||
// remove interrupted merge states for missing VHDs
|
||||
for (const interruptedVhd of interruptedVhds.keys()) {
|
||||
if (!vhds.has(interruptedVhd)) {
|
||||
const statePath = interruptedVhds.get(interruptedVhd)
|
||||
const { statePath } = interruptedVhds.get(interruptedVhd)
|
||||
interruptedVhds.delete(interruptedVhd)
|
||||
|
||||
onLog('orphan merge state', {
|
||||
logWarn('orphan merge state', {
|
||||
mergeStatePath: statePath,
|
||||
missingVhdPath: interruptedVhd,
|
||||
})
|
||||
if (remove) {
|
||||
onLog(`deleting orphan merge state ${statePath}`)
|
||||
logInfo('deleting orphan merge state', { statePath })
|
||||
await handler.unlink(statePath)
|
||||
}
|
||||
}
|
||||
@@ -255,7 +261,7 @@ exports.cleanVm = async function cleanVm(
|
||||
// check if alias are correct
|
||||
// check if all vhd in data subfolder have a corresponding alias
|
||||
await asyncMap(Object.keys(aliases), async dir => {
|
||||
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
|
||||
await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
|
||||
})
|
||||
|
||||
// remove VHDs with missing ancestors
|
||||
@@ -277,9 +283,9 @@ exports.cleanVm = async function cleanVm(
|
||||
if (!vhds.has(parent)) {
|
||||
vhds.delete(vhdPath)
|
||||
|
||||
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
|
||||
logWarn('parent VHD is missing', { parent, child: vhdPath })
|
||||
if (remove) {
|
||||
onLog(`deleting orphan VHD ${vhdPath}`)
|
||||
logInfo('deleting orphan VHD', { path: vhdPath })
|
||||
deletions.push(VhdAbstract.unlink(handler, vhdPath))
|
||||
}
|
||||
}
|
||||
@@ -316,7 +322,7 @@ exports.cleanVm = async function cleanVm(
|
||||
// check is not good enough to delete the file, the best we can do is report
|
||||
// it
|
||||
if (!(await this.isValidXva(path))) {
|
||||
onLog(`the XVA with path ${path} is potentially broken`)
|
||||
logWarn('XVA might be broken', { path })
|
||||
}
|
||||
})
|
||||
|
||||
@@ -330,7 +336,7 @@ exports.cleanVm = async function cleanVm(
|
||||
try {
|
||||
metadata = JSON.parse(await handler.readFile(json))
|
||||
} catch (error) {
|
||||
onLog(`failed to read metadata file ${json}`, { error })
|
||||
logWarn('failed to read backup metadata', { path: json, error })
|
||||
jsons.delete(json)
|
||||
return
|
||||
}
|
||||
@@ -341,9 +347,9 @@ exports.cleanVm = async function cleanVm(
|
||||
if (xvas.has(linkedXva)) {
|
||||
unusedXvas.delete(linkedXva)
|
||||
} else {
|
||||
onLog(`the XVA linked to the metadata ${json} is missing`)
|
||||
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
|
||||
if (remove) {
|
||||
onLog(`deleting incomplete backup ${json}`)
|
||||
logInfo('deleting incomplete backup', { path: json })
|
||||
jsons.delete(json)
|
||||
await handler.unlink(json)
|
||||
}
|
||||
@@ -364,9 +370,9 @@ exports.cleanVm = async function cleanVm(
|
||||
vhdsToJSons[path] = json
|
||||
})
|
||||
} else {
|
||||
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
|
||||
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
|
||||
if (remove) {
|
||||
onLog(`deleting incomplete backup ${json}`)
|
||||
logInfo('deleting incomplete backup', { path: json })
|
||||
jsons.delete(json)
|
||||
await handler.unlink(json)
|
||||
}
|
||||
@@ -378,7 +384,7 @@ exports.cleanVm = async function cleanVm(
|
||||
const unusedVhdsDeletion = []
|
||||
const toMerge = []
|
||||
{
|
||||
// VHD chains (as list from child to ancestor) to merge indexed by last
|
||||
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
|
||||
// ancestor
|
||||
const vhdChainsToMerge = { __proto__: null }
|
||||
|
||||
@@ -402,14 +408,14 @@ exports.cleanVm = async function cleanVm(
|
||||
if (child !== undefined) {
|
||||
const chain = getUsedChildChainOrDelete(child)
|
||||
if (chain !== undefined) {
|
||||
chain.push(vhd)
|
||||
chain.unshift(vhd)
|
||||
return chain
|
||||
}
|
||||
}
|
||||
|
||||
onLog(`the VHD ${vhd} is unused`)
|
||||
logWarn('unused VHD', { path: vhd })
|
||||
if (remove) {
|
||||
onLog(`deleting unused VHD ${vhd}`)
|
||||
logInfo('deleting unused VHD', { path: vhd })
|
||||
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
|
||||
}
|
||||
}
|
||||
@@ -420,7 +426,13 @@ exports.cleanVm = async function cleanVm(
|
||||
|
||||
// merge interrupted VHDs
|
||||
for (const parent of interruptedVhds.keys()) {
|
||||
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
|
||||
// before #6349 the chain wasn't in the mergeState
|
||||
const { chain, statePath } = interruptedVhds.get(parent)
|
||||
if (chain === undefined) {
|
||||
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
|
||||
} else {
|
||||
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
|
||||
}
|
||||
}
|
||||
|
||||
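Interrupted merges are resumed from the .<vhd>.merge.json state file; since #6349 that file also records the VHD chain, stored relative to the state file itself, while older state files without a chain fall back to the two-entry parent/child merge. A sketch of what such a state file could contain and of the path resolution; the JSON shape and the use of Node's path module as a stand-in for handlerPath.resolveFromFile are assumptions based on this hunk:

'use strict'

const { dirname, resolve } = require('path')

// Assumed shape of a merge state file written next to the VHDs
// (only the `chain` field matters here, real files contain more).
const statePath = '/xo-vm-backups/vm-uuid/vdi-uuid/.20220101T000000Z.vhd.merge.json'
const mergeState = {
  chain: ['./20211201T000000Z.vhd', './20220101T000000Z.vhd'],
}

// resolve each chain entry against the state file location, as the hunk does
// with handlerPath.resolveFromFile(statePath, vhdPath)
const chain = mergeState.chain.map(p => resolve(dirname(statePath), p))
console.log(chain)
// [ '/xo-vm-backups/vm-uuid/vdi-uuid/20211201T000000Z.vhd',
//   '/xo-vm-backups/vm-uuid/vdi-uuid/20220101T000000Z.vhd' ]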
Object.values(vhdChainsToMerge).forEach(chain => {
|
||||
@@ -433,9 +445,9 @@ exports.cleanVm = async function cleanVm(
|
||||
const metadataWithMergedVhd = {}
|
||||
const doMerge = async () => {
|
||||
await asyncMap(toMerge, async chain => {
|
||||
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
|
||||
const merged = await limitedMergeVhdChain(handler, chain, { logInfo, logWarn, remove, merge })
|
||||
if (merged !== undefined) {
|
||||
const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
|
||||
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
|
||||
metadataWithMergedVhd[metadataPath] = true
|
||||
}
|
||||
})
|
||||
@@ -445,18 +457,18 @@ exports.cleanVm = async function cleanVm(
|
||||
...unusedVhdsDeletion,
|
||||
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
|
||||
asyncMap(unusedXvas, path => {
|
||||
onLog(`the XVA ${path} is unused`)
|
||||
logWarn('unused XVA', { path })
|
||||
if (remove) {
|
||||
onLog(`deleting unused XVA ${path}`)
|
||||
logInfo('deleting unused XVA', { path })
|
||||
return handler.unlink(path)
|
||||
}
|
||||
}),
|
||||
asyncMap(xvaSums, path => {
|
||||
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
|
||||
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
|
||||
onLog(`the XVA checksum ${path} is unused`)
|
||||
logInfo('unused XVA checksum', { path })
|
||||
if (remove) {
|
||||
onLog(`deleting unused XVA checksum ${path}`)
|
||||
logInfo('deleting unused XVA checksum', { path })
|
||||
return handler.unlink(path)
|
||||
}
|
||||
}
|
||||
@@ -478,7 +490,11 @@ exports.cleanVm = async function cleanVm(
|
||||
if (mode === 'full') {
|
||||
// a full backup : check size
|
||||
const linkedXva = resolve('/', vmDir, xva)
|
||||
fileSystemSize = await handler.getSize(linkedXva)
|
||||
try {
|
||||
fileSystemSize = await handler.getSize(linkedXva)
|
||||
} catch (error) {
|
||||
// can fail with encrypted remote
|
||||
}
|
||||
} else if (mode === 'delta') {
|
||||
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
|
||||
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
|
||||
@@ -490,11 +506,15 @@ exports.cleanVm = async function cleanVm(
|
||||
|
||||
// don't warn if the size has changed after a merge
|
||||
if (!merged && fileSystemSize !== size) {
|
||||
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
|
||||
logWarn('incorrect backup size in metadata', {
|
||||
path: metadataPath,
|
||||
actual: size ?? 'none',
|
||||
expected: fileSystemSize,
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
onLog(`failed to get size of ${metadataPath}`, { error })
|
||||
logWarn('failed to get backup size', { backup: metadataPath, error })
|
||||
return
|
||||
}
|
||||
|
||||
@@ -504,7 +524,7 @@ exports.cleanVm = async function cleanVm(
|
||||
try {
|
||||
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
|
||||
} catch (error) {
|
||||
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
|
||||
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
@@ -11,6 +11,8 @@ const { createVhdStreamWithLength } = require('vhd-lib')
|
||||
const { defer } = require('golike-defer')
|
||||
|
||||
const { cancelableMap } = require('./_cancelableMap.js')
|
||||
const { Task } = require('./Task.js')
|
||||
const { pick } = require('lodash')
|
||||
|
||||
const TAG_BASE_DELTA = 'xo:base_delta'
|
||||
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
|
||||
@@ -20,6 +22,9 @@ exports.TAG_COPY_SRC = TAG_COPY_SRC
|
||||
|
||||
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
|
||||
const resolveUuid = async (xapi, cache, uuid, type) => {
|
||||
if (uuid == null) {
|
||||
return uuid
|
||||
}
|
||||
let ref = cache.get(uuid)
|
||||
if (ref === undefined) {
|
||||
ref = await xapi.call(`${type}.get_by_uuid`, uuid)
|
||||
@@ -60,17 +65,6 @@ exports.exportDeltaVm = async function exportDeltaVm(
|
||||
return
|
||||
}
|
||||
|
||||
// If the VDI name starts with `[NOBAK]`, do not export it.
|
||||
if (vdi.name_label.startsWith('[NOBAK]')) {
|
||||
// FIXME: find a way to not create the VDI snapshot in the
|
||||
// first time.
|
||||
//
|
||||
// The snapshot must not exist otherwise it could break the
|
||||
// next export.
|
||||
ignoreErrors.call(vdi.$destroy())
|
||||
return
|
||||
}
|
||||
|
||||
vbds[vbd.$ref] = vbd
|
||||
|
||||
const vdiRef = vdi.$ref
|
||||
@@ -195,19 +189,25 @@ exports.importDeltaVm = defer(async function importDeltaVm(
|
||||
let suspendVdi
|
||||
if (vmRecord.power_state === 'Suspended') {
|
||||
const vdi = vdiRecords[vmRecord.suspend_VDI]
|
||||
suspendVdi = await xapi.getRecord(
|
||||
'VDI',
|
||||
await xapi.VDI_create({
|
||||
...vdi,
|
||||
other_config: {
|
||||
...vdi.other_config,
|
||||
[TAG_BASE_DELTA]: undefined,
|
||||
[TAG_COPY_SRC]: vdi.uuid,
|
||||
},
|
||||
sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
|
||||
if (vdi === undefined) {
|
||||
Task.warning('Suspend VDI not available for this suspended VM', {
|
||||
vm: pick(vmRecord, 'uuid', 'name_label'),
|
||||
})
|
||||
)
|
||||
$defer.onFailure(() => suspendVdi.$destroy())
|
||||
} else {
|
||||
suspendVdi = await xapi.getRecord(
|
||||
'VDI',
|
||||
await xapi.VDI_create({
|
||||
...vdi,
|
||||
other_config: {
|
||||
...vdi.other_config,
|
||||
[TAG_BASE_DELTA]: undefined,
|
||||
[TAG_COPY_SRC]: vdi.uuid,
|
||||
},
|
||||
sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
|
||||
})
|
||||
)
|
||||
$defer.onFailure(() => suspendVdi.$destroy())
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Create the VM.
|
||||
|
||||
@@ -3,6 +3,8 @@
|
||||
const eos = require('end-of-stream')
|
||||
const { PassThrough } = require('stream')
|
||||
|
||||
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
|
||||
|
||||
// create a new readable stream from an existing one which may be piped later
|
||||
//
|
||||
// in case of error in the new readable stream, it will simply be unpiped
|
||||
@@ -11,18 +13,23 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
|
||||
const { forks = 0 } = stream
|
||||
stream.forks = forks + 1
|
||||
|
||||
debug('forking', { forks: stream.forks })
|
||||
|
||||
const proxy = new PassThrough()
|
||||
stream.pipe(proxy)
|
||||
eos(stream, error => {
|
||||
if (error !== undefined) {
|
||||
debug('error on original stream, destroying fork', { error })
|
||||
proxy.destroy(error)
|
||||
}
|
||||
})
|
||||
eos(proxy, _ => {
|
||||
stream.forks--
|
||||
eos(proxy, error => {
|
||||
debug('end of stream, unpiping', { error, forks: --stream.forks })
|
||||
|
||||
stream.unpipe(proxy)
|
||||
|
||||
if (stream.forks === 0) {
|
||||
debug('no more forks, destroying original stream')
|
||||
stream.destroy(new Error('no more consumers for this stream'))
|
||||
}
|
||||
})
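
To illustrate how these forks are meant to be consumed, here is a hypothetical usage sketch; the module path, file names and destination streams are assumptions for illustration, not part of this diff:

```js
'use strict'

const { createReadStream, createWriteStream } = require('fs')

// NOTE: the module path is an assumption, the diff does not show the file name
const { forkStreamUnpipe } = require('./_forkStreamUnpipe.js')

const source = createReadStream('./export.xva')

// two independent consumers of the same source stream
const copy1 = forkStreamUnpipe(source)
const copy2 = forkStreamUnpipe(source)

copy1.pipe(createWriteStream('./copy1.xva'))
copy2.pipe(createWriteStream('./copy2.xva'))

// if one fork errors it is simply unpiped; the source stream is destroyed
// only once its last fork has ended
```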
|
||||
|
||||
@@ -49,6 +49,11 @@ const isValidTar = async (handler, size, fd) => {
|
||||
// TODO: find an heuristic for compressed files
|
||||
async function isValidXva(path) {
|
||||
const handler = this._handler
|
||||
|
||||
// size is longer when encrypted + reading part of an encrypted file is not implemented
|
||||
if (handler.isEncrypted) {
|
||||
return true
|
||||
}
|
||||
try {
|
||||
const fd = await handler.openFile(path, 'r')
|
||||
try {
|
||||
@@ -66,7 +71,6 @@ async function isValidXva(path) {
|
||||
}
|
||||
} catch (error) {
|
||||
// never throw, log and report as valid to avoid side effects
|
||||
console.error('isValidXva', path, error)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
52
@xen-orchestra/backups/docs/Metadata backups/README.md
Normal file
@@ -0,0 +1,52 @@
|
||||
- [File structure on remote](#file-structure-on-remote)
|
||||
- [Structure of `metadata.json`](#structure-of-metadatajson)
|
||||
- [Task logs](#task-logs)
|
||||
- [During backup](#during-backup)
|
||||
|
||||
## File structure on remote
|
||||
|
||||
```
|
||||
<remote>
|
||||
├─ xo-config-backups
|
||||
│ └─ <schedule ID>
|
||||
│ └─ <YYYYMMDD>T<HHmmss>
|
||||
│ ├─ metadata.json
|
||||
│ └─ data.json
|
||||
└─ xo-pool-metadata-backups
|
||||
└─ <schedule ID>
|
||||
└─ <pool UUID>
|
||||
└─ <YYYYMMDD>T<HHmmss>
|
||||
├─ metadata.json
|
||||
└─ data
|
||||
```
|
||||
|
||||
## Structure of `metadata.json`
|
||||
|
||||
```ts
|
||||
interface Metadata {
|
||||
jobId: String
|
||||
jobName: String
|
||||
scheduleId: String
|
||||
scheduleName: String
|
||||
timestamp: number
|
||||
pool?: Pool
|
||||
poolMaster?: Host
|
||||
}
|
||||
```
|
||||
|
||||
## Task logs
|
||||
|
||||
### During backup
|
||||
|
||||
```
|
||||
job.start(data: { reportWhen: ReportWhen })
|
||||
├─ task.start(data: { type: 'pool', id: string, pool?: Pool, poolMaster?: Host })
|
||||
│ ├─ task.start(data: { type: 'remote', id: string })
|
||||
│ │ └─ task.end
|
||||
│ └─ task.end
|
||||
├─ task.start(data: { type: 'xo' })
|
||||
│ ├─ task.start(data: { type: 'remote', id: string })
|
||||
│ │ └─ task.end
|
||||
│ └─ task.end
|
||||
└─ job.end
|
||||
```
|
||||
222
@xen-orchestra/backups/docs/VM backups/README.md
Normal file
@@ -0,0 +1,222 @@
|
||||
- [File structure on remote](#file-structure-on-remote)
|
||||
- [Attributes](#attributes)
|
||||
- [Of created snapshots](#of-created-snapshots)
|
||||
- [Of created VMs and snapshots](#of-created-vms-and-snapshots)
|
||||
- [Of created VMs](#of-created-vms)
|
||||
- [Task logs](#task-logs)
|
||||
- [During backup](#during-backup)
|
||||
- [During restoration](#during-restoration)
|
||||
- [API](#api)
|
||||
- [Run description object](#run-description-object)
|
||||
- [`IdPattern`](#idpattern)
|
||||
- [Settings](#settings)
|
||||
- [Writer API](#writer-api)
|
||||
|
||||
## File structure on remote
|
||||
|
||||
### with vhd files
|
||||
|
||||
```
|
||||
<remote>
|
||||
└─ xo-vm-backups
|
||||
├─ index.json // TODO
|
||||
└─ <VM UUID>
|
||||
├─ index.json // TODO
|
||||
├─ vdis
|
||||
│ └─ <job UUID>
|
||||
│ └─ <VDI UUID>
|
||||
│ ├─ index.json // TODO
|
||||
│ └─ <YYYYMMDD>T<HHmmss>.vhd
|
||||
├─ <YYYYMMDD>T<HHmmss>.json // backup metadata
|
||||
├─ <YYYYMMDD>T<HHmmss>.xva
|
||||
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
|
||||
```
|
||||
|
||||
### with vhd directories
|
||||
|
||||
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
|
||||
|
||||
```
|
||||
<vdis>/<job UUID>/<VDI UUID>
|
||||
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
|
||||
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
|
||||
└─ data
|
||||
├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
|
||||
└─ <uuid>.vhd
|
||||
```
|
||||
|
||||
## Attributes
|
||||
|
||||
### Of created snapshots
|
||||
|
||||
- `other_config`:
|
||||
- `xo:backup:deltaChainLength` = n (number of deltas copied/replicated since the last full)
|
||||
- `xo:backup:exported` = 'true' (added at the end of the backup)
|
||||
|
||||
### Of created VMs and snapshots
|
||||
|
||||
- `other_config`:
|
||||
- `xo:backup:datetime`: format is UTC %Y%m%dT%H:%M:%SZ
|
||||
- from snapshots: snapshot.snapshot_time
|
||||
- with offline backup: formatDateTime(Date.now())
|
||||
- `xo:backup:job` = job.id
|
||||
- `xo:backup:schedule` = schedule.id
|
||||
- `xo:backup:vm` = vm.uuid
|
||||
|
||||
### Of created VMs
|
||||
|
||||
- `name_label`: `${original name} - ${job name} - (${safeDateFormat(backup timestamp)})`
|
||||
- tag:
|
||||
- copy in delta mode: `Continuous Replication`
|
||||
- copy in full mode: `Disaster Recovery`
|
||||
- imported from backup: `restored from backup`
|
||||
- `blocked_operations.start`: message
|
||||
- for copies/replications only, added after complete transfer
|
||||
- `other_config[xo:backup:sr]` = sr.uuid
|
||||
|
||||
## Task logs
|
||||
|
||||
### During backup
|
||||
|
||||
```
|
||||
job.start(data: { mode: Mode, reportWhen: ReportWhen })
|
||||
├─ task.info(message: 'vms', data: { vms: string[] })
|
||||
├─ task.warning(message: string)
|
||||
├─ task.start(data: { type: 'VM', id: string })
|
||||
│ ├─ task.warning(message: string)
|
||||
| ├─ task.start(message: 'clean-vm')
|
||||
│ │ └─ task.end
|
||||
│ ├─ task.start(message: 'snapshot')
|
||||
│ │ └─ task.end
|
||||
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
|
||||
│ │ ├─ task.warning(message: string)
|
||||
│ │ ├─ task.start(message: 'transfer')
|
||||
│ │ │ ├─ task.warning(message: string)
|
||||
│ │ │ └─ task.end(result: { size: number })
|
||||
│ │ │
|
||||
│ │ │ // in case there is a healthcheck scheduled for this vm in this job
|
||||
│ │ ├─ task.start(message: 'health check')
|
||||
│ │ │ ├─ task.start(message: 'transfer')
|
||||
│ │ │ │ └─ task.end(result: { size: number })
|
||||
│ │ │ ├─ task.start(message: 'vmstart')
|
||||
│ │ │ │ └─ task.end
|
||||
│ │ │ └─ task.end
|
||||
│ │ │
|
||||
│ │ │ // in case of full backup, DR and CR
|
||||
│ │ ├─ task.start(message: 'clean')
|
||||
│ │ │ ├─ task.warning(message: string)
|
||||
│ │ │ └─ task.end
|
||||
│ │ └─ task.end
|
||||
| ├─ task.start(message: 'clean-vm')
|
||||
│ │ └─ task.end
|
||||
│ └─ task.end
|
||||
└─ job.end
|
||||
```
|
||||
|
||||
### During restoration
|
||||
|
||||
```
|
||||
task.start(message: 'restore', data: { jobId: string, srId: string, time: number })
|
||||
├─ task.start(message: 'transfer')
|
||||
│ └─ task.end(result: { id: string, size: number })
|
||||
└─ task.end
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
### Run description object
|
||||
|
||||
This is a JavaScript object containing all the information necessary to run a backup job.
|
||||
|
||||
```coffee
|
||||
# Information about the job itself
|
||||
job:
|
||||
|
||||
# Unique identifier
|
||||
id: string
|
||||
|
||||
# Human readable identifier
|
||||
name: string
|
||||
|
||||
# Whether this job is doing Full Backup / Disaster Recovery or
|
||||
# Delta Backup / Continuous Replication
|
||||
mode: 'full' | 'delta'
|
||||
|
||||
# For backup jobs, indicates which remotes to use
|
||||
remotes: IdPattern
|
||||
|
||||
settings:
|
||||
|
||||
# Used for the whole job
|
||||
'': Settings
|
||||
|
||||
# Used for a specific schedule
|
||||
[ScheduleId]: Settings
|
||||
|
||||
# Used for a specific VM
|
||||
[VmId]: Settings
|
||||
|
||||
# For replication jobs, indicates which SRs to use
|
||||
srs: IdPattern
|
||||
|
||||
# Here for historical reasons
|
||||
type: 'backup'
|
||||
|
||||
# Indicates which VMs to backup/replicate
|
||||
vms: IdPattern
|
||||
|
||||
# Indicates which XAPI to use to connect to a specific VM or SR
|
||||
recordToXapi:
|
||||
[ObjectId]: XapiId
|
||||
|
||||
# Information necessary to connect to each remote
|
||||
remotes:
|
||||
[RemoteId]:
|
||||
url: string
|
||||
|
||||
# Indicates which schedule is used for this run
|
||||
schedule:
|
||||
id: ScheduleId
|
||||
|
||||
# Information necessary to connect to each XAPI
|
||||
xapis:
|
||||
[XapiId]:
|
||||
allowUnauthorized: boolean
|
||||
credentials:
|
||||
password: string
|
||||
username: string
|
||||
url: string
|
||||
```
|
||||
|
||||
### `IdPattern`
|
||||
|
||||
For a single object:
|
||||
|
||||
```
|
||||
{ id: string }
|
||||
```
|
||||
|
||||
For multiple objects:
|
||||
|
||||
```
|
||||
{ id: { __or: string[] } }
|
||||
```
|
||||
|
||||
> This syntax is compatible with [`value-matcher`](https://github.com/vatesfr/xen-orchestra/tree/master/packages/value-matcher).
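
For illustration, here are the two pattern shapes as concrete values, together with a hand-rolled check that mirrors their semantics; the UUIDs are made up and the real matching is done by the `value-matcher` package, not by this sketch:

```js
'use strict'

// illustrative patterns only — the UUIDs are made up
const singleVm = { id: '0e2498e8-0000-4000-8000-000000000001' }
const severalVms = {
  id: { __or: ['0e2498e8-0000-4000-8000-000000000001', '0e2498e8-0000-4000-8000-000000000002'] },
}

// hand-rolled check mirroring the two shapes above
function matchesIdPattern(pattern, object) {
  const { id } = pattern
  return typeof id === 'string' ? object.id === id : id.__or.includes(object.id)
}

console.log(matchesIdPattern(singleVm, { id: '0e2498e8-0000-4000-8000-000000000001' })) // true
console.log(matchesIdPattern(severalVms, { id: '0e2498e8-0000-4000-8000-000000000003' })) // false
```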
|
||||
|
||||
### Settings
|
||||
|
||||
Settings are described in [`@xen-orchestra/backups/Backup.js`](https://github.com/vatesfr/xen-orchestra/blob/master/%40xen-orchestra/backups/Backup.js).
|
||||
|
||||
## Writer API
|
||||
|
||||
- `beforeBackup()`
|
||||
- **Delta**
|
||||
- `checkBaseVdis(baseUuidToSrcVdi, baseVm)`
|
||||
- `prepare({ isFull })`
|
||||
- `transfer({ timestamp, deltaExport, sizeContainers })`
|
||||
- `cleanup()`
|
||||
- `healthCheck(sr)`
|
||||
- **Full**
|
||||
- `run({ timestamp, sizeContainer, stream })`
|
||||
- `afterBackup()`
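
As a sketch of how this interface fits together, here is a minimal full-mode writer; only the method names come from the list above, the bodies are placeholders and not the real DeltaBackupWriter/FullBackupWriter logic:

```js
'use strict'

// minimal illustrative writer, not the actual implementation
class LoggingFullWriter {
  async beforeBackup() {
    console.log('preparing destination')
  }

  // full mode: receives the XVA export as a stream
  async run({ timestamp, sizeContainer, stream }) {
    let size = 0
    for await (const chunk of stream) {
      size += chunk.length
    }
    console.log('transferred', { timestamp, size })
  }

  healthCheck(sr) {
    // no-op in this sketch
  }

  async afterBackup() {
    console.log('destination cleaned up')
  }
}

module.exports = { LoggingFullWriter }
```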
|
||||
@@ -1,4 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
// eslint-disable-next-line eslint-comments/disable-enable-pair
|
||||
/* eslint-disable n/shebang */
|
||||
|
||||
'use strict'
|
||||
|
||||
@@ -62,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
|
||||
try {
|
||||
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
|
||||
try {
|
||||
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
|
||||
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
|
||||
} catch (error) {
|
||||
// consider the clean successful if the VM dir is missing
|
||||
if (error.code !== 'ENOENT') {
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
"type": "git",
|
||||
"url": "https://github.com/vatesfr/xen-orchestra.git"
|
||||
},
|
||||
"version": "0.20.0",
|
||||
"version": "0.27.4",
|
||||
"engines": {
|
||||
"node": ">=14.6"
|
||||
},
|
||||
@@ -16,16 +16,18 @@
|
||||
"postversion": "npm publish --access public"
|
||||
},
|
||||
"dependencies": {
|
||||
"@vates/cached-dns.lookup": "^1.0.0",
|
||||
"@vates/compose": "^2.1.0",
|
||||
"@vates/decorate-with": "^1.0.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@vates/disposable": "^0.1.1",
|
||||
"@vates/parse-duration": "^0.1.1",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/fs": "^0.20.0",
|
||||
"@xen-orchestra/fs": "^3.0.0",
|
||||
"@xen-orchestra/log": "^0.3.0",
|
||||
"@xen-orchestra/template": "^0.1.0",
|
||||
"compare-versions": "^4.0.1",
|
||||
"d3-time-format": "^3.0.0",
|
||||
"decorator-synchronized": "^0.6.0",
|
||||
"end-of-stream": "^1.4.4",
|
||||
"fs-extra": "^10.0.0",
|
||||
"golike-defer": "^0.5.1",
|
||||
@@ -36,11 +38,15 @@
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"uuid": "^8.3.2",
|
||||
"vhd-lib": "^3.1.0",
|
||||
"vhd-lib": "^4.0.0",
|
||||
"yazl": "^2.5.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"rimraf": "^3.0.2",
|
||||
"tmp": "^0.2.1"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@xen-orchestra/xapi": "^0.9.0"
|
||||
"@xen-orchestra/xapi": "^1.4.2"
|
||||
},
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"author": {
|
||||
|
||||
@@ -19,6 +19,8 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
|
||||
const { checkVhd } = require('./_checkVhd.js')
|
||||
const { packUuid } = require('./_packUuid.js')
|
||||
const { Disposable } = require('promise-toolbox')
|
||||
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
|
||||
const { ImportVmBackup } = require('../ImportVmBackup.js')
|
||||
|
||||
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
|
||||
|
||||
@@ -69,6 +71,35 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
|
||||
return this._cleanVm({ merge: true })
|
||||
}
|
||||
|
||||
healthCheck(sr) {
|
||||
return Task.run(
|
||||
{
|
||||
name: 'health check',
|
||||
},
|
||||
async () => {
|
||||
const xapi = sr.$xapi
|
||||
const srUuid = sr.uuid
|
||||
const adapter = this._adapter
|
||||
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
|
||||
const { id: restoredId } = await new ImportVmBackup({
|
||||
adapter,
|
||||
metadata,
|
||||
srUuid,
|
||||
xapi,
|
||||
}).run()
|
||||
const restoredVm = xapi.getObject(restoredId)
|
||||
try {
|
||||
await new HealthCheckVmBackup({
|
||||
restoredVm,
|
||||
xapi,
|
||||
}).run()
|
||||
} finally {
|
||||
await xapi.VM_destroy(restoredVm.$ref)
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
prepare({ isFull }) {
|
||||
// create the task related to this export and ensure all methods are called in this context
|
||||
const task = new Task({
|
||||
@@ -80,7 +111,9 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
|
||||
},
|
||||
})
|
||||
this.transfer = task.wrapFn(this.transfer)
|
||||
this.cleanup = task.wrapFn(this.cleanup, true)
|
||||
this.healthCheck = task.wrapFn(this.healthCheck)
|
||||
this.cleanup = task.wrapFn(this.cleanup)
|
||||
this.afterBackup = task.wrapFn(this.afterBackup, true)
|
||||
|
||||
return task.run(() => this._prepare())
|
||||
}
|
||||
@@ -156,7 +189,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
|
||||
}/${adapter.getVhdFileName(basename)}`
|
||||
)
|
||||
|
||||
const metadataFilename = `${backupDir}/${basename}.json`
|
||||
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
|
||||
const metadataContent = {
|
||||
jobId,
|
||||
mode: job.mode,
|
||||
|
||||
@@ -9,4 +9,6 @@ exports.AbstractWriter = class AbstractWriter {
|
||||
beforeBackup() {}
|
||||
|
||||
afterBackup() {}
|
||||
|
||||
healthCheck(sr) {}
|
||||
}
|
||||
|
||||
@@ -6,8 +6,9 @@ const { join } = require('path')
|
||||
const { getVmBackupDir } = require('../_getVmBackupDir.js')
|
||||
const MergeWorker = require('../merge-worker/index.js')
|
||||
const { formatFilenameDate } = require('../_filenameDate.js')
|
||||
const { Task } = require('../Task.js')
|
||||
|
||||
const { warn } = createLogger('xo:backups:MixinBackupWriter')
|
||||
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
|
||||
|
||||
exports.MixinBackupWriter = (BaseClass = Object) =>
|
||||
class MixinBackupWriter extends BaseClass {
|
||||
@@ -25,11 +26,17 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
|
||||
|
||||
async _cleanVm(options) {
|
||||
try {
|
||||
return await this._adapter.cleanVm(this.#vmBackupDir, {
|
||||
...options,
|
||||
fixMetadata: true,
|
||||
onLog: warn,
|
||||
lock: false,
|
||||
return await Task.run({ name: 'clean-vm' }, () => {
|
||||
return this._adapter.cleanVm(this.#vmBackupDir, {
|
||||
...options,
|
||||
fixMetadata: true,
|
||||
logInfo: info,
|
||||
logWarn: (message, data) => {
|
||||
warn(message, data)
|
||||
Task.warning(message, data)
|
||||
},
|
||||
lock: false,
|
||||
})
|
||||
})
|
||||
} catch (error) {
|
||||
warn(error)
|
||||
@@ -64,5 +71,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
|
||||
const remotePath = handler._getRealPath()
|
||||
await MergeWorker.run(remotePath)
|
||||
}
|
||||
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@
|
||||
"preferGlobal": true,
|
||||
"dependencies": {
|
||||
"golike-defer": "^0.5.1",
|
||||
"xen-api": "^0.36.0"
|
||||
"xen-api": "^1.2.2"
|
||||
},
|
||||
"scripts": {
|
||||
"postversion": "npm publish"
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
'use strict'
|
||||
|
||||
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))
|
||||
@@ -1 +0,0 @@
|
||||
../../scripts/babel-eslintrc.js
|
||||
@@ -1,7 +1,9 @@
|
||||
import moment from 'moment-timezone'
|
||||
'use strict'
|
||||
|
||||
import next from './next'
|
||||
import parse from './parse'
|
||||
const moment = require('moment-timezone')
|
||||
|
||||
const next = require('./next')
|
||||
const parse = require('./parse')
|
||||
|
||||
const MAX_DELAY = 2 ** 31 - 1
|
||||
|
||||
@@ -94,4 +96,5 @@ class Schedule {
|
||||
}
|
||||
}
|
||||
|
||||
export const createSchedule = (...args) => new Schedule(...args)
|
||||
const createSchedule = (...args) => new Schedule(...args)
|
||||
exports.createSchedule = createSchedule
|
||||
@@ -1,6 +1,8 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { createSchedule } from './'
|
||||
'use strict'
|
||||
|
||||
const { createSchedule } = require('./')
|
||||
|
||||
jest.useFakeTimers()
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import moment from 'moment-timezone'
|
||||
import sortedIndex from 'lodash/sortedIndex'
|
||||
'use strict'
|
||||
|
||||
const moment = require('moment-timezone')
|
||||
const sortedIndex = require('lodash/sortedIndex')
|
||||
|
||||
const NEXT_MAPPING = {
|
||||
month: { year: 1 },
|
||||
@@ -31,7 +33,7 @@ const setFirstAvailable = (date, unit, values) => {
|
||||
}
|
||||
|
||||
// returns the next run, after the passed date
|
||||
export default (schedule, fromDate) => {
|
||||
module.exports = (schedule, fromDate) => {
|
||||
let date = moment(fromDate)
|
||||
.set({
|
||||
second: 0,
|
||||
@@ -1,10 +1,12 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import mapValues from 'lodash/mapValues'
|
||||
import moment from 'moment-timezone'
|
||||
'use strict'
|
||||
|
||||
import next from './next'
|
||||
import parse from './parse'
|
||||
const mapValues = require('lodash/mapValues')
|
||||
const moment = require('moment-timezone')
|
||||
|
||||
const next = require('./next')
|
||||
const parse = require('./parse')
|
||||
|
||||
const N = (pattern, fromDate = '2018-04-09T06:25') => {
|
||||
const iso = next(parse(pattern), moment.utc(fromDate)).toISOString()
|
||||
@@ -27,31 +27,17 @@
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"preferGlobal": false,
|
||||
"main": "dist/",
|
||||
"browserslist": [
|
||||
">2%"
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
"node": ">=8.3"
|
||||
},
|
||||
"dependencies": {
|
||||
"lodash": "^4.17.4",
|
||||
"moment-timezone": "^0.5.14"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"cross-env": "^7.0.2",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
|
||||
"clean": "rimraf dist/",
|
||||
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
|
||||
"prebuild": "yarn run clean",
|
||||
"predev": "yarn run clean",
|
||||
"prepublishOnly": "yarn run build",
|
||||
"postversion": "npm publish"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
'use strict'
|
||||
|
||||
const compareNumbers = (a, b) => a - b
|
||||
|
||||
const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
|
||||
@@ -148,7 +150,7 @@ const createParser = ({ fields: [...fields], presets: { ...presets } }) => {
|
||||
return parse
|
||||
}
|
||||
|
||||
export default createParser({
|
||||
module.exports = createParser({
|
||||
fields: [
|
||||
{
|
||||
name: 'minute',
|
||||
@@ -1,6 +1,8 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import parse from './parse'
|
||||
'use strict'
|
||||
|
||||
const parse = require('./parse')
|
||||
|
||||
describe('parse()', () => {
|
||||
it('works', () => {
|
||||
@@ -22,7 +22,7 @@ await ee.emitAsync('start')
|
||||
// error handling though:
|
||||
await ee.emitAsync(
|
||||
{
|
||||
onError(error) {
|
||||
onError(error, event, listener) {
|
||||
console.warn(error)
|
||||
},
|
||||
},
|
||||
|
||||
@@ -40,7 +40,7 @@ await ee.emitAsync('start')
|
||||
// error handling though:
|
||||
await ee.emitAsync(
|
||||
{
|
||||
onError(error) {
|
||||
onError(error, event, listener) {
|
||||
console.warn(error)
|
||||
},
|
||||
},
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
'use strict'
|
||||
|
||||
const identity = v => v
|
||||
|
||||
module.exports = function emitAsync(event) {
|
||||
let opts
|
||||
let i = 1
|
||||
@@ -17,12 +19,18 @@ module.exports = function emitAsync(event) {
|
||||
}
|
||||
|
||||
const onError = opts != null && opts.onError
|
||||
const addErrorHandler = onError
|
||||
? (promise, listener) => promise.catch(error => onError(error, event, listener))
|
||||
: identity
|
||||
|
||||
return Promise.all(
|
||||
this.listeners(event).map(listener =>
|
||||
new Promise(resolve => {
|
||||
resolve(listener.apply(this, args))
|
||||
}).catch(onError)
|
||||
addErrorHandler(
|
||||
new Promise(resolve => {
|
||||
resolve(listener.apply(this, args))
|
||||
}),
|
||||
listener
|
||||
)
|
||||
)
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@xen-orchestra/emit-async",
|
||||
"version": "0.1.0",
|
||||
"version": "1.0.0",
|
||||
"license": "ISC",
|
||||
"description": "Emit an event for async listeners to settle",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",
|
||||
|
||||
62
@xen-orchestra/fs/cli.js
Executable file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
'use strict'
|
||||
|
||||
const Disposable = require('promise-toolbox/Disposable')
|
||||
const { getBoundPropertyDescriptor } = require('bind-property-descriptor')
|
||||
|
||||
const { getSyncedHandler } = require('./')
|
||||
|
||||
const { getPrototypeOf, ownKeys } = Reflect
|
||||
function getAllBoundDescriptors(object) {
|
||||
const descriptors = { __proto__: null }
|
||||
let current = object
|
||||
do {
|
||||
ownKeys(current).forEach(key => {
|
||||
if (!(key in descriptors)) {
|
||||
descriptors[key] = getBoundPropertyDescriptor(current, key, object)
|
||||
}
|
||||
})
|
||||
} while ((current = getPrototypeOf(current)) !== null)
|
||||
return descriptors
|
||||
}
|
||||
|
||||
// https://gist.github.com/julien-f/18161f6032e808d6fa08782951ce3bfb
|
||||
async function repl({ prompt, context } = {}) {
|
||||
const repl = require('repl').start({
|
||||
ignoreUndefined: true,
|
||||
prompt,
|
||||
})
|
||||
if (context !== undefined) {
|
||||
Object.defineProperties(repl.context, Object.getOwnPropertyDescriptors(context))
|
||||
}
|
||||
const { eval: evaluate } = repl
|
||||
repl.eval = (cmd, context, filename, cb) => {
|
||||
evaluate.call(repl, cmd, context, filename, (error, result) => {
|
||||
if (error != null) {
|
||||
return cb(error)
|
||||
}
|
||||
Promise.resolve(result).then(result => cb(undefined, result), cb)
|
||||
})
|
||||
}
|
||||
return new Promise((resolve, reject) => {
|
||||
repl.on('error', reject).on('exit', resolve)
|
||||
})
|
||||
}
|
||||
|
||||
async function* main([url]) {
|
||||
if (url === undefined) {
|
||||
throw new TypeError('missing arg <url>')
|
||||
}
|
||||
|
||||
const handler = yield getSyncedHandler({ url })
|
||||
await repl({
|
||||
prompt: handler.type + '> ',
|
||||
context: Object.create(null, getAllBoundDescriptors(handler)),
|
||||
})
|
||||
}
|
||||
|
||||
Disposable.wrap(main)(process.argv.slice(2)).catch(error => {
|
||||
console.error('FATAL:', error)
|
||||
process.exitCode = 1
|
||||
})
|
||||
19
@xen-orchestra/fs/docs/encryption.md
Normal file
@@ -0,0 +1,19 @@
|
||||
## metadata files
|
||||
|
||||
- Older remotes don't have any metadata files
|
||||
- Remotes used since 5.75 have two files: `encryption.json` and `metadata.json`
|
||||
|
||||
The metadata files are checked by the `sync()` method. If the check fails, it MUST throw an error and unmount the remote.
|
||||
|
||||
If the remote is empty, the `sync()` method creates them.
|
||||
|
||||
### encryption.json
|
||||
|
||||
A non-encrypted file containing the algorithm and parameters used for this remote.
|
||||
It MUST NOT contain the key.
|
||||
|
||||
### metadata.json
|
||||
|
||||
An encrypted JSON file containing the settings of a remote. Today it only contains a random value (`{random: <randomuuid>}`) and serves to check whether the encryption key set on the remote is valid; in the future it will also be able to store remote settings to ease disaster recovery.
|
||||
|
||||
If this file can't be read (decrypted, decompressed, ...), it means the encryption settings of the remote have been updated. If the remote is empty, update the `encryption.json` and `metadata.json` files; otherwise raise an error.
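
As a rough sketch of the check described above: the handler name and the `_readFile`/`readFile` split (raw read vs. read + decrypt) are assumptions based on the abstract remote handler, and the real implementation differs in its details:

```js
'use strict'

// rough sketch of the metadata check; `handler` is assumed to expose
// `_readFile` (raw read) and `readFile` (read + decrypt)
async function checkEncryptionMetadata(handler) {
  // encryption.json is stored in clear text and only describes the algorithm
  const { algorithm } = JSON.parse(await handler._readFile('/encryption.json'))
  console.log('remote encrypted with', algorithm)

  try {
    // metadata.json is encrypted: if it can be decrypted and parsed,
    // the key configured on the remote is the one used to create it
    JSON.parse(await handler.readFile('/metadata.json'))
  } catch (error) {
    // typically ERR_OSSL_EVP_BAD_DECRYPT when the key is wrong
    throw new Error('encryption key does not match the one used to create this remote')
  }
}

module.exports = { checkEncryptionMetadata }
```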
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"private": false,
|
||||
"name": "@xen-orchestra/fs",
|
||||
"version": "0.20.0",
|
||||
"version": "3.0.0",
|
||||
"license": "AGPL-3.0-or-later",
|
||||
"description": "The File System for Xen Orchestra backups.",
|
||||
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
|
||||
@@ -13,18 +13,25 @@
|
||||
},
|
||||
"preferGlobal": true,
|
||||
"main": "dist/",
|
||||
"bin": {
|
||||
"xo-fs": "./cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14"
|
||||
"node": ">=14.13"
|
||||
},
|
||||
"dependencies": {
|
||||
"@marsaud/smb2": "^0.18.0",
|
||||
"@aws-sdk/client-s3": "^3.54.0",
|
||||
"@aws-sdk/lib-storage": "^3.54.0",
|
||||
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
|
||||
"@aws-sdk/node-http-handler": "^3.54.0",
|
||||
"@sindresorhus/df": "^3.1.1",
|
||||
"@sullux/aws-sdk": "^1.0.5",
|
||||
"@vates/async-each": "^1.0.0",
|
||||
"@vates/coalesce-calls": "^0.1.0",
|
||||
"@vates/decorate-with": "^1.0.0",
|
||||
"@vates/decorate-with": "^2.0.0",
|
||||
"@vates/read-chunk": "^1.0.0",
|
||||
"@xen-orchestra/async-map": "^0.1.2",
|
||||
"@xen-orchestra/log": "^0.3.0",
|
||||
"aws-sdk": "^2.686.0",
|
||||
"bind-property-descriptor": "^2.0.0",
|
||||
"decorator-synchronized": "^0.6.0",
|
||||
"execa": "^5.0.0",
|
||||
"fs-extra": "^10.0.0",
|
||||
@@ -33,21 +40,20 @@
|
||||
"lodash": "^4.17.4",
|
||||
"promise-toolbox": "^0.21.0",
|
||||
"proper-lockfile": "^4.1.2",
|
||||
"readable-stream": "^3.0.6",
|
||||
"pumpify": "^2.0.1",
|
||||
"readable-stream": "^4.1.0",
|
||||
"through2": "^4.0.2",
|
||||
"xo-remote-parser": "^0.8.0"
|
||||
"xo-remote-parser": "^0.9.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.0.0",
|
||||
"@babel/core": "^7.0.0",
|
||||
"@babel/plugin-proposal-decorators": "^7.1.6",
|
||||
"@babel/plugin-proposal-function-bind": "^7.0.0",
|
||||
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.4.4",
|
||||
"@babel/preset-env": "^7.0.0",
|
||||
"async-iterator-to-stream": "^1.1.0",
|
||||
"@babel/preset-env": "^7.8.0",
|
||||
"babel-plugin-lodash": "^3.3.2",
|
||||
"cross-env": "^7.0.2",
|
||||
"dotenv": "^15.0.0",
|
||||
"dotenv": "^16.0.0",
|
||||
"rimraf": "^3.0.0"
|
||||
},
|
||||
"scripts": {
|
||||
@@ -62,5 +68,9 @@
|
||||
"author": {
|
||||
"name": "Vates SAS",
|
||||
"url": "https://vates.fr"
|
||||
},
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
"./path": "./dist/path.js"
|
||||
}
|
||||
}
|
||||
|
||||
18
@xen-orchestra/fs/src/_copyStreamToBuffer.js
Normal file
@@ -0,0 +1,18 @@
|
||||
/**
|
||||
* @param {Readable} inputStream
|
||||
* @param {Buffer} destinationBuffer
|
||||
* @returns {Promise<int>} Buffer length
|
||||
* @private
|
||||
*/
|
||||
export default function copyStreamToBuffer(inputStream, destinationBuffer) {
|
||||
return new Promise((resolve, reject) => {
|
||||
let index = 0
|
||||
|
||||
inputStream.on('data', chunk => {
|
||||
chunk.copy(destinationBuffer, index)
|
||||
index += chunk.length
|
||||
})
|
||||
inputStream.on('end', () => resolve(index))
|
||||
inputStream.on('error', err => reject(err))
|
||||
})
|
||||
}
|
||||
21
@xen-orchestra/fs/src/_copyStreamToBuffer.spec.js
Normal file
@@ -0,0 +1,21 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { Readable } from 'readable-stream'
|
||||
import copyStreamToBuffer from './_copyStreamToBuffer.js'
|
||||
|
||||
describe('copyStreamToBuffer', () => {
|
||||
it('should copy the stream to the buffer', async () => {
|
||||
const stream = new Readable({
|
||||
read() {
|
||||
this.push('hello')
|
||||
this.push(null)
|
||||
},
|
||||
})
|
||||
|
||||
const buffer = Buffer.alloc(3)
|
||||
|
||||
await copyStreamToBuffer(stream, buffer)
|
||||
|
||||
expect(buffer.toString()).toBe('hel')
|
||||
})
|
||||
})
|
||||
13
@xen-orchestra/fs/src/_createBufferFromStream.js
Normal file
@@ -0,0 +1,13 @@
|
||||
/**
|
||||
* @param {Readable} stream
|
||||
* @returns {Promise<Buffer>}
|
||||
* @private
|
||||
*/
|
||||
export default function createBufferFromStream(stream) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const chunks = []
|
||||
stream.on('data', chunk => chunks.push(chunk))
|
||||
stream.on('end', () => resolve(Buffer.concat(chunks)))
|
||||
stream.on('error', error => reject(error))
|
||||
})
|
||||
}
|
||||
19
@xen-orchestra/fs/src/_createBufferFromStream.spec.js
Normal file
@@ -0,0 +1,19 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import { Readable } from 'readable-stream'
|
||||
import createBufferFromStream from './_createBufferFromStream.js'
|
||||
|
||||
describe('createBufferFromStream', () => {
|
||||
it('should create a buffer from a stream', async () => {
|
||||
const stream = new Readable({
|
||||
read() {
|
||||
this.push('hello')
|
||||
this.push(null)
|
||||
},
|
||||
})
|
||||
|
||||
const buffer = await createBufferFromStream(stream)
|
||||
|
||||
expect(buffer.toString()).toBe('hello')
|
||||
})
|
||||
})
|
||||
71
@xen-orchestra/fs/src/_encryptor.js
Normal file
@@ -0,0 +1,71 @@
|
||||
const { readChunk } = require('@vates/read-chunk')
|
||||
const crypto = require('crypto')
|
||||
const pumpify = require('pumpify')
|
||||
|
||||
function getEncryptor(key) {
|
||||
if (key === undefined) {
|
||||
return {
|
||||
id: 'NULL_ENCRYPTOR',
|
||||
algorithm: 'none',
|
||||
key: 'none',
|
||||
ivLength: 0,
|
||||
encryptData: buffer => buffer,
|
||||
encryptStream: stream => stream,
|
||||
decryptData: buffer => buffer,
|
||||
decryptStream: stream => stream,
|
||||
}
|
||||
}
|
||||
const algorithm = 'aes-256-cbc'
|
||||
const ivLength = 16
|
||||
|
||||
function encryptStream(input) {
|
||||
const iv = crypto.randomBytes(ivLength)
|
||||
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
|
||||
|
||||
const encrypted = pumpify(input, cipher)
|
||||
encrypted.unshift(iv)
|
||||
return encrypted
|
||||
}
|
||||
|
||||
async function decryptStream(encryptedStream) {
|
||||
const iv = await readChunk(encryptedStream, ivLength)
|
||||
const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
|
||||
/**
|
||||
* WARNING
|
||||
*
|
||||
* the encrypted data has an initialization vector at the start + padding at the end
|
||||
* we can't predict the decrypted size from the encrypted size alone
|
||||
* thus, we can't set decrypted.length reliably
|
||||
*
|
||||
*/
|
||||
return pumpify(encryptedStream, cipher)
|
||||
}
|
||||
|
||||
function encryptData(buffer) {
|
||||
const iv = crypto.randomBytes(ivLength)
|
||||
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
|
||||
const encrypted = cipher.update(buffer)
|
||||
return Buffer.concat([iv, encrypted, cipher.final()])
|
||||
}
|
||||
|
||||
function decryptData(buffer) {
|
||||
const iv = buffer.slice(0, ivLength)
|
||||
const encrypted = buffer.slice(ivLength)
|
||||
const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
|
||||
const decrypted = decipher.update(encrypted)
|
||||
return Buffer.concat([decrypted, decipher.final()])
|
||||
}
|
||||
|
||||
return {
|
||||
id: algorithm,
|
||||
algorithm,
|
||||
key,
|
||||
ivLength,
|
||||
encryptData,
|
||||
encryptStream,
|
||||
decryptData,
|
||||
decryptStream,
|
||||
}
|
||||
}
|
||||
|
||||
exports._getEncryptor = getEncryptor
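
To show how this module is meant to be used, here is a hypothetical round-trip; the relative require path and the random key are assumptions for illustration, only the `_getEncryptor` API comes from the file above:

```js
'use strict'

const crypto = require('crypto')

// NOTE: the require path is an assumption, the diff only shows the file content
const { _getEncryptor } = require('./_encryptor.js')

// aes-256-cbc needs a 32-byte key; a random one is used here for illustration
const key = crypto.randomBytes(32)
const encryptor = _getEncryptor(key)

const cleartext = Buffer.from('backup metadata')
const encrypted = encryptor.encryptData(cleartext)

// the encrypted buffer starts with the 16-byte IV, so it is larger than the input
console.log(encrypted.length > cleartext.length) // true
console.log(encryptor.decryptData(encrypted).equals(cleartext)) // true

// without a key, the null encryptor leaves data untouched
console.log(_getEncryptor(undefined).encryptData(cleartext).equals(cleartext)) // true
```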
|
||||
4
@xen-orchestra/fs/src/_guessAwsRegion.js
Normal file
@@ -0,0 +1,4 @@
|
||||
export default function guessAwsRegion(host) {
|
||||
const matches = /^s3\.([^.]+)\.amazonaws.com$/.exec(host)
|
||||
return matches !== null ? matches[1] : 'us-east-1'
|
||||
}
|
||||
17
@xen-orchestra/fs/src/_guessAwsRegion.spec.js
Normal file
@@ -0,0 +1,17 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import guessAwsRegion from './_guessAwsRegion.js'
|
||||
|
||||
describe('guessAwsRegion', () => {
|
||||
it('should return region from AWS URL', async () => {
|
||||
const region = guessAwsRegion('s3.test-region.amazonaws.com')
|
||||
|
||||
expect(region).toBe('test-region')
|
||||
})
|
||||
|
||||
it('should return default region if none is found in AWS URL', async () => {
|
||||
const region = guessAwsRegion('s3.amazonaws.com')
|
||||
|
||||
expect(region).toBe('us-east-1')
|
||||
})
|
||||
})
|
||||
@@ -1,9 +0,0 @@
|
||||
import path from 'path'
|
||||
|
||||
const { resolve } = path.posix
|
||||
|
||||
// normalize the path:
|
||||
// - does not contains `.` or `..` (cannot escape root dir)
|
||||
// - always starts with `/`
|
||||
const normalizePath = path => resolve('/', path)
|
||||
export { normalizePath as default }
|
||||
@@ -1,18 +1,20 @@
|
||||
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
|
||||
import assert from 'assert'
|
||||
import getStream from 'get-stream'
|
||||
import path, { basename } from 'path'
|
||||
import { coalesceCalls } from '@vates/coalesce-calls'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
|
||||
import { limitConcurrency } from 'limit-concurrency-decorator'
|
||||
import { parse } from 'xo-remote-parser'
|
||||
import { pipeline } from 'stream'
|
||||
import { randomBytes } from 'crypto'
|
||||
import { randomBytes, randomUUID } from 'crypto'
|
||||
import { synchronized } from 'decorator-synchronized'
|
||||
|
||||
import normalizePath from './_normalizePath'
|
||||
import { basename, dirname, normalize as normalizePath } from './path'
|
||||
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
|
||||
import { _getEncryptor } from './_encryptor'
|
||||
|
||||
const { dirname } = path.posix
|
||||
const { info, warn } = createLogger('@xen-orchestra:fs')
|
||||
|
||||
const checksumFile = file => file + '.checksum'
|
||||
const computeRate = (hrtime, size) => {
|
||||
@@ -23,6 +25,9 @@ const computeRate = (hrtime, size) => {
|
||||
const DEFAULT_TIMEOUT = 6e5 // 10 min
|
||||
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
|
||||
|
||||
const ENCRYPTION_DESC_FILENAME = 'encryption.json'
|
||||
const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
|
||||
|
||||
const ignoreEnoent = error => {
|
||||
if (error == null || error.code !== 'ENOENT') {
|
||||
throw error
|
||||
@@ -63,6 +68,7 @@ class PrefixWrapper {
|
||||
}
|
||||
|
||||
export default class RemoteHandlerAbstract {
|
||||
_encryptor
|
||||
constructor(remote, options = {}) {
|
||||
if (remote.url === 'test://') {
|
||||
this._remote = remote
|
||||
@@ -73,6 +79,7 @@ export default class RemoteHandlerAbstract {
|
||||
}
|
||||
}
|
||||
;({ highWaterMark: this._highWaterMark, timeout: this._timeout = DEFAULT_TIMEOUT } = options)
|
||||
this._encryptor = _getEncryptor(this._remote.encryptionKey)
|
||||
|
||||
const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
|
||||
this.closeFile = sharedLimit(this.closeFile)
|
||||
@@ -111,90 +118,51 @@ export default class RemoteHandlerAbstract {
|
||||
await this.__closeFile(fd)
|
||||
}
|
||||
|
||||
// TODO: remove method
|
||||
async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
|
||||
async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
|
||||
if (options.end !== undefined || options.start !== undefined) {
|
||||
assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
|
||||
}
|
||||
if (typeof file === 'string') {
|
||||
file = normalizePath(file)
|
||||
}
|
||||
const path = typeof file === 'string' ? file : file.path
|
||||
const streamP = timeout.call(
|
||||
this._createOutputStream(file, {
|
||||
dirMode,
|
||||
flags: 'wx',
|
||||
...options,
|
||||
}),
|
||||
|
||||
let stream = await timeout.call(
|
||||
this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
|
||||
this._timeout
|
||||
)
|
||||
|
||||
if (!checksum) {
|
||||
return streamP
|
||||
}
|
||||
// detect early errors
|
||||
await fromEvent(stream, 'readable')
|
||||
|
||||
const checksumStream = createChecksumStream()
|
||||
const forwardError = error => {
|
||||
checksumStream.emit('error', error)
|
||||
}
|
||||
if (checksum) {
|
||||
try {
|
||||
const path = typeof file === 'string' ? file : file.path
|
||||
const checksum = await this._readFile(checksumFile(path), { flags: 'r' })
|
||||
|
||||
const stream = await streamP
|
||||
stream.on('error', forwardError)
|
||||
checksumStream.pipe(stream)
|
||||
|
||||
checksumStream.checksumWritten = checksumStream.checksum
|
||||
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
|
||||
.catch(forwardError)
|
||||
|
||||
return checksumStream
|
||||
}
|
||||
|
||||
createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
|
||||
if (typeof file === 'string') {
|
||||
file = normalizePath(file)
|
||||
}
|
||||
const path = typeof file === 'string' ? file : file.path
|
||||
const streamP = timeout
|
||||
.call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
|
||||
.then(stream => {
|
||||
// detect early errors
|
||||
let promise = fromEvent(stream, 'readable')
|
||||
|
||||
// try to add the length prop if missing and not a range stream
|
||||
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
|
||||
promise = Promise.all([
|
||||
promise,
|
||||
ignoreErrors.call(
|
||||
this._getSize(file).then(size => {
|
||||
stream.length = size
|
||||
})
|
||||
),
|
||||
])
|
||||
const { length } = stream
|
||||
stream = validChecksumOfReadStream(stream, String(checksum).trim())
|
||||
stream.length = length
|
||||
} catch (error) {
|
||||
if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
|
||||
throw error
|
||||
}
|
||||
|
||||
return promise.then(() => stream)
|
||||
})
|
||||
|
||||
if (!checksum) {
|
||||
return streamP
|
||||
}
|
||||
|
||||
// avoid a unhandled rejection warning
|
||||
ignoreErrors.call(streamP)
|
||||
|
||||
return this._readFile(checksumFile(path), { flags: 'r' }).then(
|
||||
checksum =>
|
||||
streamP.then(stream => {
|
||||
const { length } = stream
|
||||
stream = validChecksumOfReadStream(stream, String(checksum).trim())
|
||||
stream.length = length
|
||||
|
||||
return stream
|
||||
}),
|
||||
error => {
|
||||
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
|
||||
return streamP
|
||||
}
|
||||
throw error
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
if (this.isEncrypted) {
|
||||
stream = this._encryptor.decryptStream(stream)
|
||||
} else {
|
||||
// try to add the length prop if missing and not a range stream
|
||||
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
|
||||
try {
|
||||
stream.length = await this._getSize(file)
|
||||
} catch (error) {
|
||||
// ignore errors
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return stream
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -210,6 +178,8 @@ export default class RemoteHandlerAbstract {
|
||||
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
|
||||
path = normalizePath(path)
|
||||
let checksumStream
|
||||
|
||||
input = this._encryptor.encryptStream(input)
|
||||
if (checksum) {
|
||||
checksumStream = createChecksumStream()
|
||||
pipeline(input, checksumStream, noop)
|
||||
@@ -220,6 +190,8 @@ export default class RemoteHandlerAbstract {
|
||||
validator,
|
||||
})
|
||||
if (checksum) {
|
||||
// using _outputFile means the checksum will NOT be encrypted
|
||||
// it is by design to allow checking of encrypted files without the key
|
||||
await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
|
||||
}
|
||||
}
|
||||
@@ -239,8 +211,13 @@ export default class RemoteHandlerAbstract {
|
||||
return timeout.call(this._getInfo(), this._timeout)
|
||||
}
|
||||
|
||||
// when using encryption, the file size is aligned with the encryption block size ( 16 bytes )
|
||||
// that means the size will be 1 to 16 bytes more than the content size, plus the initialization vector length (16 bytes)
|
||||
async getSize(file) {
|
||||
return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
|
||||
assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
|
||||
|
||||
const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
|
||||
return size - this._encryptor.ivLength
|
||||
}
|
||||
|
||||
async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
|
||||
@@ -286,15 +263,18 @@ export default class RemoteHandlerAbstract {
|
||||
}
|
||||
|
||||
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
|
||||
await this._outputFile(normalizePath(file), data, { dirMode, flags })
|
||||
const encryptedData = this._encryptor.encryptData(data)
|
||||
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
|
||||
}
|
||||
|
||||
async read(file, buffer, position) {
|
||||
assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
|
||||
return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
|
||||
}
|
||||
|
||||
async readFile(file, { flags = 'r' } = {}) {
|
||||
return this._readFile(normalizePath(file), { flags })
|
||||
const data = await this._readFile(normalizePath(file), { flags })
|
||||
return this._encryptor.decryptData(data)
|
||||
}
|
||||
|
||||
async rename(oldPath, newPath, { checksum = false } = {}) {
|
||||
@@ -334,6 +314,61 @@ export default class RemoteHandlerAbstract {
|
||||
@synchronized()
|
||||
async sync() {
|
||||
await this._sync()
|
||||
try {
|
||||
await this._checkMetadata()
|
||||
} catch (error) {
|
||||
await this._forget()
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async _canWriteMetadata() {
|
||||
const list = await this.list('/', {
|
||||
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
|
||||
})
|
||||
return list.length === 0
|
||||
}
|
||||
|
||||
async _createMetadata() {
|
||||
await Promise.all([
|
||||
this._writeFile(
|
||||
normalizePath(ENCRYPTION_DESC_FILENAME),
|
||||
JSON.stringify({ algorithm: this._encryptor.algorithm }),
|
||||
{
|
||||
flags: 'w',
|
||||
}
|
||||
), // not encrypted
|
||||
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
|
||||
])
|
||||
}
|
||||
|
||||
async _checkMetadata() {
|
||||
try {
|
||||
// this file is not encrypted
|
||||
const data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
|
||||
JSON.parse(data)
|
||||
} catch (error) {
|
||||
if (error.code !== 'ENOENT') {
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
// this file is encrypted
|
||||
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME)
|
||||
JSON.parse(data)
|
||||
} catch (error) {
|
||||
if (error.code === 'ENOENT' || (await this._canWriteMetadata())) {
|
||||
info('will update metadata of this remote')
|
||||
return this._createMetadata()
|
||||
}
|
||||
warn(
|
||||
`The encryptionKey setting of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
|
||||
{ error }
|
||||
)
|
||||
// will probably send a ERR_OSSL_EVP_BAD_DECRYPT if key is incorrect
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
async test() {
|
||||
@@ -360,11 +395,12 @@ export default class RemoteHandlerAbstract {
|
||||
readRate: computeRate(readDuration, SIZE),
|
||||
}
|
||||
} catch (error) {
|
||||
warn(`error while testing the remote at step ${step}`, { error })
|
||||
return {
|
||||
success: false,
|
||||
step,
|
||||
file: testFileName,
|
||||
error: error.message || String(error),
|
||||
error,
|
||||
}
|
||||
} finally {
|
||||
ignoreErrors.call(this._unlink(testFileName))
|
||||
@@ -386,11 +422,13 @@ export default class RemoteHandlerAbstract {
|
||||
}
|
||||
|
||||
async write(file, buffer, position) {
|
||||
assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
|
||||
await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
|
||||
}
|
||||
|
||||
async writeFile(file, data, { flags = 'wx' } = {}) {
|
||||
await this._writeFile(normalizePath(file), data, { flags })
|
||||
const encryptedData = this._encryptor.encryptData(data)
|
||||
await this._writeFile(normalizePath(file), encryptedData, { flags })
|
||||
}
|
||||
|
||||
// Methods that can be called by private methods to avoid parallel limit on public methods
|
||||
@@ -423,6 +461,10 @@ export default class RemoteHandlerAbstract {
|
||||
|
||||
// Methods that can be implemented by inheriting classes
|
||||
|
||||
useVhdDirectory() {
|
||||
return this._remote.useVhdDirectory ?? false
|
||||
}
|
||||
|
||||
async _closeFile(fd) {
|
||||
throw new Error('Not implemented')
|
||||
}
|
||||
@@ -505,9 +547,13 @@ export default class RemoteHandlerAbstract {
|
||||
|
||||
async _outputStream(path, input, { dirMode, validator }) {
|
||||
const tmpPath = `${dirname(path)}/.${basename(path)}`
|
||||
const output = await this.createOutputStream(tmpPath, {
|
||||
dirMode,
|
||||
})
|
||||
const output = await timeout.call(
|
||||
this._createOutputStream(tmpPath, {
|
||||
dirMode,
|
||||
flags: 'wx',
|
||||
}),
|
||||
this._timeout
|
||||
)
|
||||
try {
|
||||
await fromCallback(pipeline, input, output)
|
||||
if (validator !== undefined) {
|
||||
@@ -551,7 +597,9 @@ export default class RemoteHandlerAbstract {
|
||||
const files = await this._list(dir)
|
||||
await asyncMapSettled(files, file =>
|
||||
this._unlink(`${dir}/${file}`).catch(error => {
|
||||
if (error.code === 'EISDIR') {
|
||||
// Unlink dir behavior is not consistent across platforms
|
||||
// https://github.com/nodejs/node-v0.x-archive/issues/5791
|
||||
if (error.code === 'EISDIR' || error.code === 'EPERM') {
|
||||
return this._rmtree(`${dir}/${file}`)
|
||||
}
|
||||
throw error
|
||||
@@ -588,6 +636,10 @@ export default class RemoteHandlerAbstract {
|
||||
async _writeFile(file, data, options) {
|
||||
throw new Error('Not implemented')
|
||||
}
|
||||
|
||||
get isEncrypted() {
|
||||
return this._encryptor.id !== 'NULL_ENCRYPTOR'
|
||||
}
|
||||
}
|
||||
|
||||
function createPrefixWrapperMethods() {
|
||||
|
||||
@@ -30,18 +30,6 @@ describe('closeFile()', () => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('createOutputStream()', () => {
|
||||
it(`throws in case of timeout`, async () => {
|
||||
const testHandler = new TestHandler({
|
||||
createOutputStream: () => new Promise(() => {}),
|
||||
})
|
||||
|
||||
const promise = testHandler.createOutputStream('File')
|
||||
jest.advanceTimersByTime(TIMEOUT)
|
||||
await expect(promise).rejects.toThrowError(TimeoutError)
|
||||
})
|
||||
})
|
||||
|
||||
describe('getInfo()', () => {
|
||||
it('throws in case of timeout', async () => {
|
||||
const testHandler = new TestHandler({
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
/* eslint-env jest */
|
||||
|
||||
import 'dotenv/config'
|
||||
import asyncIteratorToStream from 'async-iterator-to-stream'
|
||||
import { forOwn, random } from 'lodash'
|
||||
import { fromCallback } from 'promise-toolbox'
|
||||
import { pipeline } from 'readable-stream'
|
||||
import { tmpdir } from 'os'
|
||||
|
||||
import { getHandler } from '.'
|
||||
@@ -27,9 +24,6 @@ const unsecureRandomBytes = n => {
|
||||
|
||||
const TEST_DATA_LEN = 1024
|
||||
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
|
||||
const createTestDataStream = asyncIteratorToStream(function* () {
|
||||
yield TEST_DATA
|
||||
})
|
||||
|
||||
const rejectionOf = p =>
|
||||
p.then(
|
||||
@@ -82,14 +76,6 @@ handlers.forEach(url => {
|
||||
})
|
||||
})
|
||||
|
||||
describe('#createOutputStream()', () => {
|
||||
it('creates parent dir if missing', async () => {
|
||||
const stream = await handler.createOutputStream('dir/file')
|
||||
await fromCallback(pipeline, createTestDataStream(), stream)
|
||||
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
|
||||
})
|
||||
})
|
||||
|
||||
describe('#getInfo()', () => {
|
||||
let info
|
||||
beforeAll(async () => {
|
||||
|
||||
@@ -5,7 +5,6 @@ import RemoteHandlerLocal from './local'
|
||||
import RemoteHandlerNfs from './nfs'
|
||||
import RemoteHandlerS3 from './s3'
|
||||
import RemoteHandlerSmb from './smb'
|
||||
import RemoteHandlerSmbMount from './smb-mount'
|
||||
|
||||
const HANDLERS = {
|
||||
file: RemoteHandlerLocal,
|
||||
@@ -15,10 +14,8 @@ const HANDLERS = {
|
||||
|
||||
try {
|
||||
execa.sync('mount.cifs', ['-V'])
|
||||
HANDLERS.smb = RemoteHandlerSmbMount
|
||||
} catch (_) {
|
||||
HANDLERS.smb = RemoteHandlerSmb
|
||||
}
|
||||
} catch (_) {}
|
||||
|
||||
export const getHandler = (remote, ...rest) => {
|
||||
const Handler = HANDLERS[parse(remote.url).type]
|
||||
|
||||
@@ -1,13 +1,35 @@
|
||||
import df from '@sindresorhus/df'
|
||||
import fs from 'fs-extra'
|
||||
import identity from 'lodash/identity.js'
|
||||
import lockfile from 'proper-lockfile'
|
||||
import { createLogger } from '@xen-orchestra/log'
|
||||
import { fromEvent, retry } from 'promise-toolbox'
|
||||
|
||||
import RemoteHandlerAbstract from './abstract'
|
||||
|
||||
const { info, warn } = createLogger('xo:fs:local')
|
||||
|
||||
// save current stack trace and add it to any rejected error
|
||||
//
|
||||
// This is especially useful when the resolution is separate from the initial
|
||||
// call, which is often the case with RPC libs.
|
||||
//
|
||||
// There is a perf impact and it should be avoided in production.
|
||||
async function addSyncStackTrace(promise) {
|
||||
const stackContainer = new Error()
|
||||
try {
|
||||
return await promise
|
||||
} catch (error) {
|
||||
error.stack = stackContainer.stack
|
||||
throw error
|
||||
}
|
||||
}

export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote, opts = {}) {
super(remote)

this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : identity
this._retriesOnEagain = {
delay: 1e3,
retries: 9,
@@ -30,17 +52,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}

async _closeFile(fd) {
return fs.close(fd)
return this._addSyncStackTrace(fs.close(fd))
}

async _copy(oldPath, newPath) {
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
return this._addSyncStackTrace(fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath)))
}

async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)
await fromEvent(stream, 'open')
await this._addSyncStackTrace(fromEvent(stream, 'open'))
return stream
}
return fs.createReadStream('', {
@@ -53,7 +75,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
async _createWriteStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createWriteStream(this._getFilePath(file), options)
await fromEvent(stream, 'open')
await this._addSyncStackTrace(fromEvent(stream, 'open'))
return stream
}
return fs.createWriteStream('', {
@@ -79,71 +101,93 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}

async _getSize(file) {
const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
const stats = await this._addSyncStackTrace(fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path)))
return stats.size
}

async _list(dir) {
return fs.readdir(this._getFilePath(dir))
return this._addSyncStackTrace(fs.readdir(this._getFilePath(dir)))
}

_lock(path) {
return lockfile.lock(this._getFilePath(path))
async _lock(path) {
const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
async onCompromised(error) {
warn('lock compromised', { error })
try {
release = await acquire()
info('compromised lock was reacquired')
} catch (error) {
warn('compromised lock could not be reacquired', { error })
}
},
})

let release = await acquire()

return async () => {
try {
await release()
} catch (error) {
warn('lock could not be released', { error })
}
}
}
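As a rough illustration of how the disposer returned by _lock is meant to be consumed; the surrounding code is hypothetical, only the acquire/release shape comes from the method above.

// hypothetical caller: hold the lock only around the critical section
const release = await handler._lock('/backups/job-1')
try {
  // ... work on the locked directory ...
} finally {
  await release() // release errors are caught and logged by the disposer itself
}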

_mkdir(dir, { mode }) {
return fs.mkdir(this._getFilePath(dir), { mode })
return this._addSyncStackTrace(fs.mkdir(this._getFilePath(dir), { mode }))
}

async _openFile(path, flags) {
return fs.open(this._getFilePath(path), flags)
return this._addSyncStackTrace(fs.open(this._getFilePath(path), flags))
}

async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
file = needsClose ? await this._addSyncStackTrace(fs.open(this._getFilePath(file), 'r')) : file.fd
try {
return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
return await this._addSyncStackTrace(
fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
)
} finally {
if (needsClose) {
await fs.close(file)
await this._addSyncStackTrace(fs.close(file))
}
}
}

async _readFile(file, options) {
const filePath = this._getFilePath(file)
return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
return await this._addSyncStackTrace(retry(() => fs.readFile(filePath, options), this._retriesOnEagain))
}

async _rename(oldPath, newPath) {
return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
return this._addSyncStackTrace(fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath)))
}

async _rmdir(dir) {
return fs.rmdir(this._getFilePath(dir))
return this._addSyncStackTrace(fs.rmdir(this._getFilePath(dir)))
}

async _sync() {
const path = this._getRealPath('/')
await fs.ensureDir(path)
await fs.access(path, fs.R_OK | fs.W_OK)
await this._addSyncStackTrace(fs.ensureDir(path))
await this._addSyncStackTrace(fs.access(path, fs.R_OK | fs.W_OK))
}

_truncate(file, len) {
return fs.truncate(this._getFilePath(file), len)
return this._addSyncStackTrace(fs.truncate(this._getFilePath(file), len))
}

async _unlink(file) {
const filePath = this._getFilePath(file)
return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
return await this._addSyncStackTrace(retry(() => fs.unlink(filePath), this._retriesOnEagain))
}

_writeFd(file, buffer, position) {
return fs.write(file.fd, buffer, 0, buffer.length, position)
return this._addSyncStackTrace(fs.write(file.fd, buffer, 0, buffer.length, position))
}

_writeFile(file, data, { flags }) {
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
return this._addSyncStackTrace(fs.writeFile(this._getFilePath(file), data, { flag: flags }))
}
}

24
@xen-orchestra/fs/src/path.js
Normal file
@@ -0,0 +1,24 @@
import path from 'path'

const { basename, dirname, join, resolve, relative, sep } = path.posix

export { basename, dirname, join }

// normalize the path:
// - does not contain `.` or `..` (cannot escape root dir)
// - always starts with `/`
// - no trailing slash (except for root)
// - no duplicate slashes
export const normalize = path => resolve('/', path)

export function split(path) {
const parts = normalize(path).split(sep)

// remove first (empty) entry
parts.shift()

return parts
}

export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
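A few illustrative inputs and outputs for these helpers, computed from Node's path.posix semantics rather than taken from the diff:

import { normalize, split, relativeFromFile, resolveFromFile } from './path'

normalize('foo/../bar//baz/') // '/bar/baz'
normalize('/')                // '/'
split('/a/b/c')               // ['a', 'b', 'c']
relativeFromFile('/dir/a.vhd', '/dir/sub/b.vhd') // 'sub/b.vhd'
resolveFromFile('/dir/a.vhd', 'sub/b.vhd')       // 'dir/sub/b.vhd' (leading '/' stripped)
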
@@ -1,13 +1,33 @@
import aws from '@sullux/aws-sdk'
import {
AbortMultipartUploadCommand,
CompleteMultipartUploadCommand,
CopyObjectCommand,
CreateMultipartUploadCommand,
DeleteObjectCommand,
GetObjectCommand,
HeadObjectCommand,
ListObjectsV2Command,
PutObjectCommand,
S3Client,
UploadPartCommand,
UploadPartCopyCommand,
} from '@aws-sdk/client-s3'
import { Upload } from '@aws-sdk/lib-storage'
import { NodeHttpHandler } from '@aws-sdk/node-http-handler'
import { getApplyMd5BodyChecksumPlugin } from '@aws-sdk/middleware-apply-body-checksum'
import assert from 'assert'
import http from 'http'
import https from 'https'
import { Agent as HttpAgent } from 'http'
import { Agent as HttpsAgent } from 'https'
import pRetry from 'promise-toolbox/retry'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { PassThrough, pipeline } from 'stream'
import { parse } from 'xo-remote-parser'

import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './path'
import { asyncEach } from '@vates/async-each'

// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -24,78 +44,115 @@ const { warn } = createLogger('xo:fs:s3')
export default class S3Handler extends RemoteHandlerAbstract {
constructor(remote, _opts) {
super(remote)
const { allowUnauthorized, host, path, username, password, protocol, region } = parse(remote.url)
const params = {
accessKeyId: username,
const {
allowUnauthorized,
host,
path,
username,
password,
protocol,
region = guessAwsRegion(host),
} = parse(remote.url)

this._s3 = new S3Client({
apiVersion: '2006-03-01',
endpoint: host,
s3ForcePathStyle: true,
secretAccessKey: password,
signatureVersion: 'v4',
httpOptions: {
timeout: 600000,
endpoint: `${protocol}://${host}`,
forcePathStyle: true,
credentials: {
accessKeyId: username,
secretAccessKey: password,
},
}
if (protocol === 'http') {
params.httpOptions.agent = new http.Agent({ keepAlive: true })
params.sslEnabled = false
} else if (protocol === 'https') {
params.httpOptions.agent = new https.Agent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
})
}
if (region !== undefined) {
params.region = region
}
tls: protocol === 'https',
region,
requestHandler: new NodeHttpHandler({
socketTimeout: 600000,
httpAgent: new HttpAgent({
keepAlive: true,
}),
httpsAgent: new HttpsAgent({
rejectUnauthorized: !allowUnauthorized,
keepAlive: true,
}),
}),
})

this._s3 = aws(params).s3
// Workaround for https://github.com/aws/aws-sdk-js-v3/issues/2673
this._s3.middlewareStack.use(getApplyMd5BodyChecksumPlugin(this._s3.config))

const splitPath = path.split('/').filter(s => s.length)
this._bucket = splitPath.shift()
this._dir = splitPath.join('/')
const parts = split(path)
this._bucket = parts.shift()
this._dir = join(...parts)
}
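The rest of this file follows mechanically from the AWS SDK v2 to v3 migration; as a reminder of the calling convention being applied (bucket, key and region values below are placeholders):

// v2 style (via @sullux/aws-sdk): one method per operation
// const result = await s3.headObject({ Bucket, Key })

// v3 style (@aws-sdk/client-s3): a single send() taking a Command object
import { S3Client, HeadObjectCommand } from '@aws-sdk/client-s3'

const s3 = new S3Client({ region: 'us-east-1' })
const result = await s3.send(new HeadObjectCommand({ Bucket: 'my-bucket', Key: 'some/key' }))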

get type() {
return 's3'
}

_makeCopySource(path) {
return join(this._bucket, this._dir, path)
}

_makeKey(file) {
return join(this._dir, file)
}

_makePrefix(dir) {
const prefix = join(this._dir, dir, '/')

// no prefix for root
if (prefix !== './') {
return prefix
}
}

_createParams(file) {
return { Bucket: this._bucket, Key: this._dir + file }
return { Bucket: this._bucket, Key: this._makeKey(file) }
}
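For concreteness, assuming a remote whose path is '/my-bucket/backups/vm1' (so _bucket is 'my-bucket' and _dir is 'backups/vm1') and that file paths arrive normalized with a leading slash, these helpers would produce roughly:

handler._makeKey('/disk.vhd')        // 'backups/vm1/disk.vhd'
handler._makeCopySource('/disk.vhd') // 'my-bucket/backups/vm1/disk.vhd'
handler._makePrefix('/sub')          // 'backups/vm1/sub/'
handler._createParams('/disk.vhd')   // { Bucket: 'my-bucket', Key: 'backups/vm1/disk.vhd' }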

async _multipartCopy(oldPath, newPath) {
const size = await this._getSize(oldPath)
const CopySource = `/${this._bucket}/${this._dir}${oldPath}`
const multipartParams = await this._s3.createMultipartUpload({ ...this._createParams(newPath) })
const param2 = { ...multipartParams, CopySource }
const CopySource = this._makeCopySource(oldPath)
const multipartParams = await this._s3.send(new CreateMultipartUploadCommand({ ...this._createParams(newPath) }))
try {
const parts = []
let start = 0
while (start < size) {
const range = `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`
const partParams = { ...param2, PartNumber: parts.length + 1, CopySourceRange: range }
const upload = await this._s3.uploadPartCopy(partParams)
parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partParams.PartNumber })
const partNumber = parts.length + 1
const upload = await this._s3.send(
new UploadPartCopyCommand({
...multipartParams,
CopySource,
CopySourceRange: `bytes=${start}-${Math.min(start + MAX_PART_SIZE, size) - 1}`,
PartNumber: partNumber,
})
)
parts.push({ ETag: upload.CopyPartResult.ETag, PartNumber: partNumber })
start += MAX_PART_SIZE
}
await this._s3.completeMultipartUpload({ ...multipartParams, MultipartUpload: { Parts: parts } })
await this._s3.send(
new CompleteMultipartUploadCommand({
...multipartParams,
MultipartUpload: { Parts: parts },
})
)
} catch (e) {
await this._s3.abortMultipartUpload(multipartParams)
await this._s3.send(new AbortMultipartUploadCommand(multipartParams))
throw e
}
}

async _copy(oldPath, newPath) {
const CopySource = `/${this._bucket}/${this._dir}${oldPath}`
const CopySource = this._makeCopySource(oldPath)
try {
await this._s3.copyObject({
...this._createParams(newPath),
CopySource,
})
await this._s3.send(
new CopyObjectCommand({
...this._createParams(newPath),
CopySource,
})
)
} catch (e) {
// object > 5GB must be copied part by part
if (e.code === 'EntityTooLarge') {
if (e.name === 'EntityTooLarge') {
return this._multipartCopy(oldPath, newPath)
}
throw e
@@ -103,20 +160,22 @@ export default class S3Handler extends RemoteHandlerAbstract {
}

async _isNotEmptyDir(path) {
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
MaxKeys: 1,
Prefix: this._dir + path + '/',
})
return result.Contents.length !== 0
const result = await this._s3.send(
new ListObjectsV2Command({
Bucket: this._bucket,
MaxKeys: 1,
Prefix: this._makePrefix(path),
})
)
return result.Contents?.length > 0
}

async _isFile(path) {
try {
await this._s3.headObject(this._createParams(path))
await this._s3.send(new HeadObjectCommand(this._createParams(path)))
return true
} catch (error) {
if (error.code === 'NotFound') {
if (error.name === 'NotFound') {
return false
}
throw error
@@ -124,13 +183,23 @@ export default class S3Handler extends RemoteHandlerAbstract {
}

async _outputStream(path, input, { validator }) {
await this._s3.upload(
{
// Workaround for "ReferenceError: ReadableStream is not defined"
// https://github.com/aws/aws-sdk-js-v3/issues/2522
const Body = new PassThrough()
pipeline(input, Body, () => {})

const upload = new Upload({
client: this._s3,
queueSize: 1,
partSize: IDEAL_FRAGMENT_SIZE,
params: {
...this._createParams(path),
Body: input,
Body,
},
{ partSize: IDEAL_FRAGMENT_SIZE, queueSize: 1 }
)
})

await upload.done()

if (validator !== undefined) {
try {
await validator.call(this, path)
@@ -146,7 +215,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
// https://www.backblaze.com/b2/docs/calling.html#error_handling
@decorateWith(pRetry.wrap, {
delays: [100, 200, 500, 1000, 2000],
when: e => e.code === 'InternalError',
when: e => e.$metadata?.httpStatusCode === 500,
onRetry(error) {
warn('retrying writing file', {
attemptNumber: this.attemptNumber,
@@ -157,23 +226,31 @@ export default class S3Handler extends RemoteHandlerAbstract {
},
})
async _writeFile(file, data, options) {
return this._s3.putObject({ ...this._createParams(file), Body: data })
return this._s3.send(
new PutObjectCommand({
...this._createParams(file),
Body: data,
})
)
}
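The decorator above simply wraps _writeFile with promise-toolbox's retry helper; a standalone sketch of the same idea, with a hypothetical flaky call:

import pRetry from 'promise-toolbox/retry'

const flakyUpload = async () => {
  // hypothetical S3 call that may intermittently fail with an HTTP 500
}

const robustUpload = pRetry.wrap(flakyUpload, {
  delays: [100, 200, 500, 1000, 2000], // five retries with increasing back-off
  when: e => e.$metadata?.httpStatusCode === 500, // only retry server errors
})

await robustUpload()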

async _createReadStream(path, options) {
if (!(await this._isFile(path))) {
const error = new Error(`ENOENT: no such file '${path}'`)
error.code = 'ENOENT'
error.path = path
throw error
try {
return (await this._s3.send(new GetObjectCommand(this._createParams(path)))).Body
} catch (e) {
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file '${path}'`)
error.code = 'ENOENT'
error.path = path
throw error
}
throw e
}

// https://github.com/Sullux/aws-sdk/issues/11
return this._s3.getObject.raw(this._createParams(path)).createReadStream()
}

async _unlink(path) {
await this._s3.deleteObject(this._createParams(path))
await this._s3.send(new DeleteObjectCommand(this._createParams(path)))

if (await this._isNotEmptyDir(path)) {
const error = new Error(`EISDIR: illegal operation on a directory, unlink '${path}'`)
error.code = 'EISDIR'
@@ -182,39 +259,41 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
}

async _list(dir) {
function splitPath(path) {
return path.split('/').filter(d => d.length)
}
async _list(dir, { delimiter = '/' } = {}) {
let NextContinuationToken
const uniq = new Set()
const Prefix = this._makePrefix(dir)
do {
const command = {
Bucket: this._bucket,
Prefix,
// with a delimiter, only paths up to the delimiter are returned
ContinuationToken: NextContinuationToken,
}
if (delimiter !== null) {
command.Delimiter = delimiter
}
const result = await this._s3.send(new ListObjectsV2Command(command))

const prefix = [this._dir, dir].join('/')
const splitPrefix = splitPath(prefix)
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
Prefix: splitPrefix.join('/') + '/', // need slash at the end with the use of delimiters
Delimiter: '/', // will only return path until delimiters
})
if (result.IsTruncated) {
warn(`need pagination to browse the directory ${dir} completely`)
NextContinuationToken = result.NextContinuationToken
} else {
NextContinuationToken = undefined
}

if (result.IsTruncated) {
const error = new Error('more than 1000 objects, unsupported in this implementation')
error.dir = dir
throw error
}
// subdirectories
for (const entry of result.CommonPrefixes ?? []) {
uniq.add(basename(entry.Prefix))
}

const uniq = []
// files
for (const entry of result.Contents ?? []) {
uniq.add(delimiter === null ? entry.Key.substr(Prefix.length) : basename(entry.Key))
}
} while (NextContinuationToken !== undefined)

// sub directories
for (const entry of result.CommonPrefixes) {
const line = splitPath(entry.Prefix)
uniq.push(line[line.length - 1])
}
// files
for (const entry of result.Contents) {
const line = splitPath(entry.Key)
uniq.push(line[line.length - 1])
}

return uniq
return [...uniq]
}
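To make the delimiter behaviour concrete (bucket layout and names invented for this example): with _dir = 'backups' and objects 'backups/a.vhd' and 'backups/sub/b.vhd',

await handler._list('/')                      // ['a.vhd', 'sub'] (one level, readdir-like)
await handler._list('/', { delimiter: null }) // ['a.vhd', 'sub/b.vhd'] (recursive, keys relative to the prefix)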

async _mkdir(path) {
@@ -229,15 +308,15 @@ export default class S3Handler extends RemoteHandlerAbstract {

// s3 doesn't have a rename operation, so copy + delete source
async _rename(oldPath, newPath) {
await this.copy(oldPath, newPath)
await this._s3.deleteObject(this._createParams(oldPath))
await this._copy(oldPath, newPath)
await this._s3.send(new DeleteObjectCommand(this._createParams(oldPath)))
}
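In other words, renaming '/a.vhd' to '/b.vhd' under the example layout used earlier boils down to two S3 calls (a sketch; `s3` stands for the handler's client, and the large-object multipart fallback in _copy still applies):

await s3.send(new CopyObjectCommand({ Bucket: 'my-bucket', Key: 'backups/vm1/b.vhd', CopySource: 'my-bucket/backups/vm1/a.vhd' }))
await s3.send(new DeleteObjectCommand({ Bucket: 'my-bucket', Key: 'backups/vm1/a.vhd' }))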

async _getSize(file) {
if (typeof file !== 'string') {
file = file.fd
}
const result = await this._s3.headObject(this._createParams(file))
const result = await this._s3.send(new HeadObjectCommand(this._createParams(file)))
return +result.ContentLength
}

@@ -248,11 +327,11 @@ export default class S3Handler extends RemoteHandlerAbstract {
const params = this._createParams(file)
params.Range = `bytes=${position}-${position + buffer.length - 1}`
try {
const result = await this._s3.getObject(params)
result.Body.copy(buffer)
return { bytesRead: result.Body.length, buffer }
const result = await this._s3.send(new GetObjectCommand(params))
const bytesRead = await copyStreamToBuffer(result.Body, buffer)
return { bytesRead, buffer }
} catch (e) {
if (e.code === 'NoSuchKey') {
if (e.name === 'NoSuchKey') {
if (await this._isNotEmptyDir(file)) {
const error = new Error(`${file} is a directory`)
error.code = 'EISDIR'
@@ -279,22 +358,28 @@ export default class S3Handler extends RemoteHandlerAbstract {
// @todo : use parallel processing for unlink
async _rmtree(path) {
let NextContinuationToken
const Prefix = this._makePrefix(path)
do {
const result = await this._s3.listObjectsV2({
Bucket: this._bucket,
Prefix: this._dir + path + '/',
ContinuationToken: NextContinuationToken,
})
const result = await this._s3.send(
new ListObjectsV2Command({
Bucket: this._bucket,
Prefix,
ContinuationToken: NextContinuationToken,
})
)

NextContinuationToken = result.IsTruncated ? result.NextContinuationToken : undefined
await asyncEach(
result.Contents,
result.Contents ?? [],
async ({ Key }) => {
// _unlink would prepend the prefix, but Key already contains the full key
// no need to check whether this deletes a directory: the listing returns only files
await this._s3.deleteObject({
Bucket: this._bucket,
Key,
})
await this._s3.send(
new DeleteObjectCommand({
Bucket: this._bucket,
Key,
})
)
},
{
concurrency: 16,
@@ -310,9 +395,9 @@ export default class S3Handler extends RemoteHandlerAbstract {
const uploadParams = this._createParams(file)
let fileSize
try {
fileSize = +(await this._s3.headObject(uploadParams)).ContentLength
fileSize = +(await this._s3.send(new HeadObjectCommand(uploadParams))).ContentLength
} catch (e) {
if (e.code === 'NotFound') {
if (e.name === 'NotFound') {
fileSize = 0
} else {
throw e
@@ -320,10 +405,19 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
if (fileSize < MIN_PART_SIZE) {
const resultBuffer = Buffer.alloc(Math.max(fileSize, position + buffer.length))
const fileContent = fileSize !== 0 ? (await this._s3.getObject(uploadParams)).Body : Buffer.alloc(0)
fileContent.copy(resultBuffer)
if (fileSize !== 0) {
const result = await this._s3.send(new GetObjectCommand(uploadParams))
await copyStreamToBuffer(result.Body, resultBuffer)
} else {
Buffer.alloc(0).copy(resultBuffer)
}
buffer.copy(resultBuffer, position)
await this._s3.putObject({ ...uploadParams, Body: resultBuffer })
await this._s3.send(
new PutObjectCommand({
...uploadParams,
Body: resultBuffer,
})
)
return { buffer, bytesWritten: buffer.length }
} else {
// using this trick: https://stackoverflow.com/a/38089437/72637
@@ -334,10 +428,10 @@ export default class S3Handler extends RemoteHandlerAbstract {
// `edit` will always be an upload part
// `suffix` will always be sourced from uploadPartCopy()
// Then everything will be sliced in 5Gb parts before getting uploaded
const multipartParams = await this._s3.createMultipartUpload(uploadParams)
const multipartParams = await this._s3.send(new CreateMultipartUploadCommand(uploadParams))
const copyMultipartParams = {
...multipartParams,
CopySource: `/${this._bucket}/${this._dir + file}`,
CopySource: this._makeCopySource(file),
}
try {
const parts = []
@@ -364,14 +458,20 @@ export default class S3Handler extends RemoteHandlerAbstract {
assert.strictEqual(fragmentEnd - prefixPosition <= MAX_PART_SIZE, true)
const range = `bytes=${prefixPosition}-${fragmentEnd - 1}`
const copyPrefixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: range }
const part = await this._s3.uploadPartCopy(copyPrefixParams)
const part = await this._s3.send(new UploadPartCopyCommand(copyPrefixParams))
parts.push({ ETag: part.CopyPartResult.ETag, PartNumber: copyPrefixParams.PartNumber })
prefixPosition += prefixFragmentSize
}
if (prefixLastFragmentSize) {
// grab everything from the prefix that was too small to be copied, download and merge to the edit buffer.
const downloadParams = { ...uploadParams, Range: `bytes=${prefixPosition}-${prefixSize - 1}` }
const prefixBuffer = prefixSize > 0 ? (await this._s3.getObject(downloadParams)).Body : Buffer.alloc(0)
let prefixBuffer
if (prefixSize > 0) {
const result = await this._s3.send(new GetObjectCommand(downloadParams))
prefixBuffer = await createBufferFromStream(result.Body)
} else {
prefixBuffer = Buffer.alloc(0)
}
editBuffer = Buffer.concat([prefixBuffer, buffer])
editBufferOffset -= prefixLastFragmentSize
}
@@ -386,11 +486,12 @@ export default class S3Handler extends RemoteHandlerAbstract {
hasSuffix = suffixSize > 0
const prefixRange = `bytes=${complementOffset}-${complementOffset + complementSize - 1}`
const downloadParams = { ...uploadParams, Range: prefixRange }
const complementBuffer = (await this._s3.getObject(downloadParams)).Body
const result = await this._s3.send(new GetObjectCommand(downloadParams))
const complementBuffer = await createBufferFromStream(result.Body)
editBuffer = Buffer.concat([editBuffer, complementBuffer])
}
const editParams = { ...multipartParams, Body: editBuffer, PartNumber: partNumber++ }
const editPart = await this._s3.uploadPart(editParams)
const editPart = await this._s3.send(new UploadPartCommand(editParams))
parts.push({ ETag: editPart.ETag, PartNumber: editParams.PartNumber })
if (hasSuffix) {
// use ceil because the last fragment can be arbitrarily small.
@@ -401,17 +502,19 @@ export default class S3Handler extends RemoteHandlerAbstract {
assert.strictEqual(Math.min(fileSize, fragmentEnd) - suffixFragmentOffset <= MAX_PART_SIZE, true)
const suffixRange = `bytes=${suffixFragmentOffset}-${Math.min(fileSize, fragmentEnd) - 1}`
const copySuffixParams = { ...copyMultipartParams, PartNumber: partNumber++, CopySourceRange: suffixRange }
const suffixPart = (await this._s3.uploadPartCopy(copySuffixParams)).CopyPartResult
const suffixPart = (await this._s3.send(new UploadPartCopyCommand(copySuffixParams))).CopyPartResult
parts.push({ ETag: suffixPart.ETag, PartNumber: copySuffixParams.PartNumber })
suffixFragmentOffset = fragmentEnd
}
}
await this._s3.completeMultipartUpload({
...multipartParams,
MultipartUpload: { Parts: parts },
})
await this._s3.send(
new CompleteMultipartUploadCommand({
...multipartParams,
MultipartUpload: { Parts: parts },
})
)
} catch (e) {
await this._s3.abortMultipartUpload(multipartParams)
await this._s3.send(new AbortMultipartUploadCommand(multipartParams))
throw e
}
}
@@ -422,4 +525,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
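The partitioning above can be summarised by the byte ranges each part covers; a hypothetical helper, ignoring the MIN_PART_SIZE rounding and the 5 GB slicing handled by the real code:

// for a write of `length` bytes at `position` into an object of `fileSize` bytes
function writeRanges(position, length, fileSize) {
  return {
    prefix: [0, position],                 // copied server-side with UploadPartCopy
    edit: [position, position + length],   // uploaded from the local buffer
    suffix: [position + length, fileSize], // copied server-side with UploadPartCopy
  }
}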

async _closeFile(fd) {}

useVhdDirectory() {
return true
}
}

@@ -1,23 +0,0 @@
import { parse } from 'xo-remote-parser'

import MountHandler from './_mount'
import normalizePath from './_normalizePath'

export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalizePath(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}

get type() {
return 'smb'
}
}
@@ -1,163 +1,23 @@
import Smb2 from '@marsaud/smb2'
import { parse } from 'xo-remote-parser'

import RemoteHandlerAbstract from './abstract'
import MountHandler from './_mount'
import { normalize } from './path'

// Normalize the error code for file not found.
const wrapError = (error, code) => ({
__proto__: error,
cause: error,
code,
})
const normalizeError = (error, shouldBeDirectory) => {
const { code } = error

throw code === 'STATUS_DIRECTORY_NOT_EMPTY'
? wrapError(error, 'ENOTEMPTY')
: code === 'STATUS_FILE_IS_A_DIRECTORY'
? wrapError(error, 'EISDIR')
: code === 'STATUS_NOT_A_DIRECTORY'
? wrapError(error, 'ENOTDIR')
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
? wrapError(error, 'ENOENT')
: code === 'STATUS_OBJECT_NAME_COLLISION'
? wrapError(error, 'EEXIST')
: code === 'STATUS_NOT_SUPPORTED' || code === 'STATUS_INVALID_PARAMETER'
? wrapError(error, shouldBeDirectory ? 'ENOTDIR' : 'EISDIR')
: error
}
const normalizeDirError = error => normalizeError(error, true)

export default class SmbHandler extends RemoteHandlerAbstract {
export default class SmbHandler extends MountHandler {
constructor(remote, opts) {
super(remote, opts)

// defined in _sync()
this._client = undefined

const prefix = this._remote.path
this._prefix = prefix !== '' ? prefix + '\\' : prefix
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}

get type() {
return 'smb'
}

_getFilePath(file) {
return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
}

_dirname(file) {
const parts = file.split('\\')
parts.pop()
return parts.join('\\')
}

_closeFile(file) {
return this._client.close(file).catch(normalizeError)
}

_createReadStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createReadStream(file, options).catch(normalizeError)
}

_createWriteStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createWriteStream(file, options).catch(normalizeError)
}

_forget() {
const client = this._client
this._client = undefined
return client.disconnect()
}

_getSize(file) {
return this._client.getSize(this._getFilePath(file)).catch(normalizeError)
}

_list(dir) {
return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
}

_mkdir(dir, { mode }) {
return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
}

// TODO: add flags
_openFile(path, flags) {
return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
}

async _read(file, buffer, position) {
const client = this._client
const needsClose = typeof file === 'string'
file = needsClose ? await client.open(this._getFilePath(file)) : file.fd
try {
return await client.read(file, buffer, 0, buffer.length, position)
} catch (error) {
normalizeError(error)
} finally {
if (needsClose) {
await client.close(file)
}
}
}

_readFile(file, options) {
return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
}

_rename(oldPath, newPath) {
return this._client
.rename(this._getFilePath(oldPath), this._getFilePath(newPath), {
replace: true,
})
.catch(normalizeError)
}

_rmdir(dir) {
return this._client.rmdir(this._getFilePath(dir)).catch(normalizeDirError)
}

_sync() {
const remote = this._remote

this._client = new Smb2({
share: `\\\\${remote.host}`,
domain: remote.domain,
username: remote.username,
password: remote.password,
autoCloseTimeout: 0,
})

// Check access (smb2 does not expose connect in public so far...)
return this.list('.')
}

_truncate(file, len) {
return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
}

_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}

_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}

_writeFile(file, data, options) {
return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
}
}

@@ -20,7 +20,7 @@
">2%"
],
"engines": {
"node": ">=6"
"node": ">=8.3"
},
"dependencies": {
"lodash": "^4.17.4",

@@ -1,8 +1,8 @@
'use strict'

const fromCallback = require('promise-toolbox/fromCallback')
const nodemailer = require('nodemailer') // eslint-disable-line n/no-extraneous-import
const prettyFormat = require('pretty-format') // eslint-disable-line n/no-extraneous-import
const nodemailer = require('nodemailer') // eslint-disable-line n/no-extraneous-require
const prettyFormat = require('pretty-format') // eslint-disable-line n/no-extraneous-require

const { evalTemplate, required } = require('../utils')
const { NAMES } = require('../levels')

Some files were not shown because too many files have changed in this diff.