Compare commits

...

374 Commits

Author SHA1 Message Date
Julien Fontanet
282805966b WiP: feat(xen-api/getCachedRecord): getRecord + cache + events
Fixes #5088
2022-03-01 13:37:03 +01:00
Julien Fontanet
86e390f70f feat(xo-server/vm.import): support from URL 2022-03-01 11:44:16 +01:00
Julien Fontanet
e04352a577 chore: update dev deps 2022-03-01 09:04:28 +01:00
Julien Fontanet
ee4e7620b5 feat(xo-server/sr.getAllUnhealthyVdiChainsLength): memoize 60 seconds
Introduced by 0975863d9
2022-02-28 23:15:02 +01:00
Julien Fontanet
975dc4f314 feat: release 5.68.0 2022-02-28 23:15:02 +01:00
Julien Fontanet
dfff96ace1 feat(xo-server-audit): 0.9.3 2022-02-28 17:37:20 +01:00
Julien Fontanet
f01171bb8b fix(xo-server-audit): ignore sr.getAllUnhealthyVdiChainsLength
Introduced by 0975863d9
2022-02-28 17:36:27 +01:00
Julien Fontanet
8f248f8d14 feat(CHANGELOG): integrate released changes 2022-02-28 17:24:11 +01:00
Julien Fontanet
ae815a80b1 feat(xo-web): 5.94.0 2022-02-28 17:23:43 +01:00
Julien Fontanet
925514b0a3 feat(xo-server): 5.89.0 2022-02-28 17:23:23 +01:00
Julien Fontanet
45639e5a5c feat(@xen-orchestra/proxy): 0.19.0 2022-02-28 17:22:59 +01:00
Julien Fontanet
17ab78f064 feat(@xen-orchestra/backups): 0.20.0 2022-02-28 17:22:27 +01:00
Rajaa.BARHTAOUI
c43a3343d8 fix(xo-server/sr.getAllUnhealthyVdiChainsLength): don't include empty SRs (#6126) 2022-02-28 15:07:33 +01:00
Julien Fontanet
095e40d1dd feat(xo-server/api): logs number of clients 2022-02-28 11:12:40 +01:00
Rajaa.BARHTAOUI
a400ef823f feat(delta backup/restoration): ability to choose SR for each VDI (#6117)
Fixes #4605
Fixes #4016
2022-02-28 09:29:30 +01:00
Julien Fontanet
7684fbd5ec fix(xo-server): add missing @vates/predicates
Introduced by 636025c1b
2022-02-25 14:43:15 +01:00
Julien Fontanet
cd64e8dc28 feat(CHANGELOG): integrate released changes 2022-02-25 13:43:13 +01:00
Julien Fontanet
dbba918b79 feat(xo-web): 5.93.0 2022-02-25 13:40:52 +01:00
Julien Fontanet
88dbe35c4b feat(xo-server): 5.88.0 2022-02-25 13:40:26 +01:00
Julien Fontanet
4a61fedf97 feat(xo-cli): 0.13.0 2022-02-25 13:39:49 +01:00
Julien Fontanet
110181679b feat(@xen-orchestra/proxy): 0.18.1 2022-02-25 13:38:43 +01:00
Julien Fontanet
b0a0092a73 feat(@xen-orchestra/backups): 0.19.1 2022-02-25 13:38:19 +01:00
Julien Fontanet
a6a975ce40 feat(@xen-orchestra/mixins): 0.2.0 2022-02-25 13:36:23 +01:00
Julien Fontanet
a73eb82ac6 feat(@vates/predicates): 1.0.0 2022-02-25 13:33:55 +01:00
Julien Fontanet
702cade0d7 feat(@xen-orchestra/xapi): 0.9.0 2022-02-25 13:33:12 +01:00
Julien Fontanet
367b86bc7a feat(xen-api): 0.36.0 2022-02-25 13:32:14 +01:00
Julien Fontanet
7c5e823805 chore: update deps 2022-02-25 13:18:16 +01:00
Mathieu
0975863d98 feat(xo-server/api/sr, xo-web/dashboard/health): list coalescing VDIs (#6120)
See zammad#5224
2022-02-25 10:26:47 +01:00
Rajaa.BARHTAOUI
6e6886a6ba feat(xo-web/new/sr): creating local SR requires confirmation (#6121) 2022-02-23 23:04:42 +01:00
Julien Fontanet
8bf11d6f6a chore(decorate-with): add tests 2022-02-22 15:26:10 +01:00
Julien Fontanet
6c97ccafd5 chore(xo-server-test/README): regenerate
Introduced by 2ee153509
2022-02-22 12:47:44 +01:00
Julien Fontanet
72ff66ebde chore: enforce strict mode for CJS files 2022-02-22 12:34:41 +01:00
Julien Fontanet
b3d0da7392 feat(eslint): use correct sourceType for scripts and modules 2022-02-22 12:02:57 +01:00
Julien Fontanet
a1c0d82889 fix(docs/from sources): change Node LTS version in example 2022-02-19 14:06:37 +01:00
Julien Fontanet
fbd708d1b5 chore: update dev deps 2022-02-19 14:02:32 +01:00
Julien Fontanet
710ce965a1 feat(gitignore): ignore .nyc_output 2022-02-18 17:24:26 +01:00
Julien Fontanet
2ee1535095 chore(xo-server-test): rename script teststart
By convention, `test` means running tests for the current package, but it's not the case here, `xo-server-test` is an app (even if dedicated to test `xo-server`'s API) and should be run as such.
2022-02-18 17:20:55 +01:00
Julien Fontanet
471fbef6ef chore: hide USAGE.md
This highlights the fact that it's not designed for direct consumption and it no longer needs a special handling in `npmignore`.
2022-02-18 17:11:52 +01:00
Julien Fontanet
ab63709104 chore(predicates): add tests 2022-02-18 17:03:21 +01:00
Julien Fontanet
0924184358 fix(predicates): use strict mode 2022-02-18 17:03:21 +01:00
Julien Fontanet
6e38cf2bc1 fix(predicates): typo 2022-02-18 17:03:21 +01:00
Pierre Donias
2183422eb5 docs(users/auth-google): update doc with new screens (#6118)
See zammad#5306
2022-02-18 15:49:33 +01:00
Mathieu
3842f5b16d fix(Backup/FileRestore): hide unsupported S3 backups (#6116)
Fixes https://xcp-ng.org/forum/post/46166
2022-02-18 11:07:30 +01:00
Julien Fontanet
1dad6b37ef feat(xo-server): pass backup network address to proxy
Fixes zammad#4836
2022-02-16 18:38:48 +01:00
Julien Fontanet
7023db2264 chore: update dev deps 2022-02-16 14:55:16 +01:00
Julien Fontanet
bf194f5c85 feat(xo-server): add supportFileRestore prop to remote objects
Related to #6116
2022-02-14 14:23:54 +01:00
Julien Fontanet
bcdaa37f8a feat(backups/VmBackup#_callWriters): integrate sub-errors in logs
Fixes zammad#5264
2022-02-14 11:22:32 +01:00
Julien Fontanet
a5f8900d23 chore(CHANGELOG.unreleased): add changes for 3c684c7f4 2022-02-14 11:09:56 +01:00
Julien Fontanet
3c684c7f45 fix(xo-server): use pool.uuid/template.uuid for default templates instead of refs
Fixes https://help.vates.fr/#ticket/zoom/5221

Opaque refs are not persistent and should not be used as ids (e.g. resource sets in database).
2022-02-14 10:15:30 +01:00
Julien Fontanet
cd0f4baa15 fix(xo-server/docs): fix REST API path 2022-02-12 11:33:02 +01:00
Julien Fontanet
58f3050344 feat(xo-server/rest-api): add root collection 2022-02-12 11:32:38 +01:00
Julien Fontanet
ab110bb0df feat(xo-server/rest-api): add hosts, pools & SRs 2022-02-10 17:09:42 +01:00
Julien Fontanet
636025c1b9 chore(xo-server/rest-api): use @vates/predicates 2022-02-10 17:05:55 +01:00
Julien Fontanet
c28fa78963 feat(@vates/predicates): utils to compose predicates 2022-02-10 16:55:15 +01:00
Julien Fontanet
ecb66fb9f3 feat(xo-server): first endpoints of the beta REST API 2022-02-09 23:43:21 +01:00
Julien Fontanet
f8e4192d34 feat(xo-cli): --createToken command 2022-02-09 17:13:18 +01:00
Julien Fontanet
3d15cd57c4 chore: remove pump dep 2022-02-09 14:52:37 +01:00
Julien Fontanet
2e633f0df4 fix(xo-cli): support allowUnauthorized for $sendTo/$getFrom 2022-02-09 14:49:25 +01:00
Julien Fontanet
833b814fd5 fix(xo-cli): dont use deprecated http.resolve 2022-02-09 14:40:47 +01:00
Julien Fontanet
da52d905b6 chore(xo-cli): remove bluebird dep 2022-02-09 14:39:54 +01:00
Julien Fontanet
a2054c24d8 chore(xo-cli): remove mkdirp dep 2022-02-09 14:13:51 +01:00
Julien Fontanet
599db80467 chore: remove l33teral dep 2022-02-09 14:08:24 +01:00
Julien Fontanet
6075a3307e feat(xo-server): announce appliance in pool.other_config (#6115)
It will be used by XO Lite to list available XOs on the pool.
2022-02-09 11:29:01 +01:00
Julien Fontanet
9af06996c0 fix(backups/merge-worker): handle ENAMETOOLONG errors
Related to https://xcp-ng.org/forum/post/46424
2022-02-09 10:18:05 +01:00
Julien Fontanet
72c386cdce fix(backups/merge-worker): ignore missing VM dir
Fixes https://xcp-ng.org/forum/post/46578
2022-02-09 10:11:07 +01:00
Julien Fontanet
f6d4771603 feat(xen-api/barrier): remove stale entries 2022-02-07 16:21:02 +01:00
Julien Fontanet
15d06c591e chore: remove unnecessary strip-indent dep 2022-02-02 12:36:28 +01:00
Julien Fontanet
0a07765027 chore: update deps 2022-02-02 12:35:06 +01:00
Julien Fontanet
81e0d06604 chore: update to eslint-config-standard-jsx@11.0.0-0
Follow up of 767762064
2022-02-01 10:03:15 +01:00
Julien Fontanet
809314e516 feat: release 5.67.0 2022-01-31 13:41:24 +01:00
Julien Fontanet
cbe37b2ccd feat(xo-server-audit): 0.9.2 2022-01-28 18:19:56 +01:00
Julien Fontanet
14fcbb6b8b chore: format with Prettier 2022-01-28 15:55:35 +01:00
Julien Fontanet
bac976144e chore: lint fixes 2022-01-28 15:55:35 +01:00
Julien Fontanet
7677620645 chore: update to ESLint@8
Brings the (private) class fields support, which is getting more and more used in XO.
2022-01-28 15:55:35 +01:00
Mathieu
c8597bd363 fix(xo-server-audit/getRecords): limit number of records returned (#6113) 2022-01-28 15:46:56 +01:00
Julien Fontanet
899be12418 feat(CHANGELOG): integrate released changes 2022-01-27 14:44:54 +01:00
Julien Fontanet
31975b6737 feat(xo-web): 5.92.0 2022-01-27 14:27:48 +01:00
Julien Fontanet
315549e588 feat(xo-server): 5.87.0 2022-01-27 14:25:20 +01:00
Julien Fontanet
457886d538 feat(xo-server-audit): 0.9.1 2022-01-27 14:25:00 +01:00
Julien Fontanet
8d02b206e0 feat(@xen-orchestra/proxy): 0.18.0 2022-01-27 14:24:23 +01:00
Julien Fontanet
45e5b94b4f feat(xo-vmdk-to-vhd): 2.1.0 2022-01-27 14:23:53 +01:00
Julien Fontanet
ec866b4a61 fix(xo-server-audit): dont log proxy.getApplianceUpdaterState calls
Fix zammad#4753
2022-01-27 14:17:36 +01:00
Mathieu
2e8e2522e5 feat(xo-web/dashboard/health): show pools with no default SR (#6083)
See zammad#4640
2022-01-26 11:20:56 +01:00
Julien Fontanet
96bd46c3a8 fix(xo-web): fix number of VMs when running a backup
Following aa27b3be6
2022-01-25 15:30:58 +01:00
Julien Fontanet
797ed9ac51 fix(xo-server/backups): fix VMs matching
Introduced by aa27b3be6

Fixes https://xcp-ng.org/forum/topic/5473/backup-broken
2022-01-25 15:29:06 +01:00
Julien Fontanet
7089dcddff chore: update dev deps 2022-01-25 09:38:00 +01:00
Julien Fontanet
8ee93dfe9c feat(@xen-orchestra/backups-cli): 0.7.0 2022-01-24 22:35:08 +01:00
Julien Fontanet
88f160820f feat(@xen-orchestra/backups): 0.19.0 2022-01-24 22:34:02 +01:00
Julien Fontanet
b61b3e75c1 feat(vhd-lib): 3.1.0 2022-01-24 22:33:16 +01:00
Julien Fontanet
a1bac51f7b feat(@xen-orchestra/fs): 0.20.0 2022-01-24 22:31:39 +01:00
Julien Fontanet
fa986fb4cb fix(proxy/ReverseProxy): dont 404 non proxy URLs
Introduced by b78a94645
2022-01-24 22:21:18 +01:00
Julien Fontanet
aa27b3be64 fix(backups): dont ignore restored VMs
Fixes zammad#4794

Introduced by cf9f0da6e

Check that the `start` operation is blocked which is the case for replicated VMs but (should) not for restored backups.
2022-01-24 17:11:48 +01:00
Florent Beauchamp
97d94b7952 feat(vhd-lib): merge blocks in parallel for VhdDirectory, throttle mergestate writes 2022-01-24 10:37:34 +01:00
Florent Beauchamp
96eb793298 feat(fs): s3#rmtree parallelize object deletion 2022-01-24 10:37:34 +01:00
Florent Beauchamp
b4f15de7be feat(fs): s3#copy don't use multi part copy for objects smaller than 5GB 2022-01-24 10:37:34 +01:00
Mathieu
ae5726b836 fix(xo-server-audit/generateFingerprint): handle the case when db is empty (#6071)
Fixes #6040
2022-01-21 12:47:56 +01:00
Florent BEAUCHAMP
692e72a78a fix(vhd-lib): fixes asserts on existing merge state (#6099)
Introduced by 5a933bad9
2022-01-21 12:40:45 +01:00
Pierre Donias
ff24364bb6 fix(CHANGELOG): fix and homogenize changelog (#6102) 2022-01-20 15:40:49 +01:00
Florent BEAUCHAMP
b60a1958b6 fix(fs#S3#{list,rmtree}): fix support of more than 1000 files (#6103)
Bug found when working on #6100
2022-01-20 14:31:13 +01:00
Florent Beauchamp
f6a2b505db feat(backups): execute cleanup on each related vm dir after a backup deletion 2022-01-19 10:46:15 +01:00
Florent Beauchamp
38aacdbd7d feat(xo-web): delete all the backups at once and let xo-server handle the cleanup 2022-01-19 10:46:15 +01:00
Florent Beauchamp
089b877cc5 fix(backups): use handler.unlink for deleting metadata instead of VhdAbstract.unlink 2022-01-19 10:46:15 +01:00
Julien Fontanet
81e55dcf77 fix(backups/listAllVmBackups): ignore empty dirs 2022-01-19 10:43:00 +01:00
Julien Fontanet
58dd44bf5d fix(backups/listAllVmBackups): ignore hidden and lock files 2022-01-19 10:41:20 +01:00
Julien Fontanet
3aa6669fd9 chore(vhd-lib): move alias utils to aliases submodule
Introduced in e15be7ebd
2022-01-18 15:33:31 +01:00
Florent BEAUCHAMP
c10601d905 fix(backups/_MixinBackupWriter#afterBackup): execute merge on S3 (#6095)
Introduced by 47f9da216
2022-01-18 11:05:55 +01:00
Florent BEAUCHAMP
e15be7ebd3 fix(backups/_cleanVm): fixes for aliases cleaning (#6094)
Introduced in 249f638495
2022-01-18 10:07:56 +01:00
Julien Fontanet
b465a91cd3 fix(value-matcher/README): __all → __and 2022-01-18 08:58:24 +01:00
Julien Fontanet
f304a46bea fix(vhd-lib/VhdDirectory): missing readChunkFilters in readHeaderAndFooter
Introduced by 249f63849
2022-01-17 10:18:54 +01:00
Pierre Donias
6756faa1cc feat(xo-server,xo-web): disable Load Balancer during Rolling Pool Update (#6089)
Fixes #5711
Follow-up of #6057
2022-01-17 10:08:32 +01:00
Julien Fontanet
73fd7c7d54 fix(backups/_cleanVm): temporary disable aliases checking 2022-01-17 09:52:42 +01:00
Julien Fontanet
60eda9ec69 chore(vhd-lib): remove contentVhdStreamWithLength export from main module 2022-01-16 22:44:41 +01:00
Julien Fontanet
a979c29a15 chore(vhd-lib): remove createReadableRawStream
Use `VhdAbstract#rawContent()` instead.
2022-01-16 22:34:04 +01:00
Julien Fontanet
8f25082917 fix(xo-vmdk-to-vhd): avoid requiring whole vhd-lib
Introduced by 9375b1c8b

Fixes #6093
2022-01-16 22:31:38 +01:00
Nicolas Raynaud
9375b1c8bd feat: support VDI export in VMDK (#5982)
Co-authored-by: Rajaa.BARHTAOUI <rajaa.barhtaoui@gmail.com>
Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>
Co-authored-by: Florent BEAUCHAMP <flo850@free.fr>
2022-01-16 18:40:08 +01:00
Julien Fontanet
422a22a767 chore: update dev deps 2022-01-14 14:51:38 +01:00
Florent BEAUCHAMP
249f638495 feat(backups/_cleanVm): check VHD aliases (#6043) 2022-01-13 16:07:28 +01:00
Florent BEAUCHAMP
6cf5e10195 feat(fs/S3#_writeFile): retry on remote internal error (#6086) 2022-01-13 15:46:12 +01:00
Florent BEAUCHAMP
b78a946458 feat(proxy): implement reverse proxies (#6072) 2022-01-13 14:54:10 +01:00
Julien Fontanet
e8a5694d51 feat(backups/_cleanVm): clean orphan mergeState (#6087)
Fixes zammad#4778
2022-01-13 10:41:39 +01:00
Julien Fontanet
514fa72ee2 fix(package.json/jest): vhd-lib no longer has a build step
Introduced by 3a74c71f1
2022-01-12 22:50:49 +01:00
Julien Fontanet
e9ca13aa12 fix(backups/cleanVm): handle zstd-compressed XVAs
Related to zammad#4300
2022-01-12 11:31:09 +01:00
Julien Fontanet
57f1ec6716 chore(backups/_cleanVm/listVhds): make vhds directly a Set 2022-01-11 15:31:56 +01:00
Julien Fontanet
02e32cc9b9 chore(backups/_cleanVm/listVhds): minor simplification
This also removes the incorrect handling of an optional dir in `INTERRUPTED_VHDS_REG`.
2022-01-11 15:09:18 +01:00
Julien Fontanet
902abd5d94 chore: update deps 2022-01-06 13:59:31 +01:00
Julien Fontanet
53380802ec feat(xo-server): limit VM migration concurrency (#6076)
Related to #6065
2022-01-06 09:32:42 +01:00
Julien Fontanet
af5d8d02b6 feat: release 5.66.2 2022-01-05 11:30:29 +01:00
Julien Fontanet
7abba76f03 feat(CHANGELOG): integrate released changes 2022-01-05 10:36:05 +01:00
Julien Fontanet
79b22057d9 feat(xo-web): 5.91.2 2022-01-05 10:34:30 +01:00
Julien Fontanet
366daef718 feat(xo-server): 5.86.3 2022-01-05 10:33:30 +01:00
Julien Fontanet
a5ff0ba799 feat(@xen-orchestra/proxy): 0.17.3 2022-01-05 10:32:42 +01:00
Julien Fontanet
c2c6febb88 feat(@xen-orchestra/backups): 0.18.3 2022-01-05 10:18:02 +01:00
Julien Fontanet
f119c72a7f feat(xo-vmdk-to-vhd): 2.0.3 2022-01-05 10:16:47 +01:00
Julien Fontanet
8aee897d23 feat(vhd-lib): 3.0.0 2022-01-05 10:15:45 +01:00
Florent BEAUCHAMP
729db5c662 fix(backups): race condition in checkBaseVdi preventing delta backup (#6075)
Fixes zammad#4751, zammad#4729, zammad#4665 and zammad#4300
2022-01-05 09:58:06 +01:00
Julien Fontanet
61c46df7bf chore(xo-server): dont pass (unused) httpServer to app 2022-01-03 16:04:18 +01:00
Julien Fontanet
9b1a04338d chore(xo-server): attach express before creating app 2022-01-03 15:46:30 +01:00
Julien Fontanet
d307134d22 chore(xapi/_assertHealthyVdiChain): clearer warnings in case of missing VDI 2021-12-28 18:14:32 +01:00
Julien Fontanet
5bc44363f9 fix(xo-vmdk-to-vhd): fix createReadableSparseStream import
Introduced by 3a74c71f1

Fixes #6068
2021-12-23 23:40:58 +01:00
Julien Fontanet
68c4fac3ab chore: update deps 2021-12-23 13:25:48 +01:00
Julien Fontanet
6ad9245019 feat: release 5.66.1 2021-12-23 13:25:08 +01:00
Julien Fontanet
763cf771fb feat(CHANGELOG): integrate released changes 2021-12-23 12:18:50 +01:00
Julien Fontanet
3160b08637 feat(xo-web): 5.91.1 2021-12-23 12:18:14 +01:00
Julien Fontanet
f8949958a3 feat(xo-server): 5.86.2 2021-12-23 12:17:54 +01:00
Julien Fontanet
8b7ac07d2d feat(@xen-orchestra/proxy): 0.17.2 2021-12-23 12:17:25 +01:00
Julien Fontanet
044df9adba feat(@xen-orchestra/backups): 0.18.2 2021-12-23 12:16:53 +01:00
Julien Fontanet
040139f4cc fix(backups/cleanVm): computeVhdSize can return undefined 2021-12-23 12:09:11 +01:00
Julien Fontanet
7b73bb9df0 chore: format with Prettier 2021-12-23 12:06:11 +01:00
Julien Fontanet
24c8370daa fix(xo-server-test): add missing ESLint config 2021-12-23 11:58:14 +01:00
Julien Fontanet
029c4921d7 fix(backups/RemoteAdapter#isMergeableParent): #useVhdDirectory is a function (#6070)
Fixes zammad#4646
Fixes https://xcp-ng.org/forum/topic/5371/delta-backup-changes-in-5-66

Introduced by 5d605d1bd
2021-12-23 11:57:51 +01:00
Julien Fontanet
3a74c71f1a chore(vhd-lib): remove build step
BREAKING:
- removes `dist/` in the path of sub-modules
- requires Node >=12
2021-12-23 10:31:29 +01:00
Julien Fontanet
6022a1bbaa feat(normalize-packages): delete unused Babel configs 2021-12-23 09:26:00 +01:00
Julien Fontanet
4e88c993f7 chore: update dev deps 2021-12-22 11:07:25 +01:00
Julien Fontanet
c9a61f467c fix(xo-web/Dashboard/Health): handle no default_SR
Fixes zammad#4640

Introduced by 7bacd781c
2021-12-22 10:33:18 +01:00
Julien Fontanet
e6a5f42f63 feat: release 5.66.0 2021-12-21 18:00:39 +01:00
Julien Fontanet
a373823eea feat(xo-server): 5.86.1 2021-12-21 17:58:02 +01:00
Julien Fontanet
b5e010eac8 feat(@xen-orchestra/proxy): 0.17.1 2021-12-21 17:57:47 +01:00
Julien Fontanet
50ffe58655 feat(@xen-orchestra/backups): 0.18.1 2021-12-21 17:56:55 +01:00
Julien Fontanet
07eb3b59b3 feat(@xen-orchestra/mixins): 0.1.2 2021-12-21 17:56:52 +01:00
Julien Fontanet
5177b5e142 chore(backups/RemoteAdapter): remove default value for vhdDirectoryCompression
Introduced by 3c984e21c
2021-12-21 17:51:23 +01:00
Julien Fontanet
3c984e21cd fix({proxy,xo-server}): add backup.vhdDirectoryCompression setting
Introduced by 5d605d1bd
2021-12-21 17:49:43 +01:00
Julien Fontanet
aa2b27e22b fix(mixins/Config#get): fix missing entry error message 2021-12-21 17:37:07 +01:00
Julien Fontanet
14a7f00c90 chore(CHANGELOG): remove non-breakable spaces 2021-12-21 17:31:51 +01:00
Julien Fontanet
56f98601bd feat(CHANGELOG): integrate released changes 2021-12-21 17:24:19 +01:00
Julien Fontanet
027a8c675e feat(@xen-orchestra/proxy): 0.17.0 2021-12-21 17:22:29 +01:00
Julien Fontanet
bdaba9a767 feat(xo-server): 5.86.0 2021-12-21 17:22:07 +01:00
Julien Fontanet
4e9090f60d feat(@xen-orchestra/backups): 0.18.0 2021-12-21 17:21:37 +01:00
Julien Fontanet
73b445d371 feat(xo-vmdk-to-vhd): 2.0.2 2021-12-21 17:21:10 +01:00
Julien Fontanet
75bfc283af feat(vhd-lib): 2.1.0 2021-12-21 17:20:36 +01:00
Julien Fontanet
727de19b89 feat(@xen-orchestra/xapi): 0.8.5 2021-12-21 17:20:06 +01:00
Florent BEAUCHAMP
5d605d1bd7 feat(backups): compress VHDs on S3 (#5932) 2021-12-21 17:18:27 +01:00
Julien Fontanet
ffdd1dfd6f fix(xo-vmdk-to-vhd): avoid requiring whole vhd-lib
This library is used in the browser and a lot of parts of `vhd-lib` are not intended to be used in (or bundled for) the browser.
2021-12-21 17:10:33 +01:00
Julien Fontanet
d45418eb29 fix(backups/cleanVm): metadata.vhds is an object, not an array
Introduced by 93069159d
2021-12-21 16:23:03 +01:00
Julien Fontanet
6ccc9d1ade fix(xapi/VM_create): support NVRAM field (#6062)
Fixes #6054
Fixes https://xcp-ng.org/forum/topic/5319/bug-uefi-boot-parameters-not-preserved-with-delta-backups
2021-12-20 16:30:41 +01:00
Florent BEAUCHAMP
93069159dd fix(backups/cleanVm): don't warn on size change due to merged VHDs (#6010) 2021-12-20 14:57:54 +01:00
Julien Fontanet
8c4780131f feat: release 5.65.3 2021-12-20 10:50:51 +01:00
Julien Fontanet
02ae8bceda fix(backups/cleanVm): dont fail on broken metadata 2021-12-20 09:49:27 +01:00
Julien Fontanet
bb10bbc945 chore(backups/cleanVm): remove deleted files from jsons 2021-12-20 09:46:09 +01:00
Florent BEAUCHAMP
478d88e97f fix(fs/s3#_rmtree): infinite loop (#6067) 2021-12-17 16:01:57 +01:00
Florent BEAUCHAMP
6fb397a729 fix(vhd-lib): parseVhdStream int overflow when rebuilding the bat (#6066)
BAT should contain sector address, not byte address

We were not really rebuilding the BAT, since we were using the data read in the old bat and write it as is in the new one
2021-12-17 14:28:48 +01:00
Julien Fontanet
18dae34778 feat(vhd-lib/parseVhdStream): new public method (#6063)
Extracted from `createVhdDirectoryFromStream`

Co-authored-by: Florent Beauchamp <flo850@free.fr>
2021-12-17 10:08:29 +01:00
Julien Fontanet
243566e936 fix(xen-api): named import for @vates/coalesce-calls
Introduced by 87f4fd675
2021-12-16 14:00:49 +01:00
Julien Fontanet
87f4fd675d fix(xen-api): fix coalesceCalls
Introduced by dec6b59a9
2021-12-16 13:26:31 +01:00
Julien Fontanet
dec6b59a9f chore(xen-api): use @vates/coalesce-calls 2021-12-16 12:03:07 +01:00
Rajaa.BARHTAOUI
e51baedf7f feat: technical release (#6060) 2021-12-16 12:01:57 +01:00
Julien Fontanet
530da14e24 feat(@vates/decorate-with): 1.0.0 2021-12-16 11:49:29 +01:00
Julien Fontanet
02da7c272f feat(decorate-with): perInstance helper 2021-12-16 11:48:48 +01:00
Pierre Donias
a07c5418e9 feat(xo-server,xo-web): disable HA during Rolling Pool Update (#6057)
See #5711
2021-12-16 10:29:13 +01:00
Mathieu
c080db814b feat(xo-web/home/backed up VMs): filter out VMs in disabled backup jobs (#6037)
See xoa-support#4294
2021-12-16 10:06:45 +01:00
Julien Fontanet
3abe13c006 chore(backups/RemoteAdapter#deleteVmBackups): report unsupported backup modes
It was removed in 7e302fd1c
2021-12-16 10:05:08 +01:00
Julien Fontanet
fb331c0a2c fix(backups/RemoteAdapter#deleteVmBackups): dont delete undefined
Fixes https://xcp-ng.org/forum/topic/5331/backup-smart-mode-broken/6
Introduced by 7e302fd1c
2021-12-16 10:03:16 +01:00
Julien Fontanet
19ea78afc5 fix(xo-server): fix job matching for smart mode
Fixes https://xcp-ng.org/forum/topic/5331/backup-smart-mode-broken
Fixes #6058

Introduced by cf9f0da6e

XO VM objects have a `other` field instead of `other_config`.
2021-12-15 23:25:04 +01:00
Julien Fontanet
2096c782e3 feat(xo-server/api): new method backupNg.deleteVmBackups
Related to 7e302fd1c
2021-12-15 17:36:47 +01:00
Julien Fontanet
79a6a8a10c feat(proxy/api): new method backup.deleteVmBackups
Related to 7e302fd1c
2021-12-15 17:27:08 +01:00
Julien Fontanet
5a933bad93 fix(vhd-lib/merge): dont fail on invalid state file
Fixes zammad#4227
2021-12-15 16:36:18 +01:00
Julien Fontanet
7e302fd1cb feat(backups/RemoteAdapter): new method deleteVmBackups()
It's usually best to delete multiple backups at once instead of one by one because it allows some optimizations, for instance when merging unused VHDs.

This was already possible in private methods but not exposed in the public API.
2021-12-15 16:34:35 +01:00
Julien Fontanet
cf9f0da6e5 fix(backups): ignore VMs created by current job
See xoa-support#4271
2021-12-14 12:07:51 +01:00
Pierre Donias
10ac23e265 feat(docs/Netbox): specify minimum required permissions (#6047)
See https://xcp-ng.org/forum/topic/5300/
2021-12-14 11:48:19 +01:00
Rajaa.BARHTAOUI
dc2e1cba1f feat(xo-web/pool,VM/advanced): ability to set suspend SR (#6044)
Fixes #4163
2021-12-14 10:13:11 +01:00
Julien Fontanet
7bfd190c22 fix(backups/_VmBackup): no base VM when no base VDIs found
Introduced by 5b188f35b
2021-12-13 17:55:54 +01:00
Manon Mercier
c3bafeb468 fix(docs/xoa): must reboot after changing password (#6056)
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2021-12-13 17:48:14 +01:00
Mathieu
7bacd781cf feat(xo-web/health): display non shared default SRs (#6033)
Fixes #5871
2021-12-13 10:00:24 +01:00
Julien Fontanet
ee005c3679 fix(xo-server-usage-report): csv-stringify usage
Fixes #6053

Introduced by b179dc1d5
2021-12-13 09:39:00 +01:00
Florent BEAUCHAMP
315f54497a fix(vhd-lib/resolveAlias): limit size (#6004)
Current resolution loads the whole file in memory. It can lead to a crash if the alias is malformed (for example a full VHD named `.alias.vhd`).
2021-12-12 14:19:40 +01:00
Julien Fontanet
e30233347b feat: release 5.65.2 2021-12-10 17:19:54 +01:00
Julien Fontanet
56d4a7f01e fix(xo-web/about): show commit iff available & always versions
Fixes #6052
2021-12-10 16:40:26 +01:00
Julien Fontanet
7c110eebd8 feat(CHANGELOG): integrate released changes 2021-12-10 12:06:26 +01:00
Julien Fontanet
39394f8c09 feat(@xen-orchestra/proxy): 0.15.5 2021-12-10 12:04:33 +01:00
Julien Fontanet
3283130dfc feat(xo-server): 5.84.3 2021-12-10 12:04:33 +01:00
Julien Fontanet
3146a591d0 feat(@xen-orchestra/backups): 0.16.2 2021-12-10 12:04:30 +01:00
Julien Fontanet
e478b1ec04 feat(vhd-lib): 2.0.3 2021-12-10 12:04:00 +01:00
Julien Fontanet
7bc4d14f46 feat(@xen-orchestra/fs): 0.19.2 2021-12-10 11:38:20 +01:00
Florent BEAUCHAMP
f3eeeef389 fix(fs/S3#list): should not look into all the file tree (#6048) 2021-12-09 14:06:21 +01:00
Florent BEAUCHAMP
8d69208197 fix(backups/MixinBackupWriter#_cleanVm): always returns an object (#6050) 2021-12-09 10:38:31 +01:00
Julien Fontanet
2c689af1a9 fix(backups): add random suffixes to task files to avoid collitions
See https://xcp-ng.org/forum/post/44661
2021-12-08 18:03:13 +01:00
Julien Fontanet
cb2a34c765 feat(backups/RemoteAdapter#cleanVm): show missing VHDs
Related to investigation on zammad#4156
2021-12-08 11:41:18 +01:00
Julien Fontanet
465c8f9009 feat(xo-web/about): show build commit in sources version (#6045) 2021-12-08 09:57:15 +01:00
Florent BEAUCHAMP
8ea4c1c1fd fix(vhd-lib): output parent locator in VhdAbstract#stream (#6035) 2021-12-07 14:14:39 +01:00
Julien Fontanet
ba0f7df9e8 feat(xapi-explore-sr): 0.4.1 2021-12-06 17:59:27 +01:00
Julien Fontanet
e47dd723b0 fix(xapi-explore-sr): add missing @xen-orchestra/defined dep
Introduced by 2412f8b1e
2021-12-06 17:43:47 +01:00
Julien Fontanet
fca6e2f6bf fix(fs/S3#_list): throw if result is truncated 2021-12-06 14:25:55 +01:00
Florent BEAUCHAMP
faa7ba6f24 fix(vhd-lib, fs): use rmtree and not rmTree (#6041) 2021-12-06 14:25:09 +01:00
Julien Fontanet
fc2dbbe3ee feat: release 5.65.1 2021-12-03 16:42:11 +01:00
Julien Fontanet
cc98b81825 fix(CHANGELOG): incorrect section name Packages to release
Introduced in ae24b10da
2021-12-03 15:29:48 +01:00
Julien Fontanet
eb4a7069d4 feat(CHANGELOG): integrate released changes 2021-12-03 15:27:54 +01:00
Julien Fontanet
4f65d9214e feat(xo-server): 5.84.2 2021-12-03 15:23:59 +01:00
Julien Fontanet
4d3c8ee63c feat(@xen-orchestra/proxy): 0.15.4 2021-12-03 15:23:26 +01:00
Julien Fontanet
e41c1b826a feat(@xen-orchestra/backups): 0.16.1 2021-12-03 15:23:02 +01:00
Julien Fontanet
644bb48135 feat(xo-vmdk-to-vhd): 2.0.1 2021-12-03 15:22:23 +01:00
Julien Fontanet
c9809285f6 feat(vhd-lib): 2.0.2 2021-12-03 15:19:07 +01:00
Julien Fontanet
5704949f4d feat(@vates/compose): 2.1.0 2021-12-03 15:17:47 +01:00
Julien Fontanet
a19e00fbc0 fix(backups/_VmBackup#_selectBaseVm): cant read .uuid of undefined srcVdi (#6034)
See xoa-support#4263

The debug message is now clearer and has correct associated data.
2021-12-03 10:22:17 +01:00
Julien Fontanet
470a9b3e27 chore(decorate-with/README): document usage with @vates/compose 2021-12-02 21:37:25 +01:00
Julien Fontanet
ace31dc566 feat(compose): supports attaching extra params 2021-12-02 21:37:25 +01:00
Julien Fontanet
ed252276cb fix(compose): dont mutate passed functions array 2021-12-02 21:37:25 +01:00
Julien Fontanet
26d0ff3c9a fix(vhd-lib/VhdAbtract#stream): explicitely ignore differencing
Because parentLocator entries handling are broken.
2021-12-02 16:48:19 +01:00
Florent Beauchamp
ff806a3ff9 fix(vhd-lib): use parent locator of root disk in VhdSynthetic 2021-12-02 16:48:19 +01:00
Florent Beauchamp
949b17dee6 fix(vhd-lib): fix footer and header accessor in vhd hierarchy 2021-12-02 16:48:19 +01:00
Florent Beauchamp
b1fdc68623 fix(vhd-lib): platformDataSpace in sectors not bytes 2021-12-02 16:48:19 +01:00
Florent BEAUCHAMP
f502facfd1 fix(backup): createAlias to data instead of circular alias (#6029) 2021-12-02 13:56:20 +01:00
Mathieu
bf0a74d709 fix(xo-web/SortedTable): properly disable collapsed actions (#6023) 2021-12-02 13:48:22 +01:00
Julien Fontanet
7296d98313 fix(backups/RemoteAdapter#_createSyntheticStream): only dispose once
See https://xcp-ng.org/forum/topic/5257/problems-building-from-source/20
2021-12-01 13:24:46 +01:00
Julien Fontanet
30568ced49 fix(vhd-lib/VhdSynthetic): fix parent UUID assert
See https://xcp-ng.org/forum/topic/5257/problems-building-from-source
2021-12-01 12:54:00 +01:00
Julien Fontanet
5e1284a9e0 chore: refresh yarn.lock
Introduced by 03d6e3356 due to extra files in my repo…
2021-12-01 12:33:43 +01:00
Julien Fontanet
27d2de872a chore: update to lint-staged@^12.0.3
See https://xcp-ng.org/forum/topic/5257/problems-building-from-source

Fix missing peer dependency `inquirer`.
2021-12-01 12:19:19 +01:00
Julien Fontanet
03d6e3356b chore: refresh yarn.lock 2021-12-01 12:17:52 +01:00
Julien Fontanet
ca8baa62fb fix(xo-vmdk-to-vhd): remove duplicate promise-toolbox dep
See https://xcp-ng.org/forum/topic/5257/problems-building-from-source
2021-12-01 12:17:25 +01:00
Florent BEAUCHAMP
2f607357c6 feat: release 5.65 (#6028) 2021-11-30 17:45:31 +01:00
Julien Fontanet
2de80f7aff feat(xo-server): 5.84.1 2021-11-30 17:04:37 +01:00
Julien Fontanet
386058ed88 chore(CHANGELOG): update vhd-lib version
Introduced by 033fa9e067
2021-11-30 17:04:25 +01:00
Julien Fontanet
033fa9e067 feat(vhd-lib): 2.0.1 2021-11-30 17:00:49 +01:00
Julien Fontanet
e8104420b5 fix(vhd-lib): add missing @vates/async-each dep
Introduced by 56c3d70149
2021-11-30 16:59:01 +01:00
Florent BEAUCHAMP
ae24b10da0 feat: technical release (#6025) 2021-11-30 15:45:36 +01:00
Florent BEAUCHAMP
407b05b643 fix(backups): use the full VHD hierarchy for restore (#6027) 2021-11-30 15:27:54 +01:00
Julien Fontanet
79bf8bc9f6 fix(xo-server): add missing complex-matcher dep
Introduced by 65d6dca52
2021-11-30 09:35:10 +01:00
Julien Fontanet
65d6dca52c feat(xo-server/xo.getAllObjects): add complex-matcher support
See https://xcp-ng.org/forum/topic/5238/xo-cli-command-list-vms-which-ha-snapshots
2021-11-29 19:00:44 +01:00
Julien Fontanet
66eeefbd7b feat(xo-server/vm.set): suspendSr support
See #4163
2021-11-29 14:44:02 +01:00
Mathieu
c10bbcde00 feat(xo-web,xo-server/snapshot): ability to export snapshot memory (#6015)
See xoa-support#4113
2021-11-29 14:08:02 +01:00
Julien Fontanet
fe69928bcc feat(xo-server/pool.set): suspendSr support
See #4163
2021-11-29 10:53:49 +01:00
Florent BEAUCHAMP
3ad8508ea5 feat(vhd-lib/VhdDirectory#_writeChunk): use outputFile (#6019)
This is much faster than manually creating parent directories.
2021-11-29 09:52:47 +01:00
Florent BEAUCHAMP
1f1ae759e0 feat(fs): use keepalive for queries to s3 (#6018) 2021-11-27 10:10:19 +01:00
Mathieu
6e4bfe8f0f feat(xo-web,xo-server): ability to create a cloud config network template (#5979)
Fixes #5931
2021-11-26 10:28:22 +01:00
Rajaa.BARHTAOUI
6276c48768 fix(xo-server/proxies): remove state cache after the proxy update (#6013) 2021-11-26 10:02:30 +01:00
Julien Fontanet
f6005baf1a feat(vhd-cli info): human format some fields 2021-11-25 18:29:25 +01:00
Julien Fontanet
b62fdbc6a6 feat(vhd-lib/Constants): make disk types and platforms maps
BREAKING
2021-11-25 18:02:26 +01:00
Florent BEAUCHAMP
bbd3d31b6a fix(backups/writeVhd): await outputStream (#6017) 2021-11-25 16:34:21 +01:00
Julien Fontanet
481ac92bf8 fix(backups/RemoteAdapter): dont use .dir suffix (#6016)
An alias can point to any kind of VHD, file or directory.

Also, for now, aliases are only used for VHD directories.
2021-11-25 15:31:25 +01:00
Florent BEAUCHAMP
a2f2b50f57 feat(s3): allow self signed certificate (#5961) 2021-11-25 11:32:08 +01:00
Julien Fontanet
bbab9d0f36 fix(xapi/vm/_assertHealthyVdiChain): ignore unused unmanaged VDIs
Fixes xoa-support#4280
2021-11-25 11:28:50 +01:00
Florent BEAUCHAMP
7f8190056d fix(backups/RemoteAdapter): unused import and path in writeVhd (#6014) 2021-11-25 11:26:59 +01:00
Julien Fontanet
8f4737c5f1 chore: upgrade to jsonrpc-websocket-client@0.7.2 2021-11-25 10:33:39 +01:00
Julien Fontanet
c5adba3c97 fix(xo-lib): upgrade to jsonrpc-websocket-client@^0.7.2
Fix default value for `protocols` option.
2021-11-25 10:28:40 +01:00
Julien Fontanet
d91eb9e396 fix(CHANGELOG.unreleased): fix duplicate package
Introduced by d5f21bc27c
2021-11-25 10:27:01 +01:00
Julien Fontanet
1b47102d6c chore: refresh yarn.lock 2021-11-25 00:06:01 +01:00
Julien Fontanet
cd147f3fc5 feat(xo-cli): 0.12.0 2021-11-25 00:03:21 +01:00
Julien Fontanet
c3acdc8cbd feat(xo-cli register): --allowUnauthorized flag
See https://xcp-ng.org/forum/topic/5226/xo-cli-and-using-self-signed-certificates
2021-11-25 00:02:08 +01:00
Julien Fontanet
c3d755dc7b feat(xo-lib): 0.11.0 2021-11-24 23:59:05 +01:00
Julien Fontanet
6f49c48bd4 feat(xo-lib): upgrade to jsonrpc-websocket-client@0.7.1
Use secure protocol (`wss`) by default and contains a fix for `rejectUnauthorized` option.
2021-11-24 23:55:23 +01:00
Julien Fontanet
446f390b3d feat(xo-lib): allow passing opts to JsonRpcWebSocketClient 2021-11-24 23:53:33 +01:00
Julien Fontanet
966091593a chore(vhd-lib): rename build{Footer,Header} to unpack{Footer,Header}
To make it clearer that it unpacks a binary footer/header to a JS object.
2021-11-24 23:34:23 +01:00
Florent Beauchamp
d5f21bc27c feat(backups): handle the choice of the vhd type to use during backup 2021-11-24 21:08:15 +01:00
Florent Beauchamp
8c3b452c0d feat(backup): DeltaBackupWriter can handle any type of vhd 2021-11-24 21:08:15 +01:00
Florent Beauchamp
9cacb92c2c feat(backups): remoteadapter can delete any type of vhd 2021-11-24 21:08:15 +01:00
Florent Beauchamp
7a1b56db87 feat(backups): checkvhd can handle all vhd types 2021-11-24 21:08:15 +01:00
Florent Beauchamp
56c3d70149 feat(vhd-lib): generate a vhd directory from a vhd stream 2021-11-24 21:08:15 +01:00
Florent Beauchamp
1ec8fcc73f feat(vhd-lib): extract computeSectorsPerBlock, computeBlockBitmapSize and computeSectorOfBitmap to utils 2021-11-24 21:08:15 +01:00
Rajaa.BARHTAOUI
060b16c5ca feat(xo-web/backup/logs): identify XAPI errors (#6001)
See xoa-support#3977
2021-11-24 15:25:27 +01:00
Yannick Achy
0acc52e3e9 fix(docs): move NOBAK from Delta to general concepts (#6012)
Co-authored-by: yannick Achy <yannick.achy@vates.fr>
2021-11-24 09:10:35 +01:00
Florent Beauchamp
a9c2c9b6ba refactor(vhd-lib): move createSyntheticStream to backup, move stream() tests to vhdabstracts
Florent Beauchamp
5b2a6bc56b chore(vhd-lib/createSyntheticStream): based on VhdSynthetic#stream() 2021-11-23 15:56:25 +01:00
Florent Beauchamp
19c8693b62 fix(vhd-lib/VhdSynthetic#readHeaderAndFooter()): root vhd can be a dynamic and check chaining 2021-11-23 15:56:25 +01:00
Florent Beauchamp
c4720e1215 fix(vhd-lib/VhdAbstract#stream()): stream.length should contain blocks 2021-11-23 15:56:25 +01:00
Florent BEAUCHAMP
b6d4c8044c feat(backups/cleanVm) : support VHD dirs and aliases (#6000) 2021-11-22 17:14:29 +01:00
Florent BEAUCHAMP
57dd6ebfba chore(vhd-lib): use openVhd for chain and checkChain (#5997) 2021-11-22 15:50:30 +01:00
Julien Fontanet
c75569f278 feat(proxy/authentication.setToken): API method to change auth token 2021-11-18 18:14:19 +01:00
Julien Fontanet
a8757f9074 chore(proxy/authentication): use private field for auth token
More idiomatic and potentially more secure.
2021-11-18 18:02:26 +01:00
Julien Fontanet
f5c3bf72e5 fix(mixins/Config): dont create multiple stop listeners 2021-11-18 16:41:46 +01:00
Florent BEAUCHAMP
d7ee13f98d feat(vhd-lib/merge): use Vhd* classes (#5950) 2021-11-18 11:30:04 +01:00
Julien Fontanet
1f47aa491d fix(xo-server/pool.mergeInto): dont export masterPassword on error
Fixes xoa-support#4265
2021-11-17 22:42:00 +01:00
Julien Fontanet
ffe430758e feat(async-each): run async fn for each item in (async) iterable 2021-11-17 22:27:43 +01:00
Florent BEAUCHAMP
a4bb453401 feat(vhd-lib): add VhdAbstract#{stream,rawContent}() methods (#5992) 2021-11-17 09:16:34 +01:00
Florent BEAUCHAMP
5c8ebce9eb feat(vhd-lib): add vhd synthetic class (#5990) 2021-11-17 09:15:13 +01:00
Julien Fontanet
8b0cee5e6f feat(@xen-orchestra/backups-cli): 0.6.1 2021-11-16 14:26:50 +01:00
Julien Fontanet
e5f4f825b6 fix(xapi): group retry options together
- it does not make sense to only set the delay or the number of tries without the other
- it allow using any options either as default or in config without worrying about incompatibilities (e.g. `tries` & `retries`)
2021-11-16 14:26:11 +01:00
Julien Fontanet
b179dc1d56 chore: update dev deps 2021-11-15 23:43:20 +01:00
Julien Fontanet
7281c9505d fix(CHANGELOG.unreleased): new release backups-cli
`vhd-cli@^1` compat was broken by 7ef89d504
2021-11-15 14:46:51 +01:00
Julien Fontanet
4db82f447d fix(xo-web/about): update link to create issue
Related to 71b8e625f

See #5977
2021-11-15 14:22:46 +01:00
Julien Fontanet
834da3d2b4 fix(vhd-lib/VhdAbstract): remove duplicate field declarations
Introduced in c6c3a33dc
2021-11-10 16:04:59 +01:00
Julien Fontanet
c6c3a33dcc feat(vhd-cli/VhdAbstract): make derived values getters
It makes them read-only, make sure they are always up-to-date with the header and avoid duplicating their logic.
2021-11-10 15:45:42 +01:00
Julien Fontanet
fb720d9b05 fix(docs/xoa): use wget instead of curl
The version of curl installed on XCP-ng 8.2.0, (curl 7.29.0) does not support any encryption algos available on https://xoa.io
2021-11-09 19:55:49 +01:00
Florent Beauchamp
547d318e55 fix(vhd-lib): write parent locator before the blocks 2021-11-08 18:03:46 +01:00
Florent Beauchamp
cb5a2c18f2 fix(vhd-lib): ensure block allocation table is written after modifying it in tests 2021-11-08 18:03:46 +01:00
Florent Beauchamp
e01ca3ad07 refactor(vhd-lib): use method from test/utils when possible 2021-11-08 18:03:46 +01:00
Florent Beauchamp
314d193f35 fix(vhd-lib): set platform code when setting unique parent locator 2021-11-08 18:03:46 +01:00
Florent Beauchamp
e0200bb730 refactor(vhd-lib): split tests 2021-11-08 18:03:46 +01:00
Florent BEAUCHAMP
2a3f4a6f97 feat(vhd-lib): handle file alias (#5962) 2021-11-08 14:46:00 +01:00
Nicolas Raynaud
88628bbdc0 chore(xo-vmdk-to-vhd): fix tests (#5981)
Introduced by fdf52a3d59

Follow-up of b00750bfa3
2021-11-07 15:38:45 +01:00
Olivier Lambert
cb7b695a72 feat(docs/netbox): add how to add a custom field in Netbox 3 (#5984) 2021-11-07 13:44:02 +01:00
Julien Fontanet
ae549e2a88 fix(jest): dont use fake timers by default
Introduced by 844efb88d

The upgrade to Jest 27 (15630aee5) revealed this issue.
2021-11-05 13:24:51 +01:00
Julien Fontanet
7f9a970714 fix(log/USAGE): document filter array
Introduced by d3cb31f1a
2021-11-04 10:45:58 +01:00
Julien Fontanet
7661d3372d fix(xen-api/USAGE): add httpProxy option
Introduced by 2412f8b1e
2021-11-04 10:38:22 +01:00
Julien Fontanet
dbb4f34015 chore(xapi/VDI_destroy): decorate with retry.wrap()
- more efficient than creating a function at each call
- better logging
2021-11-03 23:10:58 +01:00
Julien Fontanet
8f15a4c29d feat(ISSUE_TEMPLATE/bug_report): add hypervisor version 2021-11-03 16:55:17 +01:00
Florent BEAUCHAMP
1b0a885ac3 feat(vhd-cli): use any remote for copy and compare (#5927) 2021-11-03 15:45:52 +01:00
Nicolas Raynaud
f7195bad88 fix(xo-server): fix ova multipart upload (#5976)
Introduced by 0451aaeb5c
2021-11-02 17:43:45 +01:00
Julien Fontanet
15630aee5e chore: update dev deps 2021-11-02 13:43:49 +01:00
Florent BEAUCHAMP
a950a1fe24 refactor(vhd-lib): centralize test methods (#5968) 2021-11-02 09:53:30 +01:00
Julien Fontanet
71b8e625fe chore: update issue templates (#5974) 2021-10-30 15:06:51 +02:00
Julien Fontanet
e7391675fb feat(@xen-orchestra/proxy): 0.15.2 2021-10-29 17:41:02 +02:00
Julien Fontanet
84fdd3fe4b fix(proxy/api/ndJsonStream): send header for empty iterables
Introduced by ed987e161
2021-10-29 17:05:05 +02:00
Julien Fontanet
4dc4b635f2 feat(@xen-orchestra/proxy): 0.15.1 2021-10-29 15:50:42 +02:00
Julien Fontanet
ee0c6d7f8b feat(xen-api): 0.35.1 2021-10-29 15:50:05 +02:00
Julien Fontanet
a637af395d fix(xen-api): add missing dep proxy-agent
Introduced by 2412f8b1e
2021-10-29 15:40:25 +02:00
Julien Fontanet
59fb612315 feat(@xen-orchestra/proxy): 0.15.0 2021-10-29 15:20:09 +02:00
Mathieu
59b21c7a3e feat: release 5.64 (#5971) 2021-10-29 11:40:16 +02:00
Mathieu
40f881c2ac feat: technical release (#5970) 2021-10-28 16:30:00 +02:00
Rajaa.BARHTAOUI
1d069683ca feat(xo-web/host): manage evacuation failure during host shutdown (#5966) 2021-10-28 14:23:43 +02:00
Julien Fontanet
de1d942b90 fix(xo-server/listPoolsMatchingCriteria): check{Sr,Pool}Name is not a function
Fixes xoa-support#4193

Introduced by cd8c618f0
2021-10-28 13:29:32 +02:00
Rajaa.BARHTAOUI
fc73971d63 feat(xo-server,xo-web/menu): proxy upgrade notification (#5930)
See xoa-support#4105
2021-10-28 10:52:23 +02:00
Rajaa.BARHTAOUI
eb238bf107 feat(xo-web/pool/advanced, xen-api/{get,put}Resource): introduce backup network (#5957) 2021-10-28 10:21:48 +02:00
Florent BEAUCHAMP
2412f8b1e2 feat(xen-api): add HTTP proxy support (#5958)
See #5436

Using an IP address as HTTPS proxy show this warning: `DeprecationWarning: Setting the TLS ServerName to an IP address is not permitted by RFC 6066`

The corresponding issue is there : TooTallNate/node-https-proxy-agent#127
2021-10-27 17:30:41 +02:00
Pierre Donias
0c87dee31c fix(xo-web/xoa): handle string expiration dates (#5967)
See xoa-support#4114
See xoa-support#4192

www-xo may return a string instead of a number in some rare cases
2021-10-27 16:59:59 +02:00
Mathieu
215146f663 feat(xo-web/vm/export): allow to copy the export URL (#5948) 2021-10-27 16:58:09 +02:00
Mathieu
9fe1069df0 feat(xo-web/host): format logs (#5943)
See xoa-support#4100
2021-10-27 15:41:29 +02:00
Julien Fontanet
d2c5b52bf1 feat(backups): enable merge worker by default
Related to 47f9da216

It can still be disabled in case of problems:

```toml
[backups]
disableMergeWorker = true
```
2021-10-27 09:29:50 +02:00
Pierre Donias
12153a414d fix(xo-server/{clone,copy}Vm): force is_a_template to false on the new VM (#5955)
See xoa-support#4137
2021-10-26 16:53:09 +02:00
Pierre Donias
5ec1092a83 fix(xo-server-netbox/test): perform test with a 50-character name (#5963)
See https://xcp-ng.org/forum/topic/5111
See https://netbox.readthedocs.io/en/stable/release-notes/version-2.10/#other-changes > #5011

Versions of Netbox <2.10 only allow cluster type names of length <= 50.
2021-10-26 15:55:11 +02:00
Julien Fontanet
284169a2f2 chore(vhd-lib/VhdAbstract): format with Prettier
Introduced by 7ef89d504
2021-10-25 16:12:49 +02:00
Julien Fontanet
838bfbb75f fix(backups/cleanVm): wait for merge to finish
Introduced by 9c83e70a2
2021-10-25 09:14:38 +02:00
Julien Fontanet
a448da77c9 fix(backups/cleanVm): mergeLimiter support
Introduced by 9c83e70a2
2021-10-25 09:13:58 +02:00
Rajaa.BARHTAOUI
268fb22d5f feat(xo-web/host/advanced): add button to disable/enable host (#5952) 2021-10-20 16:39:54 +02:00
Julien Fontanet
07cc4c853d fix(vhd-lib): fix block table properties & accessors
Fixes #5956

Introduced by 7ef89d504
2021-10-18 23:13:55 +02:00
Florent BEAUCHAMP
c62d727cbe feat(vhd-cli compare): compare metadata and content of two VHDs (#5920) 2021-10-18 16:21:40 +02:00
Florent BEAUCHAMP
7ef89d5043 feat(vhd-{cli,lib}): implement chunking and copy command (#5919) 2021-10-18 14:56:58 +02:00
Mathieu
9ceba1d6e8 feat(xo-web/jobs): add button to copy jobs IDs (#5951)
Useful to create a `job.runSequence` job. Follow-up of #5944.
2021-10-15 14:25:02 +02:00
Pierre Donias
e2e453985f fix(xo-web/job): properly handle array arguments (#5944)
See https://xcp-ng.org/forum/topic/5010

When creating/editing a job, properties of type `array` must not go through the
cross product builder, they must be saved as arrays.
2021-10-15 10:42:33 +02:00
Florent BEAUCHAMP
84dccd800f feat(backups): clean up other schedules snapshots (#5949)
Fixes xoa-support#4129
2021-10-14 14:44:40 +02:00
Julien Fontanet
f9734d202b chore(backups/_VmBackup): remove unused import 2021-10-14 13:51:29 +02:00
Julien Fontanet
d3cb0f4672 feat(xo-server): 5.82.4 2021-10-14 09:47:39 +02:00
Julien Fontanet
c198bbb6fa feat(@xen-orchestra/backups): 0.14.0 2021-10-14 09:45:20 +02:00
Julien Fontanet
c965a89509 feat(xo-server-netbox): 0.3.2 2021-10-14 09:43:38 +02:00
Julien Fontanet
47f9da2160 feat(backups/MixinBackupWriter): use merge worker if not disabled 2021-10-13 16:26:12 +02:00
Julien Fontanet
348a75adb4 feat(backups): merge worker implementation
This CLI must be run directly in the directory where the remote is mounted.

It's only compatible with local remote at the moment.

To start the worker:

```js
const MergeWorker = require('@xen-orchestra/backups/merge-worker/index.js')

await MergeWorker.run(remotePath)
```

To register a VM backup dir to be clean (thus merging its unused VHD), create a file in the queue directory containing the VM UUID:

```
> echo cc700fe2-724e-44a5-8663-5f8f88e05e34 > .queue/clean-vm/20211013T142401Z
```

The queue directory is available as `MergeWorker.CLEAN_VM_QUEUE`.
2021-10-13 16:25:21 +02:00
Julien Fontanet
332218a7f7 feat(backups): move merge responsibility to cleanVm 2021-10-13 16:10:19 +02:00
Julien Fontanet
6d7a26d2b9 chore(backups/MixinBackupWriter): use private fields 2021-10-13 10:02:57 +02:00
Pierre Donias
d19a748f0c fix(xo-server-netbox): support older versions of Netbox (#5946)
Fixes #5898
See https://netbox.readthedocs.io/en/stable/release-notes/version-2.7/#api-choice-fields-now-use-string-values-3569
2021-10-13 09:28:46 +02:00
Julien Fontanet
9c83e70a28 feat(backups/RemoteAdapter#cleanVm): configurable merge limiter 2021-10-12 09:17:42 +02:00
Rajaa.BARHTAOUI
abcabb736b feat(xo-web/tasks): filter out short tasks with a default filter (#5941)
See xoa-support#4096
2021-10-08 16:42:16 +02:00
Julien Fontanet
0451aaeb5c fix(xo-server/vm.import): restore non-multipart upload (#5936)
See xoa-support#4085

Introduced by fdf52a3d5

Required by `xo-cli`.
2021-10-08 15:24:21 +02:00
Julien Fontanet
880c45830c fix(xo-cli): http-request-plus@0.12 has no longer default export
Introduced by 62e5ab699
2021-10-07 17:11:54 +02:00
Julien Fontanet
5fa16d2344 chore: format with Prettier 2021-10-07 14:40:41 +02:00
Julien Fontanet
9e50b5dd83 feat(proxy): logging is now dynamically configurable
It was done for xo-server in f20d5cd8d
2021-10-06 16:54:57 +02:00
Julien Fontanet
29d8753574 chore(backups/VmBackup#_selectBaseVm): add debug logs 2021-10-06 16:48:42 +02:00
Pierre Donias
f93e1e1695 feat: release 5.63.0 (#5925) 2021-09-30 15:25:34 +02:00
463 changed files with 12761 additions and 5006 deletions

View File

@@ -1,3 +1,5 @@
'use strict'
module.exports = {
extends: ['plugin:eslint-comments/recommended', 'standard', 'standard-jsx', 'prettier'],
globals: {
@@ -18,8 +20,19 @@ module.exports = {
'no-console': 'off',
},
},
{
files: ['*.mjs'],
parserOptions: {
sourceType: 'module',
},
},
],
parserOptions: {
ecmaVersion: 13,
sourceType: 'script',
},
rules: {
// disabled because XAPI objects are using camel case
camelcase: ['off'],
@@ -34,5 +47,7 @@ module.exports = {
'lines-between-class-members': 'off',
'no-console': ['error', { allow: ['warn', 'error'] }],
strict: 'error',
},
}

34
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,34 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**
Add any other context about the problem here.

View File

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

3
.gitignore vendored
View File

@@ -35,3 +35,6 @@ pnpm-debug.log.*
yarn-error.log
yarn-error.log.*
.env
# nyc test coverage
.nyc_output

View File

@@ -0,0 +1,35 @@
### `asyncEach(iterable, iteratee, [opts])`
Executes `iteratee` in order for each value yielded by `iterable`.
Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.
`iterable` must be an iterable or async iterable.
`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:
- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
```js
import { asyncEach } from '@vates/async-each'
const contents = []
await asyncEach(
['foo.txt', 'bar.txt', 'baz.txt'],
async function (filename, i) {
contents[i] = await readFile(filename)
},
{
// reads two files at a time
concurrency: 2,
}
)
```

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,68 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/async-each
[![Package Version](https://badgen.net/npm/v/@vates/async-each)](https://npmjs.org/package/@vates/async-each) ![License](https://badgen.net/npm/license/@vates/async-each) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/async-each)](https://bundlephobia.com/result?p=@vates/async-each) [![Node compatibility](https://badgen.net/npm/node/@vates/async-each)](https://npmjs.org/package/@vates/async-each)
> Run async fn for each item in (async) iterable
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/async-each):
```
> npm install --save @vates/async-each
```
## Usage
### `asyncEach(iterable, iteratee, [opts])`
Executes `iteratee` in order for each value yielded by `iterable`.
Returns a promise which rejects as soon as a call to `iteratee` throws or a promise returned by it rejects, and which resolves when all promises returned by `iteratee` have resolved.
`iterable` must be an iterable or async iterable.
`iteratee` is called with the same `this` value as `asyncEach`, and with the following arguments:
- `value`: the value yielded by `iterable`
- `index`: the 0-based index for this value
- `iterable`: the iterable itself
`opts` is an object that can contain the following options:
- `concurrency`: a number which indicates the maximum number of parallel calls to `iteratee`, defaults to `1`
- `signal`: an abort signal to stop the iteration
- `stopOnError`: whether to stop iteration on the first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
```js
import { asyncEach } from '@vates/async-each'
const contents = []
await asyncEach(
['foo.txt', 'bar.txt', 'baz.txt'],
async function (filename, i) {
contents[i] = await readFile(filename)
},
{
// reads two files at a time
concurrency: 2,
}
)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,99 @@
'use strict'

// Shared no-op used to neutralize the fulfillment/rejection handlers once the
// outer promise has settled (see `clean` below).
const noop = Function.prototype

// Minimal AggregateError: carries the array of errors collected when
// `stopOnError` is false. (Defined locally rather than relying on a native
// implementation — presumably for compatibility with older Node versions;
// TODO confirm against the package's supported engines.)
class AggregateError extends Error {
  constructor(errors, message) {
    super(message)
    this.errors = errors
  }
}

/**
 * Runs `iteratee` for each value yielded by `iterable` (sync or async),
 * with at most `concurrency` calls in flight at a time.
 *
 * `iteratee` is called with the same `this` as `asyncEach` itself and
 * receives `(value, index, iterable)`.
 *
 * @param {Iterable|AsyncIterable} iterable
 * @param {Function} iteratee - may return a thenable
 * @param {object} [opts]
 * @param {number} [opts.concurrency=1] - max parallel calls to `iteratee`
 * @param {AbortSignal} [opts.signal] - rejects the returned promise with
 *   `Error('asyncEach aborted')` when triggered
 * @param {boolean} [opts.stopOnError=true] - when true, reject on the first
 *   error; when false, keep going and reject with an AggregateError at the end
 * @returns {Promise<void>}
 */
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
  return new Promise((resolve, reject) => {
    // supports both sync and async iterables
    const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
    const errors = []
    let running = 0
    let index = 0

    let onAbort
    if (signal !== undefined) {
      onAbort = () => {
        // go through the wrapper so the current (possibly neutralized)
        // `onRejected` handler is used
        onRejectedWrapper(new Error('asyncEach aborted'))
      }
      signal.addEventListener('abort', onAbort)
    }

    // once settled: ignore any late fulfillments/rejections from still-running
    // iteratee calls, and detach the abort listener
    const clean = () => {
      onFulfilled = onRejected = noop
      if (onAbort !== undefined) {
        signal.removeEventListener('abort', onAbort)
      }
    }

    // wrap resolve/reject so that settling the promise always runs `clean`
    resolve = (resolve =>
      function resolveAndClean(value) {
        resolve(value)
        clean()
      })(resolve)
    reject = (reject =>
      function rejectAndClean(reason) {
        reject(reason)
        clean()
      })(reject)

    // `onFulfilled`/`onRejected` are mutable (reset to noop by `clean`); the
    // `*Wrapper` consts below always delegate to the *current* handler, so
    // results arriving after settlement are silently dropped
    let onFulfilled = value => {
      --running
      next()
    }
    const onFulfilledWrapper = value => onFulfilled(value)
    let onRejected = stopOnError
      ? reject
      : error => {
          --running
          errors.push(error)
          next()
        }
    const onRejectedWrapper = reason => onRejected(reason)

    // `next` pulls values from the iterator and starts iteratee calls until
    // `concurrency` is saturated; `nextIsRunning` guards against re-entrant
    // calls while the (possibly async) `it.next()` is pending
    let nextIsRunning = false
    let next = async () => {
      if (nextIsRunning) {
        return
      }
      nextIsRunning = true
      if (running < concurrency) {
        const cursor = await it.next()
        if (cursor.done) {
          // iterator exhausted: replace `next` so that later completions only
          // check whether everything has settled, then resolve/reject
          next = () => {
            if (running === 0) {
              if (errors.length === 0) {
                resolve()
              } else {
                reject(new AggregateError(errors))
              }
            }
          }
        } else {
          ++running
          try {
            const result = iteratee.call(this, cursor.value, index++, iterable)
            let then
            // only genuine thenables are handled asynchronously; plain return
            // values complete synchronously
            if (result != null && typeof result === 'object' && typeof (then = result.then) === 'function') {
              then.call(result, onFulfilledWrapper, onRejectedWrapper)
            } else {
              onFulfilled(result)
            }
          } catch (error) {
            onRejected(error)
          }
        }
        nextIsRunning = false
        // keep pulling until concurrency is reached or the iterator is done
        return next()
      }
      nextIsRunning = false
    }
    next()
  })
}

View File

@@ -0,0 +1,99 @@
'use strict'
/* eslint-env jest */

const { asyncEach } = require('./')

// resolves after a random delay of at most `max` milliseconds
function randomDelay(max = 10) {
  return new Promise(resolve => {
    setTimeout(resolve, Math.floor(Math.random() * max + 1))
  })
}

// resolves with the rejection reason of `p` (rejects if `p` fulfills)
function rejectionOf(p) {
  return new Promise((resolve, reject) => {
    p.then(reject, resolve)
  })
}

describe('asyncEach', () => {
  const thisArg = 'qux'
  const values = ['foo', 'bar', 'baz']

  // the same suite is run against both a sync and an async iterable
  const iterableFactories = {
    'sync iterable': () => values,
    'async iterable': async function* () {
      for (const value of values) {
        await randomDelay()
        yield value
      }
    },
  }

  for (const [what, getIterable] of Object.entries(iterableFactories)) {
    describe('with ' + what, () => {
      let iterable
      beforeEach(() => {
        iterable = getIterable()
      })

      it('works', async () => {
        const iteratee = jest.fn(async () => {})

        await asyncEach.call(thisArg, iterable, iteratee)

        // iteratee receives the caller's `this` and `(value, index, iterable)`
        expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
        expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
      })

      for (const concurrency of [1, 2, 4]) {
        it('respects a concurrency of ' + concurrency, async () => {
          let running = 0
          await asyncEach(
            values,
            async () => {
              ++running
              expect(running).toBeLessThanOrEqual(concurrency)
              await randomDelay()
              --running
            },
            { concurrency }
          )
        })
      }

      it('stops on first error when stopOnError is true', async () => {
        const error = new Error()
        const iteratee = jest.fn((_, i) => {
          if (i === 1) {
            throw error
          }
        })

        expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
        expect(iteratee).toHaveBeenCalledTimes(2)
      })

      it('rejects AggregateError when stopOnError is false', async () => {
        const errors = []
        const iteratee = jest.fn(() => {
          const error = new Error()
          errors.push(error)
          throw error
        })

        const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
        expect(error.errors).toEqual(errors)
        expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
      })

      it('can be interrupted with an AbortSignal', async () => {
        const ac = new AbortController()
        const iteratee = jest.fn((_, i) => {
          if (i === 1) {
            ac.abort()
          }
        })

        await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
        expect(iteratee).toHaveBeenCalledTimes(2)
      })
    })
  }
})

View File

@@ -0,0 +1,34 @@
{
"private": false,
"name": "@vates/async-each",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/async-each",
"description": "Run async fn for each item in (async) iterable",
"keywords": [
"array",
"async",
"collection",
"each",
"for",
"foreach",
"iterable",
"iterator"
],
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/async-each",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -1,3 +1,5 @@
'use strict'
exports.coalesceCalls = function (fn) {
let promise
const clean = () => {

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { coalesceCalls } = require('./')

View File

@@ -46,3 +46,20 @@ const f = compose(
[add2, mul3]
)
```
Functions can receive extra parameters:
```js
const isIn = (value, min, max) => min <= value && value <= max
// Only compatible when `fns` is passed as an array!
const f = compose([
[add, 2],
[isIn, 3, 10],
])
console.log(f(1))
// → true
```
> Note: if the first function is defined with extra parameters, it will only receive the first value passed to the composed function, instead of all the parameters.

View File

@@ -65,6 +65,23 @@ const f = compose(
)
```
Functions can receive extra parameters:
```js
const isIn = (value, min, max) => min <= value && value <= max
// Only compatible when `fns` is passed as an array!
const f = compose([
[add, 2],
[isIn, 3, 10],
])
console.log(f(1))
// → true
```
> Note: if the first function is defined with extra parameters, it will only receive the first value passed to the composed function, instead of all the parameters.
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -4,11 +4,13 @@ const defaultOpts = { async: false, right: false }
exports.compose = function compose(opts, fns) {
if (Array.isArray(opts)) {
fns = opts
fns = opts.slice() // don't mutate passed array
opts = defaultOpts
} else if (typeof opts === 'object') {
opts = Object.assign({}, defaultOpts, opts)
if (!Array.isArray(fns)) {
if (Array.isArray(fns)) {
fns = fns.slice() // don't mutate passed array
} else {
fns = Array.prototype.slice.call(arguments, 1)
}
} else {
@@ -20,6 +22,24 @@ exports.compose = function compose(opts, fns) {
if (n === 0) {
throw new TypeError('at least one function must be passed')
}
for (let i = 0; i < n; ++i) {
const entry = fns[i]
if (Array.isArray(entry)) {
const fn = entry[0]
const args = entry.slice()
args[0] = undefined
fns[i] = function composeWithArgs(value) {
args[0] = value
try {
return fn.apply(this, args)
} finally {
args[0] = undefined
}
}
}
}
if (n === 1) {
return fns[0]
}

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { compose } = require('./')

View File

@@ -14,7 +14,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "2.0.0",
"version": "2.1.0",
"engines": {
"node": ">=7.6"
},

View File

@@ -40,3 +40,33 @@ decorateMethodsWith(Foo, {
```
The decorated class is returned, so you can export it directly.
To apply multiple transforms to a method, you can either call `decorateMethodsWith` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
```js
decorateMethodsWith(Foo, {
bar: compose([
[lodash.debounce, 150]
lodash.curry,
])
})
```
### `perInstance(fn, ...args)`
Helper to decorate the method by instance instead of for the whole class.
This is often necessary for caching or deduplicating calls.
```js
import { perInstance } from '@vates/decorateWith'
class Foo {
@decorateWith(perInstance, lodash.memoize)
bar() {
// body
}
}
```
Because it's a normal function, it can also be used with `decorateMethodsWith`, with `compose` or even by itself.

View File

@@ -59,6 +59,36 @@ decorateMethodsWith(Foo, {
The decorated class is returned, so you can export it directly.
To apply multiple transforms to a method, you can either call `decorateMethodsWith` multiple times or use [`@vates/compose`](https://www.npmjs.com/package/@vates/compose):
```js
decorateMethodsWith(Foo, {
bar: compose([
[lodash.debounce, 150]
lodash.curry,
])
})
```
### `perInstance(fn, ...args)`
Helper to decorate the method by instance instead of for the whole class.
This is often necessary for caching or deduplicating calls.
```js
import { perInstance } from '@vates/decorateWith'
class Foo {
@decorateWith(perInstance, lodash.memoize)
bar() {
// body
}
}
```
Because it's a normal function, it can also be used with `decorateMethodsWith`, with `compose` or even by itself.
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -1,3 +1,5 @@
'use strict'
exports.decorateWith = function decorateWith(fn, ...args) {
return (target, name, descriptor) => ({
...descriptor,
@@ -19,3 +21,15 @@ exports.decorateMethodsWith = function decorateMethodsWith(klass, map) {
}
return klass
}
exports.perInstance = function perInstance(fn, decorator, ...args) {
const map = new WeakMap()
return function () {
let decorated = map.get(this)
if (decorated === undefined) {
decorated = decorator(fn, ...args)
map.set(this, decorated)
}
return decorated.apply(this, arguments)
}
}

View File

@@ -0,0 +1,100 @@
'use strict'

const assert = require('assert')
const { describe, it } = require('tap').mocha

const { decorateWith, decorateMethodsWith, perInstance } = require('./')

describe('decorateWith', () => {
  it('works', () => {
    const extraArgs = [Math.random(), Math.random()]
    const originalFn = Function.prototype
    const replacementFn = () => {}

    // the wrapper must receive the original function followed by the extra args
    const decorator = decorateWith(function wrapper(fn, ...args) {
      assert.deepStrictEqual(fn, originalFn)
      assert.deepStrictEqual(args, extraArgs)
      return replacementFn
    }, ...expectedArgsOf(extraArgs))

    const descriptor = {
      configurable: true,
      enumerable: false,
      value: originalFn,
      writable: true,
    }
    // only `value` is replaced, the rest of the descriptor is preserved
    assert.deepStrictEqual(decorator({}, 'foo', descriptor), {
      ...descriptor,
      value: replacementFn,
    })

    // identity helper keeps the spread call site readable
    function expectedArgsOf(args) {
      return args
    }
  })
})

describe('decorateMethodsWith', () => {
  it('works', () => {
    class C {
      foo() {}
      bar() {}
    }
    const extraArgs = [Math.random(), Math.random()]
    const proto = C.prototype
    const originalDescriptors = Object.getOwnPropertyDescriptors(proto)

    const replacementFoo = () => {}
    const replacementBar = () => {}
    decorateMethodsWith(C, {
      // bare function form: called with the method only
      foo(method) {
        assert.strictEqual(arguments.length, 1)
        assert.strictEqual(method, proto.foo)
        return replacementFoo
      },
      // array form: called with the method followed by the extra args
      bar: [
        function (method, ...args) {
          assert.strictEqual(method, proto.bar)
          assert.deepStrictEqual(args, extraArgs)
          return replacementBar
        },
        ...extraArgs,
      ],
    })

    const updatedDescriptors = Object.getOwnPropertyDescriptors(proto)
    assert.deepStrictEqual(updatedDescriptors.foo, { ...originalDescriptors.foo, value: replacementFoo })
    assert.deepStrictEqual(updatedDescriptors.bar, { ...originalDescriptors.bar, value: replacementBar })
  })
})

describe('perInstance', () => {
  it('works', () => {
    let decoratorCalls = 0
    const extraArgs = [Math.random(), Math.random()]
    const originalFn = Function.prototype
    function wrapper(fn, ...args) {
      assert.strictEqual(fn, originalFn)
      assert.deepStrictEqual(args, extraArgs)
      const i = ++decoratorCalls
      return () => i
    }

    const wrapped = perInstance(originalFn, wrapper, ...extraArgs)
    // decorator is not called before the decorated function is called
    assert.strictEqual(decoratorCalls, 0)

    const o1 = {}
    const o2 = {}
    assert.strictEqual(wrapped.call(o1), 1)
    // the same decorated function is reused for the same instance
    assert.strictEqual(wrapped.call(o1), 1)
    // a new decorated function is created for another instance
    assert.strictEqual(wrapped.call(o2), 2)
  })
})

View File

@@ -20,11 +20,15 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"version": "1.0.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^15.1.6"
}
}

View File

@@ -1,3 +1,5 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { createDebounceResource } = require('./debounceResource')

View File

@@ -1,3 +1,5 @@
'use strict'
const ensureArray = require('ensure-array')
const { MultiKeyMap } = require('@vates/multi-key-map')

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { deduped } = require('./deduped')

View File

@@ -1,3 +1,5 @@
'use strict'
class Node {
constructor(value) {
this.children = new Map()

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { MultiKeyMap } = require('./')

View File

@@ -1,3 +1,5 @@
'use strict'
const ms = require('ms')
exports.parseDuration = value => {

View File

@@ -0,0 +1,57 @@
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`, this permits the most efficient composition:
```js
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
const compositePredicate = predicate2
```
Predicates can also be passed wrapped in an array:
```js
const compositePredicate = every([predicate1, some([predicate2, predicate3])])
```
`this` and all arguments are passed to the nested predicates.
### `every(predicates)`
> Returns a predicate that returns `true` iff every predicate returns `true`.
```js
const isBetween3And7 = every(
n => n >= 3,
n => n <= 7
)
isBetween3And7(0)
// → false
isBetween3And7(5)
// → true
isBetween3And7(10)
// → false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.
```js
const isAliceOrBob = some(
name => name === 'Alice',
name => name === 'Bob'
)
isAliceOrBob('Alice')
// → true
isAliceOrBob('Bob')
// → true
isAliceOrBob('Oscar')
// → false
```

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,90 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/predicates
[![Package Version](https://badgen.net/npm/v/@vates/predicates)](https://npmjs.org/package/@vates/predicates) ![License](https://badgen.net/npm/license/@vates/predicates) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/predicates)](https://bundlephobia.com/result?p=@vates/predicates) [![Node compatibility](https://badgen.net/npm/node/@vates/predicates)](https://npmjs.org/package/@vates/predicates)
> Utilities to compose predicates
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/predicates):
```
> npm install --save @vates/predicates
```
## Usage
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`, this permits the most efficient composition:
```js
const compositePredicate = every(undefined, some(predicate2, undefined))
// ends up as
const compositePredicate = predicate2
```
Predicates can also be passed wrapped in an array:
```js
const compositePredicate = every([predicate1, some([predicate2, predicate3])])
```
`this` and all arguments are passed to the nested predicates.
### `every(predicates)`
> Returns a predicate that returns `true` iff every predicate returns `true`.
```js
const isBetween3And7 = every(
n => n >= 3,
n => n <= 7
)
isBetween3And7(0)
// → false
isBetween3And7(5)
// → true
isBetween3And7(10)
// → false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.
```js
const isAliceOrBob = some(
name => name === 'Alice',
name => name === 'Bob'
)
isAliceOrBob('Alice')
// → true
isAliceOrBob('Bob')
// → true
isAliceOrBob('Oscar')
// → false
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,71 @@
'use strict'

// Thrown when a defined, non-function value is passed where a predicate is
// expected. The offending value is exposed as `error.value`.
class InvalidPredicate extends TypeError {
  constructor(value) {
    super('not a valid predicate')
    this.value = value
  }
}

// Filter callback: `undefined` entries are skipped, functions are kept, and
// anything else is rejected loudly.
function isDefinedPredicate(value) {
  if (value === undefined) {
    return false
  }
  if (typeof value !== 'function') {
    throw new InvalidPredicate(value)
  }
  return true
}

// Normalizes the arguments of `every`/`some`: accepts either a list of
// predicates or a single array of predicates, and returns an array containing
// only the defined ones.
function handleArgs() {
  const single = arguments.length === 1 ? arguments[0] : undefined
  const predicates = Array.isArray(single) ? single : arguments
  return Array.prototype.filter.call(predicates, isDefinedPredicate)
}
exports.every = function every() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length
if (n === 0) {
return
}
if (n === 1) {
return predicates[0]
}
return function everyPredicate() {
for (let i = 0; i < n; ++i) {
if (!predicates[i].apply(this, arguments)) {
return false
}
}
return true
}
}
exports.some = function some() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length
if (n === 0) {
return
}
if (n === 1) {
return predicates[0]
}
return function somePredicate() {
for (let i = 0; i < n; ++i) {
if (predicates[i].apply(this, arguments)) {
return true
}
}
return false
}
}

View File

@@ -0,0 +1,65 @@
'use strict'
const assert = require('assert/strict')
const { describe, it } = require('tap').mocha
const { every, some } = require('./')
const T = () => true
const F = () => false

// Shared specs for the argument handling common to `every` and `some`.
const testArgsHandling = composer => {
  it('returns undefined if all predicates are undefined', () => {
    assert.equal(composer(undefined), undefined)
    assert.equal(composer([undefined]), undefined)
  })

  it('returns the predicate if only a single one is passed', () => {
    assert.equal(composer(undefined, T), T)
    assert.equal(composer([undefined, T]), T)
  })

  it('throws if it receives a non-predicate', () => {
    const expected = new TypeError('not a valid predicate')
    expected.value = 3
    assert.throws(() => composer(3), expected)
  })

  it('forwards this and arguments to predicates', () => {
    const thisArg = 'qux'
    const args = ['foo', 'bar', 'baz']
    const predicate = function () {
      assert.equal(this, thisArg)
      assert.deepEqual(Array.from(arguments), args)
    }
    composer(predicate, predicate).apply(thisArg, args)
  })
}
// Runs a composer against a truth table: each row is
// [expectedResult, ...predicates]. Both calling conventions (spread and
// single array) are exercised.
const runTests = (composer, truthTable) =>
  it('works', () => {
    for (const [expected, ...predicates] of truthTable) {
      assert.equal(composer(...predicates)(), expected)
      assert.equal(composer(predicates)(), expected)
    }
  })
describe('every', () => {
  testArgsHandling(every)
  // Truth table: [expected result, ...predicates] — AND semantics.
  runTests(every, [
    [true, T, T],
    [false, T, F],
    [false, F, T],
    [false, F, F],
  ])
})
describe('some', () => {
  testArgsHandling(some)
  // Truth table: [expected result, ...predicates] — OR semantics.
  runTests(some, [
    [true, T, T],
    [true, T, F],
    [true, F, T],
    [false, F, F],
  ])
})

View File

@@ -0,0 +1,40 @@
{
"private": false,
"name": "@vates/predicates",
"description": "Utilities to compose predicates",
"keywords": [
"and",
"combine",
"compose",
"every",
"function",
"functions",
"or",
"predicate",
"predicates",
"some"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/predicates",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/predicates",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=6"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^15.1.6"
}
}

View File

@@ -1,3 +1,5 @@
'use strict'
const readChunk = (stream, size) =>
size === 0
? Promise.resolve(Buffer.alloc(0))

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { Readable } = require('stream')

View File

@@ -1,5 +1,7 @@
#!/usr/bin/env node
'use strict'
const fs = require('fs')
const mapKeys = (object, iteratee) => {

View File

@@ -1,3 +1,5 @@
'use strict'
const wrapCall = (fn, arg, thisArg) => {
try {
return Promise.resolve(fn.call(thisArg, arg))

View File

@@ -1,3 +1,5 @@
'use strict'
/* eslint-env jest */
const { asyncMapSettled } = require('./')

View File

@@ -1,3 +1,5 @@
'use strict'
// type MaybePromise<T> = Promise<T> | T
//
// declare export function asyncMap<T1, T2>(

View File

@@ -1 +1,3 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -30,7 +30,7 @@
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/decorate-with": "^0.1.0",
"@vates/decorate-with": "^1.0.0",
"@xen-orchestra/log": "^0.3.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"

View File

@@ -46,7 +46,7 @@ module.exports = function (pkg, configs = {}) {
return {
comments: !__PROD__,
ignore: __PROD__ ? [/\.spec\.js$/] : undefined,
ignore: __PROD__ ? [/\btests?\//, /\.spec\.js$/] : undefined,
plugins: Object.keys(plugins)
.map(plugin => [plugin, plugins[plugin]])
.sort(([a], [b]) => {

View File

@@ -1,3 +1,5 @@
'use strict'
const getopts = require('getopts')
const { version } = require('./package.json')

View File

@@ -1,3 +1,5 @@
'use strict'
const { dirname } = require('path')
const fs = require('promise-toolbox/promisifyAll')(require('fs'))

View File

@@ -1,5 +1,7 @@
#!/usr/bin/env node
'use strict'
// -----------------------------------------------------------------------------
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)

View File

@@ -1,3 +1,5 @@
'use strict'
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')

View File

@@ -1,3 +1,5 @@
'use strict'
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')

View File

@@ -1,5 +1,7 @@
#!/usr/bin/env node
'use strict'
require('./_composeCommands')({
'clean-vms': {
get main() {

View File

@@ -7,12 +7,12 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.13.0",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/backups": "^0.20.0",
"@xen-orchestra/fs": "^0.20.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.19.2"
"promise-toolbox": "^0.21.0"
},
"engines": {
"node": ">=7.10.1"
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.6.0",
"version": "0.7.0",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -1,6 +1,8 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')

View File

@@ -1,3 +1,5 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
exports.DurablePartition = class DurablePartition {

View File

@@ -1,3 +1,5 @@
'use strict'
const assert = require('assert')
const { formatFilenameDate } = require('./_filenameDate.js')
@@ -6,9 +8,9 @@ const { Task } = require('./Task.js')
const { watchStreamSize } = require('./_watchStreamSize.js')
exports.ImportVmBackup = class ImportVmBackup {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses } = {} }) {
constructor({ adapter, metadata, srUuid, xapi, settings: { newMacAddresses, mapVdisSrs } = {} }) {
this._adapter = adapter
this._importDeltaVmSettings = { newMacAddresses }
this._importDeltaVmSettings = { newMacAddresses, mapVdisSrs }
this._metadata = metadata
this._srUuid = srUuid
this._xapi = xapi

View File

@@ -1,21 +1,26 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable.js')
const fromCallback = require('promise-toolbox/fromCallback.js')
const fromEvent = require('promise-toolbox/fromEvent.js')
const pDefer = require('promise-toolbox/defer.js')
const pump = require('pump')
const { basename, dirname, join, normalize, resolve } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const groupBy = require('lodash/groupBy.js')
const { dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
const { Constants, createVhdDirectoryFromStream, openVhd, VhdAbstract, VhdDirectory, VhdSynthetic } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile, isVhdFile } = require('./_backupType.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
@@ -67,58 +72,17 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode } = {}) {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
}
get handler() {
return this._handler
}
async _deleteVhd(path) {
const handler = this._handler
const vhds = await asyncMapSettled(
await handler.list(dirname(path), {
filter: isVhdFile,
prependDir: true,
}),
async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
path,
}
} catch (error) {
// Do not fail on corrupted VHDs (usually uncleaned temporary files),
// they are probably inconsequent to the backup process and should not
// fail it.
warn(`BackupNg#_deleteVhd ${path}`, { error })
}
}
)
const base = basename(path)
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
if (child === undefined) {
await handler.unlink(path)
return 0
}
try {
const childPath = child.path
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
await handler.rename(path, childPath)
return mergedDataSize
} catch (error) {
handler.unlink(path).catch(warn)
throw error
}
}
async _findPartition(devicePath, partitionId) {
const partitions = await listPartitions(devicePath)
const partition = partitions.find(_ => _.id === partitionId)
@@ -128,9 +92,6 @@ class RemoteAdapter {
return partition
}
_getLvmLogicalVolumes = Disposable.factory(this._getLvmLogicalVolumes)
_getLvmLogicalVolumes = deduped(this._getLvmLogicalVolumes, (devicePath, pvId, vgName) => [devicePath, pvId, vgName])
_getLvmLogicalVolumes = debounceResourceFactory(this._getLvmLogicalVolumes)
async *_getLvmLogicalVolumes(devicePath, pvId, vgName) {
yield this._getLvmPhysicalVolume(devicePath, pvId && (await this._findPartition(devicePath, pvId)))
@@ -142,9 +103,6 @@ class RemoteAdapter {
}
}
_getLvmPhysicalVolume = Disposable.factory(this._getLvmPhysicalVolume)
_getLvmPhysicalVolume = deduped(this._getLvmPhysicalVolume, (devicePath, partition) => [devicePath, partition?.id])
_getLvmPhysicalVolume = debounceResourceFactory(this._getLvmPhysicalVolume)
async *_getLvmPhysicalVolume(devicePath, partition) {
const args = []
if (partition !== undefined) {
@@ -165,9 +123,6 @@ class RemoteAdapter {
}
}
_getPartition = Disposable.factory(this._getPartition)
_getPartition = deduped(this._getPartition, (devicePath, partition) => [devicePath, partition?.id])
_getPartition = debounceResourceFactory(this._getPartition)
async *_getPartition(devicePath, partition) {
const options = ['loop', 'ro']
@@ -220,7 +175,6 @@ class RemoteAdapter {
})
}
_usePartitionFiles = Disposable.factory(this._usePartitionFiles)
async *_usePartitionFiles(diskId, partitionId, paths) {
const path = yield this.getPartition(diskId, partitionId)
@@ -232,6 +186,22 @@ class RemoteAdapter {
return files
}
// check if we will be allowed to merge a VHD created in this adapter
// with the vhd at path `path`
async isMergeableParent(packedParentUid, path) {
return await Disposable.use(openVhd(this.handler, path), vhd => {
// this baseUuid is not linked with this vhd
if (!vhd.footer.uuid.equals(packedParentUid)) {
return false
}
const isVhdDirectory = vhd instanceof VhdDirectory
return isVhdDirectory
? this.#useVhdDirectory() && this.#getCompressionType() === vhd.compressionType
: !this.#useVhdDirectory()
})
}
fetchPartitionFiles(diskId, partitionId, paths) {
const { promise, reject, resolve } = pDefer()
Disposable.use(
@@ -253,16 +223,9 @@ class RemoteAdapter {
async deleteDeltaVmBackups(backups) {
const handler = this._handler
let mergedDataSize = 0
await asyncMapSettled(backups, ({ _filename, vhds }) =>
Promise.all([
handler.unlink(_filename),
asyncMap(Object.values(vhds), async _ => {
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
}),
])
)
return mergedDataSize
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
}
async deleteMetadataBackup(backupId) {
@@ -292,22 +255,42 @@ class RemoteAdapter {
)
}
async deleteVmBackup(filename) {
const metadata = JSON.parse(String(await this._handler.readFile(filename)))
metadata._filename = filename
deleteVmBackup(file) {
return this.deleteVmBackups([file])
}
if (metadata.mode === 'delta') {
await this.deleteDeltaVmBackups([metadata])
} else if (metadata.mode === 'full') {
await this.deleteFullVmBackups([metadata])
} else {
throw new Error(`no deleter for backup mode ${metadata.mode}`)
async deleteVmBackups(files) {
const { delta, full, ...others } = groupBy(await asyncMap(files, file => this.readVmBackupMetadata(file)), 'mode')
const unsupportedModes = Object.keys(others)
if (unsupportedModes.length !== 0) {
throw new Error('no deleter for backup modes: ' + unsupportedModes.join(', '))
}
await Promise.all([
delta !== undefined && this.deleteDeltaVmBackups(delta),
full !== undefined && this.deleteFullVmBackups(full),
])
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
}
getDisk = Disposable.factory(this.getDisk)
getDisk = deduped(this.getDisk, diskId => [diskId])
getDisk = debounceResourceFactory(this.getDisk)
#getCompressionType() {
return this._vhdDirectoryCompression
}
#useVhdDirectory() {
return this.handler.type === 's3'
}
#useAlias() {
return this.#useVhdDirectory()
}
async *getDisk(diskId) {
const handler = this._handler
@@ -344,7 +327,6 @@ class RemoteAdapter {
// - `<partitionId>`: partitioned disk
// - `<pvId>/<vgName>/<lvName>`: LVM on a partitioned disk
// - `/<vgName>/lvName>`: LVM on a raw disk
getPartition = Disposable.factory(this.getPartition)
async *getPartition(diskId, partitionId) {
const devicePath = yield this.getDisk(diskId)
if (partitionId === undefined) {
@@ -361,13 +343,26 @@ class RemoteAdapter {
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
}
// if we use alias on this remote, we have to name the file alias.vhd
getVhdFileName(baseName) {
if (this.#useAlias()) {
return `${baseName}.alias.vhd`
}
return `${baseName}.vhd`
}
async listAllVmBackups() {
const handler = this._handler
const backups = { __proto__: null }
await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
const vmBackups = await this.listVmBackups(vmUuid)
backups[vmUuid] = vmBackups
await asyncMap(await handler.list(BACKUP_DIR), async entry => {
// ignore hidden and lock files
if (entry[0] !== '.' && !entry.endsWith('.lock')) {
const vmBackups = await this.listVmBackups(entry)
if (vmBackups.length !== 0) {
backups[entry] = vmBackups
}
}
})
return backups
@@ -505,6 +500,25 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {
await this.outputStream(path, input, { checksum, validator })
}
}
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
await this._handler.outputStream(path, input, {
checksum,
@@ -516,6 +530,52 @@ class RemoteAdapter {
})
}
async _createSyntheticStream(handler, paths) {
let disposableVhds = []
// if it's a path : open all hierarchy of parent
if (typeof paths === 'string') {
let vhd
let vhdPath = paths
do {
const disposable = await openVhd(handler, vhdPath)
vhd = disposable.value
disposableVhds.push(disposable)
vhdPath = resolveRelativeFromFile(vhdPath, vhd.header.parentUnicodeName)
} while (vhd.footer.diskType !== Constants.DISK_TYPES.DYNAMIC)
} else {
// only open the list of path given
disposableVhds = paths.map(path => openVhd(handler, path))
}
// I don't want the vhds to be disposed on return
// but only when the stream is done ( or failed )
const disposables = await Disposable.all(disposableVhds)
const vhds = disposables.value
let disposed = false
const disposeOnce = async () => {
if (!disposed) {
disposed = true
try {
await disposables.dispose()
} catch (error) {
warn('_createSyntheticStream: failed to dispose VHDs', { error })
}
}
}
const synthetic = new VhdSynthetic(vhds)
await synthetic.readHeaderAndFooter()
await synthetic.readBlockAllocationTable()
const stream = await synthetic.stream()
stream.on('end', disposeOnce)
stream.on('close', disposeOnce)
stream.on('error', disposeOnce)
return stream
}
async readDeltaVmBackup(metadata) {
const handler = this._handler
const { vbds, vdis, vhds, vifs, vm } = metadata
@@ -523,7 +583,7 @@ class RemoteAdapter {
const streams = {}
await asyncMapSettled(Object.keys(vdis), async id => {
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
streams[`${id}.vhd`] = await this._createSyntheticStream(handler, join(dir, vhds[id]))
})
return {
@@ -556,4 +616,30 @@ Object.assign(RemoteAdapter.prototype, {
isValidXva,
})
decorateMethodsWith(RemoteAdapter, {
_getLvmLogicalVolumes: compose([
Disposable.factory,
[deduped, (devicePath, pvId, vgName) => [devicePath, pvId, vgName]],
debounceResourceFactory,
]),
_getLvmPhysicalVolume: compose([
Disposable.factory,
[deduped, (devicePath, partition) => [devicePath, partition?.id]],
debounceResourceFactory,
]),
_getPartition: compose([
Disposable.factory,
[deduped, (devicePath, partition) => [devicePath, partition?.id]],
debounceResourceFactory,
]),
_usePartitionFiles: Disposable.factory,
getDisk: compose([Disposable.factory, [deduped, diskId => [diskId]], debounceResourceFactory]),
getPartition: Disposable.factory,
})
exports.RemoteAdapter = RemoteAdapter

View File

@@ -1,3 +1,5 @@
'use strict'
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup.js')

View File

@@ -1,4 +1,6 @@
const CancelToken = require('promise-toolbox/CancelToken.js')
'use strict'
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = () => {
@@ -7,6 +9,8 @@ const logAfterEnd = () => {
const noop = Function.prototype
const serializeErrors = errors => (Array.isArray(errors) ? errors.map(serializeError) : errors)
// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
@@ -15,6 +19,7 @@ const serializeError = error =>
? {
...error, // Copy enumerable properties.
code: error.code,
errors: serializeErrors(error.errors), // supports AggregateError
message: error.message,
name: error.name,
stack: error.stack,

View File

@@ -1,3 +1,5 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')

View File

@@ -1,10 +1,14 @@
'use strict'
const assert = require('assert')
const findLast = require('lodash/findLast.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy.js')
const mapValues = require('lodash/mapValues.js')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { formatDateTime } = require('@xen-orchestra/xapi')
@@ -20,6 +24,13 @@ const { watchStreamSize } = require('./_watchStreamSize.js')
const { debug, warn } = createLogger('xo:backups:VmBackup')
class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}
const asyncEach = async (iterable, fn, thisArg = iterable) => {
for (const item of iterable) {
await fn.call(thisArg, item)
@@ -33,8 +44,14 @@ const forkDeltaExport = deltaExport =>
},
})
exports.VmBackup = class VmBackup {
class VmBackup {
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
if (vm.other_config['xo:backup:job'] === job.id && 'start' in vm.blocked_operations) {
// don't match replicated VMs created by this very job otherwise they
// will be replicated again and again
throw new Error('cannot backup a VM created by this very job')
}
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
@@ -118,16 +135,18 @@ exports.VmBackup = class VmBackup {
return
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await fn(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
}
})
if (writers.size === 0) {
throw new Error('all targets have failed, step: ' + warnMessage)
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
}
}
@@ -284,17 +303,28 @@ exports.VmBackup = class VmBackup {
}
async _removeUnusedSnapshots() {
// TODO: handle all schedules (no longer existing schedules default to 0 retention)
const { scheduleId } = this
const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)
const jobSettings = this.job.settings
const baseVmRef = this._baseVm?.$ref
const { config } = this
const baseSettings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...jobSettings[''],
}
const snapshotsPerSchedule = groupBy(this._jobSnapshots, _ => _.other_config['xo:backup:schedule'])
const xapi = this._xapi
await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
await asyncMap(Object.entries(snapshotsPerSchedule), ([scheduleId, snapshots]) => {
const settings = {
...baseSettings,
...jobSettings[scheduleId],
...jobSettings[this.vm.uuid],
}
return asyncMap(getOldEntries(settings.snapshotRetention, snapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
}
})
})
}
@@ -303,12 +333,14 @@ exports.VmBackup = class VmBackup {
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
if (baseVm === undefined) {
debug('no base VM found')
return
}
const fullInterval = this._settings.fullInterval
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
debug('not using base VM becaust fullInterval reached')
return
}
@@ -319,10 +351,17 @@ exports.VmBackup = class VmBackup {
const baseUuidToSrcVdi = new Map()
await asyncMap(await baseVm.$getDisks(), async baseRef => {
const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
const [baseUuid, snapshotOf] = await Promise.all([
xapi.getField('VDI', baseRef, 'uuid'),
xapi.getField('VDI', baseRef, 'snapshot_of'),
])
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
baseUuidToSrcVdi.set(baseUuid, srcVdi)
} else {
debug('ignore snapshot VDI because no longer present on VM', {
vdi: baseUuid,
})
}
})
@@ -333,9 +372,23 @@ exports.VmBackup = class VmBackup {
false
)
if (presentBaseVdis.size === 0) {
debug('no base VM found')
return
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
if (presentBaseVdis.has(baseUuid)) {
debug('found base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
} else {
debug('missing base VDI', {
base: baseUuid,
vdi: srcVdi.uuid,
})
fullVdisRequired.add(srcVdi.uuid)
}
})
@@ -344,7 +397,6 @@ exports.VmBackup = class VmBackup {
this._fullVdisRequired = fullVdisRequired
}
run = defer(this.run)
async run($defer) {
const settings = this._settings
assert(
@@ -392,3 +444,8 @@ exports.VmBackup = class VmBackup {
}
}
}
exports.VmBackup = VmBackup
decorateMethodsWith(VmBackup, {
run: defer,
})

View File

@@ -1,3 +1,5 @@
'use strict'
const { asyncMap } = require('@xen-orchestra/async-map')
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter.js')

View File

@@ -1,3 +1,5 @@
'use strict'
exports.isMetadataFile = filename => filename.endsWith('.json')
exports.isVhdFile = filename => filename.endsWith('.vhd')
exports.isXvaFile = filename => filename.endsWith('.xva')

View File

@@ -1,11 +1,14 @@
'use strict'
require('@xen-orchestra/log/configure.js').catchGlobalErrors(
require('@xen-orchestra/log').createLogger('xo:backups:worker')
)
const Disposable = require('promise-toolbox/Disposable.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compose } = require('@vates/compose')
const { createDebounceResource } = require('@vates/disposable/debounceResource.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { deduped } = require('@vates/disposable/deduped.js')
const { getHandler } = require('@xen-orchestra/fs')
const { parseDuration } = require('@vates/parse-duration')
@@ -58,11 +61,6 @@ class BackupWorker {
}).run()
}
getAdapter = Disposable.factory(this.getAdapter)
getAdapter = deduped(this.getAdapter, remote => [remote.url])
getAdapter = compose(this.getAdapter, function (resource) {
return this.debounceResource(resource)
})
async *getAdapter(remote) {
const handler = getHandler(remote, this.#remoteOptions)
await handler.sync()
@@ -70,17 +68,13 @@ class BackupWorker {
yield new RemoteAdapter(handler, {
debounceResource: this.debounceResource,
dirMode: this.#config.dirMode,
vhdDirectoryCompression: this.#config.vhdDirectoryCompression,
})
} finally {
await handler.forget()
}
}
getXapi = Disposable.factory(this.getXapi)
getXapi = deduped(this.getXapi, ({ url }) => [url])
getXapi = compose(this.getXapi, function (resource) {
return this.debounceResource(resource)
})
async *getXapi({ credentials: { username: user, password }, ...opts }) {
const xapi = new Xapi({
...this.#xapiOptions,
@@ -102,6 +96,30 @@ class BackupWorker {
}
}
// Decorate the async generator methods after the class definition (replaces
// the previous inline `method = decorator(method)` class fields).
//
// For each method, applied outermost-first:
// - Disposable.factory: turns the async generator into a disposable-resource
//   factory
// - deduped: shares one live resource per key (the remote URL, resp. the
//   XAPI URL)
// - the composed wrapper routes the resource through `this.debounceResource`
//   (presumably delaying actual disposal so it can be reused — see
//   @vates/disposable)
decorateMethodsWith(BackupWorker, {
  getAdapter: compose([
    Disposable.factory,
    [deduped, remote => [remote.url]],
    [
      compose,
      function (resource) {
        return this.debounceResource(resource)
      },
    ],
  ]),

  getXapi: compose([
    Disposable.factory,
    [deduped, xapi => [xapi.url]],
    [
      compose,
      function (resource) {
        return this.debounceResource(resource)
      },
    ],
  ]),
})
// Received message:
//
// Message {

View File

@@ -1,5 +1,7 @@
const cancelable = require('promise-toolbox/cancelable.js')
const CancelToken = require('promise-toolbox/CancelToken.js')
'use strict'
const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')
// Similar to `Promise.all` + `map` but pass a cancel token to the callback
//

View File

@@ -0,0 +1,439 @@
'use strict'
/* eslint-env jest */
const rimraf = require('rimraf')
const tmp = require('tmp')
const fs = require('fs-extra')
const { getHandler } = require('@xen-orchestra/fs')
const { pFromCallback } = require('promise-toolbox')
const crypto = require('crypto')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
// Shared fixtures: every test gets a fresh temporary directory used as a
// `file://` remote, with a unique `vdis/<jobId>/<vdiId>` path inside it.
let tempDir, adapter, handler, jobId, vdiId, basePath

// creating and merging VHDs on a real filesystem can be slow on CI
jest.setTimeout(60000)

beforeEach(async () => {
  tempDir = await pFromCallback(cb => tmp.dir(cb))
  handler = getHandler({ url: `file://${tempDir}` })
  await handler.sync()
  adapter = new RemoteAdapter(handler)
  jobId = uniqueId()
  vdiId = uniqueId()
  basePath = `vdis/${jobId}/${vdiId}`
  await fs.mkdirp(`${tempDir}/${basePath}`)
})

afterEach(async () => {
  // remove the temporary directory, then release the handler
  await pFromCallback(cb => rimraf(tempDir, cb))
  await handler.forget()
})
// Returns a 32-character hexadecimal string, unique per call — used to build
// isolated job/VDI paths for each test.
const uniqueId = () => {
  const bytes = crypto.randomBytes(16)
  return bytes.toString('hex')
}
/**
 * Create a VHD fixture on the test remote.
 *
 * @param {string} path - where the VHD (or its alias) must appear
 * @param {object} [opts]
 * @param {boolean} [opts.useAlias] - store the data under a sibling `data/`
 *   directory and create `<path>.alias.vhd` pointing to it
 * @param {string} [opts.mode] - `'directory'` for a VhdDirectory, otherwise
 *   a plain VhdFile is created
 * @param {object} [opts.header] - fields merged over VHDHEADER
 * @param {object} [opts.footer] - fields merged over VHDFOOTER
 * @returns the written VHD instance
 */
async function generateVhd(path, opts = {}) {
  // when aliased, the actual data lives in a sibling `data/` directory
  let dataPath = path
  if (opts.useAlias) {
    const dataDir = `${dirname(path)}/data/`
    await handler.mkdir(dataDir)
    dataPath = dataDir + basename(path)
  }

  let vhd
  if (opts.mode === 'directory') {
    await handler.mkdir(dataPath)
    vhd = new VhdDirectory(handler, dataPath)
  } else {
    vhd = new VhdFile(handler, await handler.openFile(dataPath, 'wx'))
  }

  vhd.header = { ...VHDHEADER, ...opts.header }
  vhd.footer = { ...VHDFOOTER, ...opts.footer, uuid: Buffer.from(crypto.randomBytes(16)) }

  // a VHD with a parent is a differencing (delta) disk, otherwise dynamic (full)
  vhd.footer.diskType = vhd.header.parentUnicodeName
    ? Constants.DISK_TYPES.DIFFERENCING
    : Constants.DISK_TYPES.DYNAMIC

  if (opts.useAlias === true) {
    await VhdAbstract.createAlias(handler, `${path}.alias.vhd`, dataPath)
  }

  await vhd.writeBlockAllocationTable()
  await vhd.writeHeader()
  await vhd.writeFooter()
  return vhd
}
// A file with a .vhd extension but invalid content must be reported, and
// only deleted when `remove` is set.
test('It remove broken vhd', async () => {
  // todo also tests a directory and an alias
  await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
  expect((await handler.list(basePath)).length).toEqual(1)
  let loggued = ''
  const onLog = message => {
    loggued += message
  }
  await adapter.cleanVm('/', { remove: false, onLog })
  expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
  // not removed
  expect((await handler.list(basePath)).length).toEqual(1)
  // really remove it
  await adapter.cleanVm('/', { remove: true, onLog })
  expect((await handler.list(basePath)).length).toEqual(0)
})
// A differencing VHD whose parent is missing must be deleted; a valid
// orphan -> child chain must be kept.
test('it remove vhd with missing or multiple ancestors', async () => {
  // one with a broken parent
  await generateVhd(`${basePath}/abandonned.vhd`, {
    header: {
      parentUnicodeName: 'gone.vhd',
      parentUid: Buffer.from(crypto.randomBytes(16)),
    },
  })
  // one orphan, which is a full vhd, no parent
  const orphan = await generateVhd(`${basePath}/orphan.vhd`)
  // a child to the orphan
  await generateVhd(`${basePath}/child.vhd`, {
    header: {
      parentUnicodeName: 'orphan.vhd',
      parentUid: orphan.footer.uuid,
    },
  })
  // clean
  let loggued = ''
  const onLog = message => {
    loggued += message + '\n'
  }
  await adapter.cleanVm('/', { remove: true, onLog })
  const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
  expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
  const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
  expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
  // we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
// A delta backup's metadata must be removed when one of the VHDs it
// references is gone, and the now-unreferenced remaining VHDs are deleted too.
test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
  // create a metadata file marking child and orphan as ok
  await handler.writeFile(
    `metadata.json`,
    JSON.stringify({
      mode: 'delta',
      vhds: [
        `${basePath}/orphan.vhd`,
        `${basePath}/child.vhd`,
        // abandonned.vhd is not here
      ],
    })
  )
  await generateVhd(`${basePath}/abandonned.vhd`)
  // one orphan, which is a full vhd, no parent
  const orphan = await generateVhd(`${basePath}/orphan.vhd`)
  // a child to the orphan
  await generateVhd(`${basePath}/child.vhd`, {
    header: {
      parentUnicodeName: 'orphan.vhd',
      parentUid: orphan.footer.uuid,
    },
  })
  let loggued = ''
  const onLog = message => {
    loggued += message + '\n'
  }
  await adapter.cleanVm('/', { remove: true, onLog })
  let matched = loggued.match(/deleting unused VHD /g) || []
  expect(matched.length).toEqual(1) // only one vhd should have been deleted
  matched = loggued.match(/abandonned.vhd is unused/g) || []
  expect(matched.length).toEqual(1) // and it must be abandonned.vhd
  // a missing vhd causes clean to remove all vhds of that backup
  await handler.writeFile(
    `metadata.json`,
    JSON.stringify({
      mode: 'delta',
      vhds: [
        `${basePath}/deleted.vhd`, // in metadata but not in vhds
        `${basePath}/orphan.vhd`,
        `${basePath}/child.vhd`,
        // abandonned.vhd is not here anymore
      ],
    }),
    { flags: 'w' }
  )
  loggued = ''
  await adapter.cleanVm('/', { remove: true, onLog })
  matched = loggued.match(/deleting unused VHD /g) || []
  expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
// When a VHD is only used as the parent of a referenced child, `merge: true`
// must merge it into the child and update the metadata size afterwards.
test('it merges delta of non destroyed chain', async () => {
  await handler.writeFile(
    `metadata.json`,
    JSON.stringify({
      mode: 'delta',
      size: 12000, // a size too small
      vhds: [
        `${basePath}/grandchild.vhd`, // grand child should not be merged
        `${basePath}/child.vhd`,
        // orphan is not here, he should be merged in child
      ],
    })
  )
  // one orphan, which is a full vhd, no parent
  const orphan = await generateVhd(`${basePath}/orphan.vhd`)
  // a child to the orphan
  const child = await generateVhd(`${basePath}/child.vhd`, {
    header: {
      parentUnicodeName: 'orphan.vhd',
      parentUid: orphan.footer.uuid,
    },
  })
  // a grand child
  await generateVhd(`${basePath}/grandchild.vhd`, {
    header: {
      parentUnicodeName: 'child.vhd',
      parentUid: child.footer.uuid,
    },
  })
  let loggued = []
  const onLog = message => {
    loggued.push(message)
  }
  // without `merge`, the unused parent is only reported
  await adapter.cleanVm('/', { remove: true, onLog })
  expect(loggued[0]).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
  expect(loggued[1]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
  loggued = []
  await adapter.cleanVm('/', { remove: true, merge: true, onLog })
  const [unused, merging] = loggued
  expect(unused).toEqual(`the parent /${basePath}/orphan.vhd of the child /${basePath}/child.vhd is unused`)
  expect(merging).toEqual(`merging /${basePath}/child.vhd into /${basePath}/orphan.vhd`)
  const metadata = JSON.parse(await handler.readFile(`metadata.json`))
  // size should be the size of children + grand children after the merge
  expect(metadata.size).toEqual(209920)
  // merging is already tested in vhd-lib, don't retest it here (and these vhds are empty anyway)
  // only check deletion
  const remainingVhds = await handler.list(basePath)
  expect(remainingVhds.length).toEqual(2)
  expect(remainingVhds.includes('child.vhd')).toEqual(true)
  expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
})
// A leftover `.<parent>.merge.json` state file must make cleanVm resume and
// finish the interrupted merge.
test('it finish unterminated merge ', async () => {
  await handler.writeFile(
    `metadata.json`,
    JSON.stringify({
      mode: 'delta',
      size: 209920,
      vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
    })
  )
  // one orphan, which is a full vhd, no parent
  const orphan = await generateVhd(`${basePath}/orphan.vhd`)
  // a child to the orphan
  const child = await generateVhd(`${basePath}/child.vhd`, {
    header: {
      parentUnicodeName: 'orphan.vhd',
      parentUid: orphan.footer.uuid,
    },
  })
  // a merge in progress file
  await handler.writeFile(
    `${basePath}/.orphan.vhd.merge.json`,
    JSON.stringify({
      parent: {
        header: orphan.header.checksum,
      },
      child: {
        header: child.header.checksum,
      },
    })
  )
  await adapter.cleanVm('/', { remove: true, merge: true })
  // merging is already tested in vhd-lib, don't retest it here
  // only check deletion: the merged chain collapses to a single VHD
  const remainingVhds = await handler.list(basePath)
  expect(remainingVhds.length).toEqual(1)
  expect(remainingVhds.includes('child.vhd')).toEqual(true)
})
// Each of the VHDs can be a file, a directory, an alias to a file or an alias
// to a directory. The messages and resulting files should be identical to the
// output with plain VHD files, which is tested independently above.
describe('tests multiple combination ', () => {
  for (const useAlias of [true, false]) {
    for (const vhdMode of ['file', 'directory']) {
      test(`alias : ${useAlias}, mode: ${vhdMode}`, async () => {
        // a broken VHD
        if (useAlias) {
          await handler.mkdir(basePath + '/data')
        }
        const brokenVhdDataPath = basePath + (useAlias ? '/data/broken.vhd' : '/broken.vhd')
        if (vhdMode === 'directory') {
          await handler.mkdir(brokenVhdDataPath)
        } else {
          await handler.writeFile(brokenVhdDataPath, 'notreallyavhd')
        }
        if (useAlias) {
          await VhdAbstract.createAlias(handler, 'broken.alias.vhd', brokenVhdDataPath)
        }
        // a vhd not referenced in metadata
        await generateVhd(`${basePath}/nonreference.vhd`, { useAlias, mode: vhdMode })
        // an abandoned delta vhd without its parent
        await generateVhd(`${basePath}/abandonned.vhd`, {
          useAlias,
          mode: vhdMode,
          header: {
            parentUnicodeName: 'gone.vhd',
            parentUid: crypto.randomBytes(16),
          },
        })
        // an ancestor of a vhd present in metadata
        const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
          useAlias,
          mode: vhdMode,
        })
        const child = await generateVhd(`${basePath}/child.vhd`, {
          useAlias,
          mode: vhdMode,
          header: {
            parentUnicodeName: 'ancestor.vhd' + (useAlias ? '.alias.vhd' : ''),
            parentUid: ancestor.footer.uuid,
          },
        })
        // a grand child vhd in metadata
        await generateVhd(`${basePath}/grandchild.vhd`, {
          useAlias,
          mode: vhdMode,
          header: {
            parentUnicodeName: 'child.vhd' + (useAlias ? '.alias.vhd' : ''),
            parentUid: child.footer.uuid,
          },
        })
        // an older parent whose merge was interrupted (see merge state file below)
        const cleanAncestor = await generateVhd(`${basePath}/cleanAncestor.vhd`, {
          useAlias,
          mode: vhdMode,
        })
        // a clean vhd in metadata
        const clean = await generateVhd(`${basePath}/clean.vhd`, {
          useAlias,
          mode: vhdMode,
          header: {
            parentUnicodeName: 'cleanAncestor.vhd' + (useAlias ? '.alias.vhd' : ''),
            parentUid: cleanAncestor.footer.uuid,
          },
        })
        // the interrupted merge state file
        await handler.writeFile(
          `${basePath}/.cleanAncestor.vhd${useAlias ? '.alias.vhd' : ''}.merge.json`,
          JSON.stringify({
            parent: {
              header: cleanAncestor.header.checksum,
            },
            child: {
              header: clean.header.checksum,
            },
          })
        )
        // the metadata file
        await handler.writeFile(
          `metadata.json`,
          JSON.stringify({
            mode: 'delta',
            vhds: [
              `${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
              `${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
              `${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
            ],
          })
        )
        await adapter.cleanVm('/', { remove: true, merge: true })
        const metadata = JSON.parse(await handler.readFile(`metadata.json`))
        // size should be the size of children + grand children + clean after the merge
        // (size is not computed for VhdDirectory — see shouldComputeVhdsSize)
        expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
        // broken vhd, non referenced, abandonned should be deleted ( alias and data)
        // ancestor and child should be merged
        // grand child and clean vhd should not have changed
        const survivors = await handler.list(basePath)
        if (useAlias) {
          const dataSurvivors = await handler.list(basePath + '/data')
          // the goal of the alias : do not move a full folder
          expect(dataSurvivors).toContain('ancestor.vhd')
          expect(dataSurvivors).toContain('grandchild.vhd')
          expect(dataSurvivors).toContain('cleanAncestor.vhd')
          expect(survivors).toContain('clean.vhd.alias.vhd')
          expect(survivors).toContain('child.vhd.alias.vhd')
          expect(survivors).toContain('grandchild.vhd.alias.vhd')
          expect(survivors.length).toEqual(4) // the 3 ok + data
          expect(dataSurvivors.length).toEqual(3) // the 3 ok
        } else {
          expect(survivors).toContain('clean.vhd')
          expect(survivors).toContain('child.vhd')
          expect(survivors).toContain('grandchild.vhd')
          expect(survivors.length).toEqual(3)
        }
      })
    }
  }
})
// A `.<name>.merge.json` state file whose VHD is gone must be removed.
test('it cleans orphan merge states ', async () => {
  await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
  await adapter.cleanVm('/', { remove: true })
  expect(await handler.list(basePath)).toEqual([])
})
// checkAliases used standalone: a broken alias and an unreferenced data file
// must both be removed, leaving only the valid alias/data pair.
test('check Aliases should work alone', async () => {
  await handler.mkdir('vhds')
  await handler.mkdir('vhds/data')
  await generateVhd(`vhds/data/ok.vhd`)
  await VhdAbstract.createAlias(handler, 'vhds/ok.alias.vhd', 'vhds/data/ok.vhd')
  // an alias whose target does not exist
  await VhdAbstract.createAlias(handler, 'vhds/missingData.alias.vhd', 'vhds/data/nonexistent.vhd')
  // a data file not referenced by any alias
  await generateVhd(`vhds/data/missingalias.vhd`)
  await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
  // only the ok pair survived
  const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
  expect(alias.length).toEqual(1)
  const data = await handler.list('vhds/data')
  expect(data.length).toEqual(1)
})

View File

@@ -1,17 +1,41 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const { asyncMap } = require('@xen-orchestra/async-map')
const { default: Vhd, mergeVhd } = require('vhd-lib')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants.js')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
// Checking the size of a VHD directory is costly (~1 HTTP query per 1000
// blocks), so the size is only computed when ALL the VHDs of the chain are
// plain VhdFiles.
function shouldComputeVhdsSize(vhds) {
  return vhds.every(vhd => vhd instanceof VhdFile)
}

// Open every VHD at `vhdPaths` and, when cheap to do so, resolve to the sum
// of their sizes; otherwise resolves to `undefined` (callers treat "size
// unknown" as "skip the size check"). Disposable.use guarantees each opened
// VHD is released.
const computeVhdsSize = (handler, vhdPaths) =>
  Disposable.use(
    vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
    async vhds => {
      if (shouldComputeVhdsSize(vhds)) {
        const sizes = await asyncMap(vhds, vhd => vhd.getSize())
        return sum(sizes)
      }
    }
  )
// chain is an array of VHDs from child to parent
//
// the whole chain will be merged into parent, parent will be renamed to child
// and all the others will deleted
const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
let child = chain[0]
@@ -44,7 +68,7 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, {
}
}, 10e3)
await mergeVhd(
const mergedSize = await mergeVhd(
handler,
parent,
handler,
@@ -61,26 +85,28 @@ const mergeVhdChain = limitConcurrency(1)(async function mergeVhdChain(chain, {
)
clearInterval(handle)
await Promise.all([
handler.rename(parent, child),
VhdAbstract.rename(handler, parent, child),
asyncMap(children.slice(0, -1), child => {
onLog(`the VHD ${child} is unused`)
if (remove) {
onLog(`deleting unused VHD ${child}`)
return handler.unlink(child)
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
}
})
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^(?:(.+)\/)?\.(.+)\.merge.json$/
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir) => {
const vhds = []
const interruptedVhds = new Set()
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
await asyncMap(
await handler.list(`${vmDir}/vdis`, {
@@ -95,84 +121,166 @@ const listVhds = async (handler, vmDir) => {
async vdiDir => {
const list = await handler.list(vdiDir, {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
prependDir: true,
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.push(file)
vhds.add(`${vdiDir}/${file}`)
} else {
const [, dir, file] = res
interruptedVhds.add(`${dir}/${file}`)
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
}
})
}
)
)
return { vhds, interruptedVhds }
return { vhds, interruptedVhds, aliases }
}
exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, onLog = noop }) {
/**
 * Validate VHD aliases and garbage-collect orphan data files.
 *
 * For each path in `aliasPaths`: resolve its target; if the target is not a
 * VHD file, or is missing/broken, log it and (with `remove`) delete the
 * alias (and target when it exists). Then list `targetDataRepository` and
 * report/delete every entry not referenced by a valid alias.
 *
 * @param {string[]} aliasPaths
 * @param {string} targetDataRepository - the `<dir>/data` folder to sweep
 * @param {object} opts
 * @param {object} opts.handler - remote handler
 * @param {Function} [opts.onLog]
 * @param {boolean} [opts.remove=false] - actually delete instead of only logging
 */
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
  const aliasFound = []
  for (const path of aliasPaths) {
    const target = await resolveVhdAlias(handler, path)

    if (!isVhdFile(target)) {
      onLog(`Alias ${path} references a non vhd target: ${target}`)
      if (remove) {
        await handler.unlink(target)
        await handler.unlink(path)
      }
      continue
    }

    try {
      const { dispose } = await openVhd(handler, target)
      try {
        await dispose()
      } catch (e) {
        // error during dispose should not trigger a deletion
      }
    } catch (error) {
      onLog(`target ${target} of alias ${path} is missing or broken`, { error })
      if (remove) {
        try {
          await VhdAbstract.unlink(handler, path)
        } catch (e) {
          if (e.code !== 'ENOENT') {
            onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
          }
        }
      }
      continue
    }

    aliasFound.push(resolve('/', target))
  }

  const entries = await handler.list(targetDataRepository, {
    ignoreMissing: true,
    prependDir: true,
  })

  // FIX: the previous `entries.forEach(async entry => …)` fired the unlinks
  // without awaiting them, so this function could return while deletions were
  // still in flight (and their rejections were unhandled); iterate and await
  // each removal instead.
  for (const entry of entries) {
    if (!aliasFound.includes(entry)) {
      onLog(`the Vhd ${entry} is not referenced by a an alias`)
      if (remove) {
        await VhdAbstract.unlink(handler, entry)
      }
    }
  }
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
) {
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const handler = this._handler
const vhds = new Set()
const vhdsToJSons = new Set()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const vhdsList = await listVhds(handler, vmDir)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
// remove broken VHDs
await asyncMap(vhdsList.vhds, async path => {
await asyncMap(vhds, async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter(!vhdsList.interruptedVhds.has(path))
vhds.add(path)
if (vhd.footer.diskType === DISK_TYPE_DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
await Disposable.use(openVhd(handler, path, { checkSecondFooter: !interruptedVhds.has(path) }), vhd => {
if (vhd.footer.diskType === DISK_TYPES.DIFFERENCING) {
const parent = resolve('/', dirname(path), vhd.header.parentUnicodeName)
vhdParents[path] = parent
if (parent in vhdChildren) {
const error = new Error('this script does not support multiple VHD children')
error.parent = parent
error.child1 = vhdChildren[parent]
error.child2 = path
throw error // should we throw?
}
vhdChildren[parent] = path
}
vhdChildren[parent] = path
}
})
} catch (error) {
vhds.delete(path)
onLog(`error while checking the VHD with path ${path}`, { error })
if (error?.code === 'ERR_ASSERTION' && remove) {
onLog(`deleting broken ${path}`)
await handler.unlink(path)
return VhdAbstract.unlink(handler, path)
}
}
})
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const statePath = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
onLog('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
onLog(`deleting orphan merge state ${statePath}`)
await handler.unlink(statePath)
}
}
}
// check if alias are correct
// check if all vhd in data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
})
// remove VHDs with missing ancestors
{
const deletions = []
// return true if the VHD has been deleted or is missing
const deleteIfOrphan = vhd => {
const parent = vhdParents[vhd]
const deleteIfOrphan = vhdPath => {
const parent = vhdParents[vhdPath]
if (parent === undefined) {
return
}
// no longer needs to be checked
delete vhdParents[vhd]
delete vhdParents[vhdPath]
deleteIfOrphan(parent)
if (!vhds.has(parent)) {
vhds.delete(vhd)
vhds.delete(vhdPath)
onLog(`the parent ${parent} of the VHD ${vhd} is missing`)
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
if (remove) {
onLog(`deleting orphan VHD ${vhd}`)
deletions.push(handler.unlink(vhd))
onLog(`deleting orphan VHD ${vhdPath}`)
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
}
@@ -188,7 +296,7 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
await Promise.all(deletions)
}
const jsons = []
const jsons = new Set()
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -196,7 +304,7 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
})
entries.forEach(path => {
if (isMetadataFile(path)) {
jsons.push(path)
jsons.add(path)
} else if (isXvaFile(path)) {
xvas.add(path)
} else if (isXvaSumFile(path)) {
@@ -218,22 +326,25 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
// compile the list of unused XVAs and VHDs, and remove backup metadata which
// reference a missing XVA/VHD
await asyncMap(jsons, async json => {
const metadata = JSON.parse(await handler.readFile(json))
let metadata
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
onLog(`failed to read metadata file ${json}`, { error })
jsons.delete(json)
return
}
const { mode } = metadata
let size
if (mode === 'full') {
const linkedXva = resolve('/', vmDir, metadata.xva)
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
size = await handler.getSize(linkedXva).catch(error => {
onLog(`failed to get size of ${json}`, { error })
})
} else {
onLog(`the XVA linked to the metadata ${json} is missing`)
if (remove) {
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
}
@@ -243,42 +354,29 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
return Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
})()
const missingVhds = linkedVhds.filter(_ => !vhds.has(_))
// FIXME: find better approach by keeping as much of the backup as
// possible (existing disks) even if one disk is missing
if (linkedVhds.every(_ => vhds.has(_))) {
if (missingVhds.length === 0) {
linkedVhds.forEach(_ => unusedVhds.delete(_))
size = await asyncMap(linkedVhds, vhd => handler.getSize(vhd)).then(sum, error => {
onLog(`failed to get size of ${json}`, { error })
linkedVhds.forEach(path => {
vhdsToJSons[path] = json
})
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`)
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
if (remove) {
onLog(`deleting incomplete backup ${json}`)
jsons.delete(json)
await handler.unlink(json)
}
}
}
const metadataSize = metadata.size
if (size !== undefined && metadataSize !== size) {
onLog(`incorrect size in metadata: ${metadataSize ?? 'none'} instead of ${size}`)
// don't update if the the stored size is greater than found files,
// it can indicates a problem
if (fixMetadata && (metadataSize === undefined || metadataSize < size)) {
try {
metadata.size = size
await handler.writeFile(json, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${json}`, { error })
}
}
}
})
// TODO: parallelize by vm/job/vdi
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// ancestor
@@ -312,7 +410,7 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
onLog(`the VHD ${vhd} is unused`)
if (remove) {
onLog(`deleting unused VHD ${vhd}`)
unusedVhdsDeletion.push(handler.unlink(vhd))
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -321,22 +419,31 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
})
// merge interrupted VHDs
if (merge) {
vhdsList.interruptedVhds.forEach(parent => {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
})
for (const parent of interruptedVhds.keys()) {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
}
Object.keys(vhdChainsToMerge).forEach(key => {
const chain = vhdChainsToMerge[key]
Object.values(vhdChainsToMerge).forEach(chain => {
if (chain !== undefined) {
unusedVhdsDeletion.push(mergeVhdChain(chain, { handler, onLog, remove, merge }))
toMerge.push(chain)
}
})
}
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true
}
})
}
await Promise.all([
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
onLog(`the XVA ${path} is unused`)
if (remove) {
@@ -355,4 +462,55 @@ exports.cleanVm = async function cleanVm(vmDir, { fixMetadata, remove, merge, on
}
}),
])
// update size for delta metadata with merged VHD
// check for the other that the size is the same as the real file size
await asyncMap(jsons, async metadataPath => {
const metadata = JSON.parse(await handler.readFile(metadataPath))
let fileSystemSize
const merged = metadataWithMergedVhd[metadataPath] !== undefined
const { mode, size, vhds, xva } = metadata
try {
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
fileSystemSize = await handler.getSize(linkedXva)
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
// the size is not computed in some cases (e.g. VhdDirectory)
if (fileSystemSize === undefined) {
return
}
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
}
}
} catch (error) {
onLog(`failed to get size of ${metadataPath}`, { error })
return
}
// systematically update size after a merge
if ((merged || fixMetadata) && size !== fileSystemSize) {
metadata.size = fileSystemSize
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
}
}
})
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,
}
}

View File

@@ -1,7 +1,9 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
@@ -17,6 +19,14 @@ const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
// Resolve an object UUID to its opaque ref via `<type>.get_by_uuid`,
// memoizing results in `cache` (a Map) so each UUID is resolved at most once.
const resolveUuid = async (xapi, cache, uuid, type) => {
  const cached = cache.get(uuid)
  if (cached !== undefined) {
    return cached
  }
  const ref = await xapi.call(`${type}.get_by_uuid`, uuid)
  cache.set(uuid, ref)
  return ref
}
exports.exportDeltaVm = async function exportDeltaVm(
vm,
@@ -165,6 +175,12 @@ exports.importDeltaVm = defer(async function importDeltaVm(
}
}
const cache = new Map()
const mapVdisSrRefs = {}
for (const [vdiUuid, srUuid] of Object.entries(mapVdisSrs)) {
mapVdisSrRefs[vdiUuid] = await resolveUuid(xapi, cache, srUuid, 'SR')
}
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
@@ -188,7 +204,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
sr: mapVdisSrs[vdi.uuid] ?? sr.$ref,
sr: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => suspendVdi.$destroy())
@@ -255,7 +271,7 @@ exports.importDeltaVm = defer(async function importDeltaVm(
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
SR: mapVdisSrs[vdi.uuid] ?? sr.$ref,
SR: mapVdisSrRefs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => newVdi.$destroy())

View File

@@ -1,3 +1,5 @@
'use strict'
exports.extractIdsFromSimplePattern = function extractIdsFromSimplePattern(pattern) {
if (pattern === undefined) {
return []

View File

@@ -1,3 +1,5 @@
'use strict'
const { utcFormat, utcParse } = require('d3-time-format')
// Format a date in ISO 8601 in a safe way to be used in filenames

View File

@@ -1,3 +1,5 @@
'use strict'
const eos = require('end-of-stream')
const { PassThrough } = require('stream')

View File

@@ -1,3 +1,5 @@
'use strict'
// returns all entries but the last retention-th
exports.getOldEntries = function getOldEntries(retention, entries) {
return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries

View File

@@ -1,4 +1,6 @@
const Disposable = require('promise-toolbox/Disposable.js')
'use strict'
const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')

View File

@@ -1,3 +1,5 @@
'use strict'
const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR

View File

@@ -1,11 +1,26 @@
'use strict'
const assert = require('assert')
const isGzipFile = async (handler, fd) => {
const COMPRESSED_MAGIC_NUMBERS = [
// https://tools.ietf.org/html/rfc1952.html#page-5
const magicNumber = Buffer.allocUnsafe(2)
Buffer.from('1F8B', 'hex'),
assert.strictEqual((await handler.read(fd, magicNumber, 0)).bytesRead, magicNumber.length)
return magicNumber[0] === 31 && magicNumber[1] === 139
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#zstandard-frames
Buffer.from('28B52FFD', 'hex'),
]
const MAGIC_NUMBER_MAX_LENGTH = Math.max(...COMPRESSED_MAGIC_NUMBERS.map(_ => _.length))
const isCompressedFile = async (handler, fd) => {
const header = Buffer.allocUnsafe(MAGIC_NUMBER_MAX_LENGTH)
assert.strictEqual((await handler.read(fd, header, 0)).bytesRead, header.length)
for (const magicNumber of COMPRESSED_MAGIC_NUMBERS) {
if (magicNumber.compare(header, 0, magicNumber.length) === 0) {
return true
}
}
return false
}
// TODO: better check?
@@ -43,8 +58,8 @@ async function isValidXva(path) {
return false
}
return (await isGzipFile(handler, fd))
? true // gzip files cannot be validated at this time
return (await isCompressedFile(handler, fd))
? true // compressed files cannot be validated at this time
: await isValidTar(handler, size, fd)
} finally {
handler.closeFile(fd).catch(noop)

View File

@@ -1,4 +1,6 @@
const fromCallback = require('promise-toolbox/fromCallback.js')
'use strict'
const fromCallback = require('promise-toolbox/fromCallback')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

View File

@@ -1,4 +1,6 @@
const fromCallback = require('promise-toolbox/fromCallback.js')
'use strict'
const fromCallback = require('promise-toolbox/fromCallback')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')

View File

@@ -1,3 +1,5 @@
'use strict'
exports.watchStreamSize = function watchStreamSize(stream, container = { size: 0 }) {
stream.on('data', data => {
container.size += data.length

View File

@@ -1,3 +1,5 @@
'use strict'
const mapValues = require('lodash/mapValues.js')
const { dirname } = require('path')

View File

@@ -0,0 +1,90 @@
#!/usr/bin/env node

// Standalone merge worker: drains the clean-vm task queue of a backup remote.
//
// It is spawned with its working directory set to the remote's root
// (presumably by `run` in ./index.js — the queue path CLEAN_VM_QUEUE is
// resolved against `process.cwd()` via the `file://` handler URL).
'use strict'
const { catchGlobalErrors } = require('@xen-orchestra/log/configure.js')
const { createLogger } = require('@xen-orchestra/log')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { join } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const min = require('lodash/min')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { RemoteAdapter } = require('../RemoteAdapter.js')
const { CLEAN_VM_QUEUE } = require('./index.js')

// -------------------------------------------------------------------

// log uncaught errors/rejections instead of letting the process die silently
catchGlobalErrors(createLogger('xo:backups:mergeWorker'))

const { fatal, info, warn } = createLogger('xo:backups:mergeWorker')

// -------------------------------------------------------------------

// Disposable.wrap runs the generator and guarantees the yielded disposables
// (handler, queue lock) are released when it finishes or throws.
const main = Disposable.wrap(async function* main(args) {
  // cwd is expected to be the remote's root — TODO confirm against the caller
  const handler = yield getSyncedHandler({ url: 'file://' + process.cwd() })

  // hold the queue lock for the whole lifetime of this worker so only one
  // worker runs per remote (see the `check()` in ./index.js)
  yield handler.lock(CLEAN_VM_QUEUE)

  const adapter = new RemoteAdapter(handler)

  // Poll the queue up to 10 times, 10 s apart; returns the entries as soon as
  // the queue is non-empty, or undefined after ~100 s of inactivity, which
  // makes the main loop below exit and the worker terminate.
  const listRetry = async () => {
    const timeoutResolver = resolve => setTimeout(resolve, 10e3)
    for (let i = 0; i < 10; ++i) {
      const entries = await handler.list(CLEAN_VM_QUEUE)
      if (entries.length !== 0) {
        return entries
      }
      await new Promise(timeoutResolver)
    }
  }

  let taskFiles
  while ((taskFiles = await listRetry()) !== undefined) {
    // process the lexicographically smallest entry first; `_`-prefixed
    // (previously attempted) tasks sort after fresh timestamp-named ones
    const taskFileBasename = min(taskFiles)
    const previousTaskFile = join(CLEAN_VM_QUEUE, taskFileBasename)
    const taskFile = join(CLEAN_VM_QUEUE, '_' + taskFileBasename)

    // move this task to the end: each attempt adds one `_` prefix, so the
    // name length also counts how many times the task has been retried
    try {
      await handler.rename(previousTaskFile, taskFile)
    } catch (error) {
      // this error occurs if the task failed too many times (i.e. too many `_` prefixes)
      // there is nothing more that can be done
      if (error.code === 'ENAMETOOLONG') {
        await handler.unlink(previousTaskFile)
      }
      // rethrown in all cases: propagates out of main() to the fatal handler below
      throw error
    }

    try {
      // the task file's content is the UUID of the VM whose backups to clean
      const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
      try {
        await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
      } catch (error) {
        // consider the clean successful if the VM dir is missing
        if (error.code !== 'ENOENT') {
          throw error
        }
      }

      // fire-and-forget: a failed deletion only leaves a stale task behind
      handler.unlink(taskFile).catch(error => warn('deleting task failure', { error }))
    } catch (error) {
      // log and move on to the next task; the `_`-renaming above bounds retries
      warn('failure handling task', { error })
    }
  }
})

info('starting')
main(process.argv.slice(2)).then(
  () => {
    info('bye :-)')
  },
  error => {
    fatal(error)
    process.exit(1)
  }
)

View File

@@ -0,0 +1,27 @@
'use strict'
const { join, resolve } = require('path')
const { spawn } = require('child_process')
const { check } = require('proper-lockfile')
const CLEAN_VM_QUEUE = (exports.CLEAN_VM_QUEUE = '/xo-vm-backups/.queue/clean-vm/')
const CLI_PATH = resolve(__dirname, 'cli.js')
exports.run = async function runMergeWorker(remotePath) {
try {
// TODO: find a way to pass the acquire the lock and then pass it down the worker
if (await check(join(remotePath, CLEAN_VM_QUEUE))) {
// already locked, don't start another worker
return
}
spawn(CLI_PATH, {
cwd: remotePath,
detached: true,
stdio: 'inherit',
}).unref()
} catch (error) {
// we usually don't want to throw if the merge worker failed to start
return error
}
}

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.13.0",
"version": "0.20.0",
"engines": {
"node": ">=14.6"
},
@@ -16,14 +16,15 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/compose": "^2.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^1.0.0",
"@vates/disposable": "^0.1.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^0.18.0",
"@xen-orchestra/fs": "^0.20.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^3.6.0",
"compare-versions": "^4.0.1",
"d3-time-format": "^3.0.0",
"end-of-stream": "^1.4.4",
"fs-extra": "^10.0.0",
@@ -32,13 +33,14 @@
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.19.2",
"pump": "^3.0.0",
"vhd-lib": "^1.2.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^8.3.2",
"vhd-lib": "^3.1.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.7.0"
"@xen-orchestra/xapi": "^0.9.0"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -1,3 +1,5 @@
'use strict'
const { DIR_XO_CONFIG_BACKUPS, DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter.js')
exports.parseMetadataBackupId = function parseMetadataBackupId(backupId) {

View File

@@ -1,3 +1,5 @@
'use strict'
const path = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { fork } = require('child_process')

View File

@@ -0,0 +1,94 @@
'use strict'
// a valid footer of a 2
exports.VHDFOOTER = {
cookie: 'conectix',
features: 2,
fileFormatVersion: 65536,
dataOffset: 512,
timestamp: 0,
creatorApplication: 'caml',
creatorVersion: 1,
creatorHostOs: 0,
originalSize: 53687091200,
currentSize: 53687091200,
diskGeometry: { cylinders: 25700, heads: 16, sectorsPerTrackCylinder: 255 },
diskType: 3,
checksum: 4294962945,
uuid: Buffer.from('d8dbcad85265421e8b298d99c2eec551', 'utf-8'),
saved: '',
hidden: '',
reserved: '',
}
exports.VHDHEADER = {
cookie: 'cxsparse',
dataOffset: undefined,
tableOffset: 2048,
headerVersion: 65536,
maxTableEntries: 25600,
blockSize: 2097152,
checksum: 4294964241,
parentUuid: null,
parentTimestamp: 0,
reserved1: 0,
parentUnicodeName: '',
parentLocatorEntry: [
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
{
platformCode: 0,
platformDataSpace: 0,
platformDataLength: 0,
reserved: 0,
platformDataOffset: 0,
},
],
reserved2: '',
}

View File

@@ -1,9 +1,11 @@
'use strict'
const assert = require('assert')
const map = require('lodash/map.js')
const mapValues = require('lodash/mapValues.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, default: Vhd } = require('vhd-lib')
const { chainVhd, checkVhdChain, openVhd, VhdAbstract } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { dirname } = require('path')
@@ -16,6 +18,7 @@ const { MixinBackupWriter } = require('./_MixinBackupWriter.js')
const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
@@ -23,6 +26,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
async checkBaseVdis(baseUuidToSrcVdi) {
const { handler } = this._adapter
const backup = this._backup
const adapter = this._adapter
const backupDir = getVmBackupDir(backup.vm.uuid)
const vdisDir = `${backupDir}/vdis/${backup.job.id}`
@@ -34,16 +38,21 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
await asyncMap(vhds, async path => {
try {
await checkVhdChain(handler, path)
// Warning, this should not be written as found = found || await adapter.isMergeableParent(packedBaseUuid, path)
//
// since all the checks of a path are done in parallel, found would be containing
// only the last answer of isMergeableParent which is probably not the right one
// this led to the support tickets https://help.vates.fr/#ticket/zoom/4751 , 4729, 4665 and 4300
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
const isMergeable = await adapter.isMergeableParent(packedBaseUuid, path)
found = found || isMergeable
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(handler.unlink(path))
await ignoreErrors.call(VhdAbstract.unlink(handler, path))
}
})
} catch (error) {
@@ -113,19 +122,13 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
async _deleteOldEntries() {
return Task.run({ name: 'merge' }, async () => {
const adapter = this._adapter
const oldEntries = this._oldEntries
const adapter = this._adapter
const oldEntries = this._oldEntries
let size = 0
// delete sequentially from newest to oldest to avoid unnecessary merges
for (let i = oldEntries.length; i-- > 0; ) {
size += await adapter.deleteDeltaVmBackups([oldEntries[i]])
}
return {
size,
}
})
// delete sequentially from newest to oldest to avoid unnecessary merges
for (let i = oldEntries.length; i-- > 0; ) {
await adapter.deleteDeltaVmBackups([oldEntries[i]])
}
}
async _transfer({ timestamp, deltaExport, sizeContainers }) {
@@ -150,7 +153,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
// don't do delta for it
vdi.uuid
: vdi.$snapshot_of$uuid
}/${basename}.vhd`
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = `${backupDir}/${basename}.json`
@@ -194,7 +197,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
await adapter.outputStream(path, deltaExport.streams[`${id}.vhd`], {
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
@@ -206,11 +209,11 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
// set the correct UUID in the VHD
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
await Disposable.use(openVhd(handler, path), async vhd => {
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
})
)
return {

View File

@@ -1,5 +1,7 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { formatFilenameDate } = require('../_filenameDate.js')

View File

@@ -1,3 +1,5 @@
'use strict'
const { formatFilenameDate } = require('../_filenameDate.js')
const { getOldEntries } = require('../_getOldEntries.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')

View File

@@ -1,4 +1,6 @@
const ignoreErrors = require('promise-toolbox/ignoreErrors.js')
'use strict'
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { formatDateTime } = require('@xen-orchestra/xapi')

View File

@@ -1,3 +1,5 @@
'use strict'
const { AbstractWriter } = require('./_AbstractWriter.js')
exports.AbstractDeltaWriter = class AbstractDeltaWriter extends AbstractWriter {

View File

@@ -1,3 +1,5 @@
'use strict'
const { AbstractWriter } = require('./_AbstractWriter.js')
exports.AbstractFullWriter = class AbstractFullWriter extends AbstractWriter {

Some files were not shown because too many files have changed in this diff Show More