Compare commits


153 Commits

Author SHA1 Message Date
Nicolas Raynaud
6f19968456 try to fix big backups. 2021-03-12 13:42:22 +01:00
Nicolas Raynaud
89cc46f69e Merge branch 'master' into nr-s3-region-http 2021-03-11 16:46:12 +01:00
Julien Fontanet
aff874c68a chore(xo-server,xo-server-load-balancer): phase out mapToArray (#5662)
Use native `Array#map` or `Object.values` where possible and import directly from `lodash`.

Reasons:
- less dependencies
- more idiomatic
- better example for new code
2021-03-11 15:17:28 +01:00
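For instance, a minimal sketch of such a migration (the `vms` collection and its fields are illustrative):
```js
const vms = { a: { name_label: 'web' }, b: { name_label: 'db' } }

// before (hypothetical): const names = mapToArray(vms, vm => vm.name_label)
// after: plain objects go through Object.values + native Array#map
const names = Object.values(vms).map(vm => vm.name_label)
console.log(names) // → ['web', 'db']
```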
Julien Fontanet
27abee0850 chore(xo-server-load-balancer): typo ressource → resource 2021-03-11 14:17:02 +01:00
Julien Fontanet
bcfb19f7c5 feat(normalize-packages): delete empty bin field 2021-03-11 12:15:53 +01:00
Nicolas Raynaud
d3ea469f46 add http and region parameters to S3 2021-03-10 13:58:57 +01:00
Julien Fontanet
306a8ce0df feat(@xen-orchestra/proxy): 0.11.5 2021-03-10 13:48:15 +01:00
Julien Fontanet
d9ea8d2c9c feat: release 5.56.1 2021-03-10 13:04:11 +01:00
Nicolas Raynaud
31faa34ca4 add http and region parameters to S3 2021-03-10 12:55:30 +01:00
Julien Fontanet
b479956bb2 feat: technical release (#5657) 2021-03-10 11:11:27 +01:00
Julien Fontanet
b32dc0e450 fix(xen-api/call): allow *.get_all_records in read only 2021-03-10 09:41:49 +01:00
badrAZ
5cca5d69af fix(@xen-orchestra/backups/Backup.js): fix rolling snapshot not run if alone (#5656) 2021-03-09 18:24:58 +01:00
badrAZ
e0e89213d3 chore(xo-server): delete unused workers mixin (#5654)
Due to 0811da9014
2021-03-09 15:26:21 +01:00
Julien Fontanet
e246c19eb3 feat(xen-api/Ref): introduce new utils to manipulate refs (#5650)
Fixes xoa-support#3463

See xapi-project/xen-api#4338
2021-03-09 14:59:32 +01:00
badrAZ
d282d8dd52 fix(@xen-orchestra/backups): missing targets no longer prevent runs (#5651)
Fixes #5353
2021-03-09 14:37:52 +01:00
badrAZ
9601ad13ee fix(xo-web/proxies): fix "invalid parameters" error on canceling proxy deploy (#5649)
Issue: fetchProxyUpgrades called with an undefined proxy in the proxies collection.

Solution: Interrupt the deployment process on cancel, in order to not fetch updates in this case.
2021-03-09 10:37:59 +01:00
badrAZ
b7603e109d feat(xo-web/backup/new): ability to force full backup per schedule in case of CR (#5648)
Fixes #5541
2021-03-09 09:38:56 +01:00
Julien Fontanet
066f54906b chore: format with Prettier 2021-03-08 17:41:10 +01:00
Julien Fontanet
ea0aa9df70 chore(xen-api): disable problematic ESLint rules on specific lines 2021-03-08 14:23:57 +01:00
badrAZ
0811da9014 feat(xo-server): use @xen-orchestra/backups lib to run VM backups (#5642) 2021-03-08 14:05:41 +01:00
Julien Fontanet
d601290c46 fix(destroy VM): try harder to destroy VDIs (#5645)
Should fix #4926

Works around the XCP-ng/XenServer control-domain unmount delay, especially with iSCSI SRs.

This issue heavily impacts XO backups, which create snapshots, export them, and delete them.
2021-03-08 09:46:51 +01:00
badrAZ
64357aff55 fix(@xen-orchestra/xapi): fix full VM backup imported as a template (#5646) 2021-03-05 17:41:50 +01:00
badrAZ
a20a3311b5 fix(xo-server/backup-ng): fix "xapi._assertHealthyVdiChains is not a function" error (#5647)
Introduced by 4c27562650
2021-03-05 17:33:24 +01:00
Julien Fontanet
ffce5d4bb5 chore(@xen-orchestra/xapi): make xen-api a peer dep 2021-03-05 16:17:02 +01:00
badrAZ
cbfadc019a fix(@xen-orchestra/backups): fix "asyncMapSettled is not a function" error (#5643) 2021-03-05 12:23:25 +01:00
Julien Fontanet
bf5427f3e8 feat(@xen-orchestra/proxy): 0.11.4 2021-03-05 11:39:06 +01:00
badrAZ
4c27562650 fix(xo-server/xapi): dont override @xen-orchestra/xapi#_assertHealthyVdiChain (#5641) 2021-03-05 11:38:49 +01:00
badrAZ
e8d20532ba feat(xo-server): use @xen-orchestra/backups lib to run metadata backups (#5616) 2021-03-05 10:51:00 +01:00
Mathieu
d928157569 fix(xo-web/vm/tab-network): an error has occurred when trying to sort empty network (#5639)
This issue happens when you have an ACL role on one VM, but you don't have an ACL role on the network of this VM.
2021-03-05 09:37:40 +01:00
Mathieu
872b05a7de feat(xo-server/VIF): set MAC address requires Admin ACL on network (#5631)
Fixes #4700
2021-03-05 09:27:22 +01:00
Julien Fontanet
6ea71ec6a2 chore(xapi/VM_destroy): add new lines 2021-03-04 16:11:08 +01:00
Julien Fontanet
139cb72209 chore(xapi/VM_destroy): use VM_getDisks 2021-03-04 16:11:08 +01:00
Julien Fontanet
855a15e696 fix(xapi/VM_getDisks): sync iteration 2021-03-04 16:11:08 +01:00
Mathieu
eeebd3fc1b fix(xo-web/DropdownButton): add required id prop (#5628)
See https://react-bootstrap.netlify.app/components/dropdowns/#dropdown-button-props
2021-03-04 15:56:01 +01:00
Julien Fontanet
a4b209c654 fix(disposable/deduped): race condition when disposed during acquisition
Introduced in 43aad3d11
2021-03-04 10:46:03 +01:00
Julien Fontanet
43aad3d117 feat(disposable/deduped): works with sync factories 2021-03-03 17:43:39 +01:00
Mathieu
f2d4fdd4d2 fix(xo-web/editable/number): throw error if onChange fails (#5634) 2021-03-03 16:33:41 +01:00
Julien Fontanet
a630106d80 feat(@xen-orchestra/backups): 0.6.1 2021-03-03 12:05:33 +01:00
Julien Fontanet
c7acd455c5 feat(@xen-orchestra/async-map): 0.1.2 2021-03-03 12:05:05 +01:00
Julien Fontanet
555a9d4883 fix(async-map/asyncMapSettled): issue when latest promise rejects 2021-03-03 11:58:51 +01:00
Julien Fontanet
ec4ce0c70c fix(async-map/test): missing await 2021-03-03 11:57:51 +01:00
Julien Fontanet
edf275badc fix(backups/RemoteAdapter#readDeltaVmBackup): new asyncMapSettled does not support plain objects
Introduced by 20377e9c56
2021-03-03 11:49:21 +01:00
Julien Fontanet
2e91285f02 feat(xo-web/debug): similar display for success and result 2021-03-03 10:06:36 +01:00
Julien Fontanet
ec69ba7e0e feat(xo-web/debug): display API call result 2021-03-03 10:06:03 +01:00
Julien Fontanet
3804ca18cb feat(@xen-orchestra/proxy): 0.11.3 2021-03-03 09:19:14 +01:00
Julien Fontanet
9ea3222da8 feat(@xen-orchestra/xapi): 0.4.3 2021-03-03 09:18:58 +01:00
Julien Fontanet
df48524ca5 feat(@xen-orchestra/async-map): 0.1.1 2021-03-03 09:17:08 +01:00
Julien Fontanet
b3aff1162c feat(@xen-orchestra/proxy): 0.11.2 2021-03-03 08:58:42 +01:00
Julien Fontanet
891ca8a31b feat(@xen-orchestra/xapi): 0.4.2 2021-03-03 08:58:30 +01:00
Julien Fontanet
ba99ac8b17 fix(package.json): support non-transpiled @xen-orchestra/* and xo-* 2021-03-02 16:53:02 +01:00
badrAZ
1ff25943dc fix(xo-server): enable async_hooks support in Bluebird (#5635)
Necessary for `@xen-orchestra/backups`.
2021-03-02 13:22:47 +01:00
Julien Fontanet
deb58e40d5 chore(xo-server/bin): format with Prettier 2021-03-02 11:39:42 +01:00
Julien Fontanet
eab6eb8fab chore(xen-api): event-to-promise → promise-toolbox/fromEvent 2021-03-02 10:20:38 +01:00
Julien Fontanet
ff65367851 chore(async-map): add basic tests 2021-03-01 17:25:08 +01:00
Julien Fontanet
f16e29c63e fix(async-map/asyncMapSettled): fix hasError condition 2021-03-01 17:22:05 +01:00
Julien Fontanet
cdfeb094b3 chore(async-map/package.json): remove unused browserslist 2021-03-01 17:01:48 +01:00
Julien Fontanet
b63c5d2987 chore: dont import @xen-orchestra/log/dist 2021-03-01 17:00:42 +01:00
Julien Fontanet
015309c882 chore(backups-cli): use @xen-orchestra/async-map 2021-03-01 16:55:06 +01:00
Julien Fontanet
20377e9c56 feat(async-map): new implementations
These implementations are no longer compatible with plain objects but support iterables.

The previous implementation is still available as `@xen-orchestra/async-map/legacy`.
2021-03-01 16:55:06 +01:00
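A minimal sketch of the difference, assuming a plain-object collection:
```js
const { asyncMap } = require('@xen-orchestra/async-map')
const asyncMapLegacy = require('@xen-orchestra/async-map/legacy')

const object = { foo: 42, bar: 3.14 }

// legacy: plain objects are iterated directly
asyncMapLegacy(object, async value => value * 2).then(console.log) // → [84, 6.28]

// new: convert plain objects to an iterable first
asyncMap(Object.values(object), async value => value * 2).then(console.log) // → [84, 6.28]
```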
Mathieu
08857a6198 fix(xo-server): fix asyncMap is not defined (#5632)
Introduced by 57612ee
2021-03-01 16:54:53 +01:00
badrAZ
d9ce1b3a97 feat(xo-server#importVmBackupNg): use @xen-orchestra/backups lib (#5630) 2021-03-01 13:36:23 +01:00
Julien Fontanet
d166073b16 chore(xo-server/package.json): fix deps sorting
Introduced by 624f32826

Sorting values is different than sorting JSON text.
2021-03-01 09:48:13 +01:00
Julien Fontanet
f858c196f4 chore: rename asyncMap → asyncMapSettled
To express more clearly this function's behavior.
2021-03-01 09:45:56 +01:00
Julien Fontanet
57612eeced feat(async-map): remove build step 2021-02-28 23:32:23 +01:00
Julien Fontanet
be2257153c feat(proxy-cli): clearer call headers 2021-02-26 17:50:47 +01:00
Julien Fontanet
d920a97f4f feat(proxy-cli): supports nested sequences 2021-02-26 17:23:49 +01:00
Julien Fontanet
322f2a1728 chore(xo-server/runJob): group runningJobs logic 2021-02-26 16:52:43 +01:00
Julien Fontanet
cfe6b0d9ab fix(xo-server/runJob): emit job:terminated and forward error
Introduced in fd560c351
2021-02-26 16:52:43 +01:00
Pierre Donias
e229deb238 feat: release 5.56.0 (#5629) 2021-02-26 16:50:00 +01:00
Julien Fontanet
8cdde947bc feat(@xen-orchestra/proxy): 0.11.1 2021-02-26 16:07:47 +01:00
Julien Fontanet
c1b3ddf87a fix(proxy): add missing peerdep xen-api 2021-02-26 16:07:47 +01:00
Pierre Donias
27d97add1e feat: technical release (#5627) 2021-02-26 16:07:15 +01:00
Mathieu
3783724c40 fix(xo-web/task): items-per-page dropdown position (#5584) 2021-02-26 15:21:45 +01:00
Julien Fontanet
67bc4ffe68 feat(@xen-orchestra/proxy): 0.11.0 2021-02-26 15:15:05 +01:00
Julien Fontanet
453bbfbbde feat(@xen-orchestra/backups): 0.6.0 2021-02-26 15:11:35 +01:00
Julien Fontanet
ff463c4261 chore(proxy-cli): mutualize request options 2021-02-26 15:10:42 +01:00
Damien Thenot
748b77ae7a fix(docs/advanced): typo telemtry → telemetry (#5625) 2021-02-26 14:57:11 +01:00
Rajaa.BARHTAOUI
58c1005657 fix(xo-web/migrateVms): explicit main SR (#5615)
See #5577

So that when there's no default SR on the pool and the VM's snapshot has orphan
VDI-snapshots, xo-server knows where to migrate them
2021-02-26 14:01:48 +01:00
Rajaa.BARHTAOUI
9271eb61ac fix(xo-web/vm/advanced): fix 'an error has occurred' (#5604)
Fixes #5592
2021-02-26 13:39:34 +01:00
Rajaa.BARHTAOUI
c82cee25a5 fix(xo-web): fix 'mapVdisSrs is assigned a value but never used' error (#5617)
Introduced by 90cafa126f
2021-02-26 13:34:33 +01:00
badrAZ
2e5dfa5845 fix(xo-server#deleteVmBackupNg): pass remote record to getBackupsRemoteAdapter instead of ID (#5624)
Introduced by baa5847949
2021-02-26 12:07:04 +01:00
Julien Fontanet
693c07b927 chore: update app-conf to 0.9.0 2021-02-26 12:02:39 +01:00
Julien Fontanet
71a6f70f46 chore: update promise-toolbox to 0.17.0
Allow using `Disposable.use()`.
2021-02-26 12:02:39 +01:00
badrAZ
2952b5a7ec feat(xo-server#deleteVmBackupNg): use @xen-orchestra/backups lib (#5623) 2021-02-26 11:40:28 +01:00
badrAZ
baa5847949 feat(xo-server#_listVmBackupsOnRemote): use @xen-orchestra/backups lib (#5622) 2021-02-26 11:17:45 +01:00
Rajaa.BARHTAOUI
b9ce0bd99d fix(xo-web): fix 'mapValues is defined but never used' error (#5618)
Introduced by 062fb3ba30
2021-02-26 10:11:04 +01:00
Julien Fontanet
aac61d8120 chore: update golike-defer to 0.5.1 2021-02-25 18:41:11 +01:00
Julien Fontanet
1f6edfdbcc fix(xo-server/runJob): upgrade defer to fix import
Introduced by fd560c351
2021-02-25 18:40:55 +01:00
Mathieu
9d1ce7fadf fix(backups/importDeltaVm): restore the bios_strings (#5598)
Aligned with XAPI behavior for XVA exports/imports.
2021-02-25 17:26:43 +01:00
Julien Fontanet
fd560c351f fix(xo-server/runJob): register job as soon as job.start (#5620) 2021-02-25 17:00:07 +01:00
badrAZ
b45556062d fix(@xen-orchestra/backups): don't double JSON config (#5621)
Config returned by `Xo#exportConfig` is already a string, it must not be JSON encoded again.
2021-02-25 16:49:45 +01:00
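A minimal illustration of the pitfall:
```js
const config = '{"users":[]}' // Xo#exportConfig already returns a JSON string

JSON.stringify(config) // '"{\\"users\\":[]}"' (double-encoded, wrong)
config                 // '{"users":[]}' (already a string, write it as-is)
```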
badrAZ
5be45599ed fix(xo-server/metadata-backup): XO config not restored (#5619)
Introduced by 61c3057060

`log.taskId` cannot be compared with the `rootTaskId` because it's generated by the Task lib and the `rootTaskId` is generated by the `xo-server` logger.
2021-02-25 16:41:39 +01:00
Julien Fontanet
9b2533dbc9 chore(yarn.lock): update 2021-02-25 14:43:16 +01:00
Julien Fontanet
ec1a4b1974 fix(proxy/backup.run): bind getConnectedRecord
Introduced in 4eb9aa9cc
2021-02-25 11:19:20 +01:00
Julien Fontanet
bb9fde17c9 chore(xo-server/_backupVm): update todo list 2021-02-25 11:03:54 +01:00
Rajaa.BARHTAOUI
8cb524080c fix(xo-server#_migrateVmWithStorageMotion): don't migrate VM VDIs to default SR (#5577)
See xoa-support#3248
See xoa-support#3355
2021-02-25 10:35:14 +01:00
badrAZ
171ec54781 feat(xo-server#restoreMetadataBackup): use @xen-orchestra/backups lib (#5611) 2021-02-25 09:43:15 +01:00
Julien Fontanet
5d9503b78c feat(backups/Backup): getAdapter accepts ids instead of remotes
This should make it easier to interface with xo-server.
2021-02-24 17:33:26 +01:00
Julien Fontanet
f56cb69c2e chore(backups/Backup): remove unused property 2021-02-24 17:30:43 +01:00
Julien Fontanet
4eb9aa9ccb feat(backups/Backup): pass directly getConnectedRecord (#5614)
This should make it easier to interface with xo-server.
2021-02-24 17:25:46 +01:00
Pierre Donias
11801f306c feat: technical release (#5613) 2021-02-24 15:54:37 +01:00
badrAZ
95c2944f30 feat(xo-server#deleteMetadataBackup): use @xen-orchestra/backups lib (#5610) 2021-02-24 11:38:27 +01:00
badrAZ
5bd4c54ab6 feat(xo-server#_listXoMetadataBackups): use @xen-orchestra/backups lib (#5609) 2021-02-24 10:16:27 +01:00
Julien Fontanet
95d6d0a0fe chore(backups/formatVmBackup): ensure function is named 2021-02-24 10:08:59 +01:00
Julien Fontanet
7941be083a chore(backups): rename task.js → Task.js
To be in line with other modules in this lib.
2021-02-24 10:07:36 +01:00
badrAZ
e36efaec08 feat(xo-server#_listPoolMetadataBackups): use @xen-orchestra/backups lib (#5607) 2021-02-24 10:02:57 +01:00
Julien Fontanet
637afdb540 feat(@vates/toggle-scripts): 1.0.0 2021-02-24 08:55:38 +01:00
Julien Fontanet
dafdedef9a feat(toggle-scripts): supports npm < 7 2021-02-24 08:54:29 +01:00
Julien Fontanet
ce17ee2ae6 fix(toggle-scripts/package.json): fix files entry 2021-02-24 08:48:33 +01:00
Julien Fontanet
e74daa97d2 fix(toggle-scripts): fix usage 2021-02-23 21:54:31 +01:00
Julien Fontanet
44d64d1b80 fix(proxy): dont run systemd-service-installer in dev
See ronivay/XenOrchestraInstallerUpdater#62
2021-02-23 21:37:26 +01:00
Julien Fontanet
1a4731aa83 feat(@vates/toggle-scripts): 0.1.0 2021-02-23 21:32:58 +01:00
Julien Fontanet
a75e1c52b7 feat(toggle-scripts): CLI to enable/disable package.json scripts 2021-02-23 21:28:23 +01:00
Julien Fontanet
1b97cb263c feat(proxy/config): resourceDebouce → resourceCacheDelay
Similar to xo-server.
2021-02-23 19:54:20 +01:00
badrAZ
5c9a47b6b7 feat(xo-server#fetchBackupNgPartitionFiles): use @xen-orchestra/backups lib (#5606) 2021-02-23 17:48:11 +01:00
badrAZ
8a5fe86193 feat(xo-server#listBackupNgPartitionFiles): use @xen-orchestra/backups lib (#5605) 2021-02-23 17:40:02 +01:00
badrAZ
d9531e24a3 feat(xo-server/listBackupNgDiskPartitions): use @xen-orchestra/backups lib (#5599) 2021-02-23 17:34:55 +01:00
Julien Fontanet
624f328269 chore(xo-server/package.json): sort deps 2021-02-23 16:29:46 +01:00
badrAZ
a6f4e6771d fix(xo-server/package.json): missing dependency (#5603)
Introduced by a958fe86d7

Used in @xen-orchestra/backups/RemoteAdapter.js (L14) as of a506c21b80
2021-02-23 15:34:31 +01:00
Julien Fontanet
a506c21b80 feat(docs/installation): XO now requires Node >=14.5 2021-02-23 15:26:21 +01:00
Julien Fontanet
981193ed23 feat(docs/from the sources): XO now requires Node >=14.5 2021-02-23 15:12:42 +01:00
Julien Fontanet
85a6204db2 feat(@xen-orchestra/backups-cli): 0.4.0 2021-02-23 14:41:47 +01:00
Julien Fontanet
b82aba1181 chore(backups-cli): add usage to README 2021-02-23 14:40:15 +01:00
Julien Fontanet
0a6dea2c79 feat(backups-cli/clean-vms): better usage 2021-02-23 14:32:16 +01:00
Julien Fontanet
69b6d75927 feat(CHANGELOG.unreleased): release backups 2021-02-23 12:35:04 +01:00
Julien Fontanet
eff2d48cc5 feat(backups/RemoteAdapter#outputStream): make path first param
Similar to `fs/Abstract#outputStream()`.
2021-02-23 12:34:26 +01:00
Julien Fontanet
ca5af2505c fix(fs/outputStream): always make path first param
Introduced by 7a1377119
2021-02-23 11:47:19 +01:00
Julien Fontanet
a958fe86d7 feat(proxy): version 1 (#4495)
Co-authored-by: badrAZ <azizbibadr@gmail.com>
Co-authored-by: Mathieu <70369997+MathieuRA@users.noreply.github.com>
2021-02-23 08:58:10 +01:00
Julien Fontanet
3ed488e10f chore(compose): regenerate README 2021-02-22 15:09:23 +01:00
Julien Fontanet
dcc11f16b1 feat(@vates/compose): 2.0.0 2021-02-22 14:42:45 +01:00
Julien Fontanet
209706b70d feat(compose): forwards this to all functions 2021-02-22 14:42:18 +01:00
badrAZ
1bc80eb485 fix(package.json#moduleNameMapper): fix tests for non-transpiled @vates/ packages (#5593) 2021-02-19 16:34:14 +01:00
badrAZ
9ab9e3fe46 feat(vates/disposable): utilities for disposables (#5590) 2021-02-19 16:22:41 +01:00
Mathieu
d654c096ed feat(xo-web/menu): xoa.check sync between menu and support page (#5534) 2021-02-19 10:21:52 +01:00
Olivier Lambert
f5d5884988 feat(docs/advanced): add a section about terraform provider (#5589) 2021-02-19 09:22:05 +01:00
Julien Fontanet
2c016204bf feat(@vates/compose): 1.2.0 2021-02-18 17:03:17 +01:00
Julien Fontanet
04fd625bde feat(compose): this and args passed to first function 2021-02-18 17:03:17 +01:00
Mathieu
8455d4a49f fix(xo-web/select): wrapping text if label is too long (#5580)
See https://xcp-ng.org/forum/topic/4072/create-vm-network-names-too-large
2021-02-18 16:51:52 +01:00
Yannick Achy
a3960bb7c5 feat(docs/full backup): document offline backup (#5582)
Co-authored-by: yannick Achy <yannick.achy@vates.fr>
2021-02-18 15:15:21 +01:00
Mathieu
769262d60e fix(changelog): add # to PR reference (#5585) 2021-02-18 14:49:14 +01:00
Julien Fontanet
942567586f feat(@vates/compose): 1.1.1 2021-02-18 11:29:10 +01:00
Julien Fontanet
ba6baaec0a fix(compose): update README 2021-02-18 11:27:24 +01:00
Julien Fontanet
a8ac6fc738 fix(compose): require Node 7.6 2021-02-18 11:26:00 +01:00
Julien Fontanet
b027d3b1d6 feat(@vates/compose): 1.1.0 2021-02-18 11:25:33 +01:00
Julien Fontanet
71f9d268c9 feat(compose): async functions support 2021-02-18 11:25:14 +01:00
Julien Fontanet
2b91d4af99 feat(compose): right to left support 2021-02-18 11:25:14 +01:00
Julien Fontanet
0ec0e286ba feat(compose/README): document functions in an array 2021-02-18 11:25:14 +01:00
Rajaa.BARHTAOUI
258ae64568 fix(xo-web/home/vm): bulk intra pool migration: fix map VDI -> SR (#5578)
See xoa-support#3355
See xoa-support#3248
2021-02-18 11:04:47 +01:00
Rajaa.BARHTAOUI
90cafa126f feat(xo-web/migrateVm): show error when no main SR selected (#5568)
See xoa-support#3355
2021-02-18 10:57:00 +01:00
Julien Fontanet
43d31e285c feat(@vates/compose): 1.0.0 2021-02-18 10:40:27 +01:00
Julien Fontanet
57945e6751 feat(compose): new lib to compose functions 2021-02-18 10:39:57 +01:00
Rajaa.BARHTAOUI
fce56cbf4c fix(xo-server/utils/parseXml): keep all values as strings (#5581)
Fixes #5497

All values must be kept as strings because that's what the previous implementation used to do.

Introduced by 525369e0ce

See https://github.com/vatesfr/xen-orchestra/issues/5497#issuecomment-780314187

```
{
  'iscsi-target': {
    LUN: {
      vendor: 'TrueNAS',
      serial: '9eaa394581f3003',
      LUNid: 55,
      size: 10995116277760,
      SCSIid: '36589cfc000000581d40d6d5140d9b9da'
    }
  }
}
```
2021-02-18 09:46:17 +01:00
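With the fix, numeric-looking values such as `LUNid` and `size` should therefore stay strings; presumably the sample then parses as:

```
{
  'iscsi-target': {
    LUN: {
      vendor: 'TrueNAS',
      serial: '9eaa394581f3003',
      LUNid: '55',
      size: '10995116277760',
      SCSIid: '36589cfc000000581d40d6d5140d9b9da'
    }
  }
}
```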
Julien Fontanet
7a13771198 fix(fs/outputStream): make path the first param
From #5373
2021-02-17 17:11:55 +01:00
230 changed files with 8601 additions and 3462 deletions

.gitignore

@@ -9,6 +9,8 @@
/packages/*/dist/
/packages/*/node_modules/
/@xen-orchestra/proxy/src/app/mixins/index.js
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/

@vates/compose/README.md

@@ -0,0 +1,81 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/compose
[![Package Version](https://badgen.net/npm/v/@vates/compose)](https://npmjs.org/package/@vates/compose) ![License](https://badgen.net/npm/license/@vates/compose) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/compose)](https://bundlephobia.com/result?p=@vates/compose) [![Node compatibility](https://badgen.net/npm/node/@vates/compose)](https://npmjs.org/package/@vates/compose)
> Compose functions from left to right
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/compose):
```
> npm install --save @vates/compose
```
## Usage
```js
import { compose } from '@vates/compose'
const add2 = x => x + 2
const mul3 = x => x * 3
// const f = x => mul3(add2(x))
const f = compose(add2, mul3)
console.log(f(5))
// → 21
```
> The call context (`this`) of the composed function is forwarded to all functions.
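For instance (a small sketch reusing `compose` and relying on the forwarded context):
```js
const obj = {
  base: 1,
  fn: compose(
    function (x) { return x + this.base },
    function (x) { return x * this.base }
  ),
}
console.log(obj.fn(5))
// → 6 (`this` is `obj` in both functions)
```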
The first function is called with all arguments of the composed function:
```js
const add = (x, y) => x + y
const mul3 = x => x * 3
// const f = (x, y) => mul3(add(x, y))
const f = compose(add, mul3)
console.log(f(4, 5))
// → 27
```
Functions may also be passed in an array:
```js
const f = compose([add2, mul3])
```
Options can be passed as the first parameter:
```js
const f = compose(
{
// compose async functions
async: true,
// compose from right to left
right: true,
},
[add2, mul3]
)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@vates/compose/USAGE.md

@@ -0,0 +1,48 @@
```js
import { compose } from '@vates/compose'
const add2 = x => x + 2
const mul3 = x => x * 3
// const f = x => mul3(add2(x))
const f = compose(add2, mul3)
console.log(f(5))
// → 21
```
> The call context (`this`) of the composed function is forwarded to all functions.
The first function is called with all arguments of the composed function:
```js
const add = (x, y) => x + y
const mul3 = x => x * 3
// const f = (x, y) => mul3(add(x, y))
const f = compose(add, mul3)
console.log(f(4, 5))
// → 27
```
Functions may also be passed in an array:
```js
const f = compose([add2, mul3])
```
Options can be passed as the first parameter:
```js
const f = compose(
{
// compose async functions
async: true,
// compose from right to left
right: true,
},
[add2, mul3]
)
```

@vates/compose/index.js

@@ -0,0 +1,46 @@
'use strict'
const defaultOpts = { async: false, right: false }
exports.compose = function compose(opts, fns) {
if (Array.isArray(opts)) {
fns = opts
opts = defaultOpts
} else if (typeof opts === 'object') {
opts = Object.assign({}, defaultOpts, opts)
if (!Array.isArray(fns)) {
fns = Array.prototype.slice.call(arguments, 1)
}
} else {
fns = Array.from(arguments)
opts = defaultOpts
}
const n = fns.length
if (n === 0) {
throw new TypeError('at least one function must be passed')
}
if (n === 1) {
return fns[0]
}
if (opts.right) {
fns.reverse()
}
return opts.async
? async function () {
let value = await fns[0].apply(this, arguments)
for (let i = 1; i < n; ++i) {
value = await fns[i].call(this, value)
}
return value
}
: function () {
let value = fns[0].apply(this, arguments)
for (let i = 1; i < n; ++i) {
value = fns[i].call(this, value)
}
return value
}
}


@@ -0,0 +1,66 @@
/* eslint-env jest */
const { compose } = require('./')
const add2 = x => x + 2
const mul3 = x => x * 3
describe('compose()', () => {
it('throws when no functions is passed', () => {
expect(() => compose()).toThrow(TypeError)
expect(() => compose([])).toThrow(TypeError)
})
it('applies from left to right', () => {
expect(compose(add2, mul3)(5)).toBe(21)
})
it('accepts functions in an array', () => {
expect(compose([add2, mul3])(5)).toBe(21)
})
it('can apply from right to left', () => {
expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
})
it('accepts options with functions in an array', () => {
expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
})
it('can compose async functions', async () => {
expect(
await compose(
{ async: true },
async x => x + 2,
async x => x * 3
)(5)
).toBe(21)
})
it('forwards all args to first function', () => {
expect.assertions(1)
const expectedArgs = [Math.random(), Math.random()]
compose(
(...args) => {
expect(args).toEqual(expectedArgs)
},
// add a second function to avoid the one function special case
Function.prototype
)(...expectedArgs)
})
it('forwards context to all functions', () => {
expect.assertions(2)
const expectedThis = {}
compose(
function () {
expect(this).toBe(expectedThis)
},
function () {
expect(this).toBe(expectedThis)
}
).call(expectedThis)
})
})

@vates/compose/package.json

@@ -0,0 +1,24 @@
{
"private": false,
"name": "@vates/compose",
"description": "Compose functions from left to right",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/compose",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/compose",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "2.0.0",
"engines": {
"node": ">=7.6"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

@vates/disposable/README.md

@@ -0,0 +1,89 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/disposable
[![Package Version](https://badgen.net/npm/v/@vates/disposable)](https://npmjs.org/package/@vates/disposable) ![License](https://badgen.net/npm/license/@vates/disposable) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/disposable)](https://bundlephobia.com/result?p=@vates/disposable) [![Node compatibility](https://badgen.net/npm/node/@vates/disposable)](https://npmjs.org/package/@vates/disposable)
> Utilities for disposables
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/disposable):
```
> npm install --save @vates/disposable
```
## Usage
This library contains utilities for disposables as defined by the [`promise-toolbox` library](https://github.com/JsCommunity/promise-toolbox#resource-management).
### `deduped(fn, keyFn)`
Creates a new function that wraps `fn` and, instead of creating a new disposable at each call, returns copies of the same one when `keyFn` returns the same keys.
Those copies contain the same value and can be disposed independently; the source disposable is only disposed once all copies are disposed.
`keyFn` is called with the same context and arguments as the wrapping function and must return an array of keys, which are used to identify which disposables should be grouped together.
```js
import { deduped } from '@vates/disposable/deduped'
// the connection to the given host is established once at the first call, then shared with subsequent calls
const getConnection = deduped(async function (host) {
  const connection = new Connection(host)
  return new Disposable(connection, () => connection.close())
}, host => [host])
```
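For instance, two overlapping acquisitions share the same connection, and the underlying disposer only runs once both copies are disposed (the host value is illustrative):
```js
const d1 = await getConnection('xcp-host.example')
const d2 = await getConnection('xcp-host.example') // no new connection created

d1.dispose() // the connection stays open
d2.dispose() // last copy disposed, connection.close() is called
```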
### `debounceResource(disposable, delay)`
Creates a new disposable with the same value and a delayed disposer.
When this disposer is called, the source disposable is only disposed once `delay` has passed.
```js
import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()
// it will wait for 10 seconds before calling the disposer
using(debounceResource(getConnection(host), 10e3), connection => {})
```
### `debounceResource.flushAll()`
Runs all pending delayed disposers immediately and, for disposables still in use, cancels the delay so they are disposed as soon as their disposer is called.
```js
import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()
const res1 = await debounceResource(res, 10e3)
const res2 = await debounceResource(res, 10e3)
const res3 = await debounceResource(res, 10e3)
res1.dispose()
res2.dispose()
// res3 is still in use
debounceResource.flushAll()
// res1 and res2 are disposed immediately
// res3 will be disposed as soon as its disposer is called
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@vates/disposable/USAGE.md

@@ -0,0 +1,56 @@
This library contains utilities for disposables as defined by the [`promise-toolbox` library](https://github.com/JsCommunity/promise-toolbox#resource-management).
### `deduped(fn, keyFn)`
Creates a new function that wraps `fn` and, instead of creating a new disposable at each call, returns copies of the same one when `keyFn` returns the same keys.
Those copies contain the same value and can be disposed independently; the source disposable is only disposed once all copies are disposed.
`keyFn` is called with the same context and arguments as the wrapping function and must return an array of keys, which are used to identify which disposables should be grouped together.
```js
import { deduped } from '@vates/disposable/deduped'
// the connection to the given host is established once at the first call, then shared with subsequent calls
const getConnection = deduped(async function (host) {
  const connection = new Connection(host)
  return new Disposable(connection, () => connection.close())
}, host => [host])
```
### `debounceResource(disposable, delay)`
Creates a new disposable with the same value and a delayed disposer.
When this disposer is called, the source disposable is only disposed once `delay` has passed.
```js
import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()
// it will wait for 10 seconds before calling the disposer
using(debounceResource(getConnection(host), 10e3), connection => {})
```
### `debounceResource.flushAll()`
Runs all pending delayed disposers immediately and, for disposables still in use, cancels the delay so they are disposed as soon as their disposer is called.
```js
import { createDebounceResource } from '@vates/disposable/debounceResource'
const debounceResource = createDebounceResource()
const res1 = await debounceResource(res, 10e3)
const res2 = await debounceResource(res, 10e3)
const res3 = await debounceResource(res, 10e3)
res1.dispose()
res2.dispose()
// res3 is still in use
debounceResource.flushAll()
// res1 and res2 are disposed immediately
// res3 will be disposed as soon as its disposer is called
```

@vates/disposable/debounceResource.js

@@ -0,0 +1,56 @@
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { warn } = createLogger('vates:disposable:debounceResource')
exports.createDebounceResource = () => {
const flushers = new Set()
async function debounceResource(pDisposable, delay = debounceResource.defaultDelay) {
if (delay === 0) {
return pDisposable
}
const disposable = await pDisposable
let timeoutId
const disposeWrapper = async () => {
if (timeoutId !== undefined) {
clearTimeout(timeoutId)
timeoutId = undefined
flushers.delete(flusher)
try {
await disposable.dispose()
} catch (error) {
warn(error)
}
}
}
const flusher = () => {
const shouldDisposeNow = timeoutId !== undefined
if (shouldDisposeNow) {
return disposeWrapper()
} else {
// will dispose ASAP
delay = 0
}
}
flushers.add(flusher)
return {
dispose() {
timeoutId = setTimeout(disposeWrapper, delay)
},
value: disposable.value,
}
}
debounceResource.flushAll = () => {
// iterate synchronously so that flushers added while processing are not removed
const promise = asyncMap(flushers, flush => flush())
flushers.clear()
return promise
}
return debounceResource
}


@@ -0,0 +1,29 @@
/* eslint-env jest */
const { createDebounceResource } = require('./debounceResource')
jest.useFakeTimers()
describe('debounceResource()', () => {
it('calls the resource disposer after 10 seconds', async () => {
const debounceResource = createDebounceResource()
const delay = 10e3
const dispose = jest.fn()
const resource = await debounceResource(
Promise.resolve({
value: '',
dispose,
}),
delay
)
resource.dispose()
expect(dispose).not.toBeCalled()
jest.advanceTimersByTime(delay)
expect(dispose).toBeCalled()
})
})

@vates/disposable/deduped.js

@@ -0,0 +1,52 @@
const ensureArray = require('ensure-array')
const { MultiKeyMap } = require('@vates/multi-key-map')
function State(factory) {
this.factory = factory
this.users = 0
}
const call = fn => fn()
exports.deduped = (factory, keyFn = (...args) => args) =>
(function () {
const states = new MultiKeyMap()
return function () {
const keys = ensureArray(keyFn.apply(this, arguments))
let state = states.get(keys)
if (state === undefined) {
const result = factory.apply(this, arguments)
const createFactory = ({ value, dispose }) => {
const wrapper = {
dispose() {
if (--state.users === 0) {
states.delete(keys)
return dispose()
}
},
value,
}
return () => {
return wrapper
}
}
if (typeof result.then !== 'function') {
state = new State(createFactory(result))
} else {
result.catch(() => {
states.delete(keys)
})
const pFactory = result.then(createFactory)
state = new State(() => pFactory.then(call))
}
states.set(keys, state)
}
++state.users
return state.factory()
}
})()


@@ -0,0 +1,76 @@
/* eslint-env jest */
const { deduped } = require('./deduped')
describe('deduped()', () => {
it('calls the resource function only once', async () => {
const value = {}
const getResource = jest.fn(async () => ({
value,
dispose: Function.prototype,
}))
const dedupedGetResource = deduped(getResource)
const { value: v1 } = await dedupedGetResource()
const { value: v2 } = await dedupedGetResource()
expect(getResource).toHaveBeenCalledTimes(1)
expect(v1).toBe(value)
expect(v2).toBe(value)
})
it('only disposes the source disposable when all its copies are disposed', async () => {
const dispose = jest.fn()
const getResource = async () => ({
value: '',
dispose,
})
const dedupedGetResource = deduped(getResource)
const { dispose: d1 } = await dedupedGetResource()
const { dispose: d2 } = await dedupedGetResource()
d1()
expect(dispose).not.toHaveBeenCalled()
d2()
expect(dispose).toHaveBeenCalledTimes(1)
})
it('works with sync factory', () => {
const value = {}
const dispose = jest.fn()
const dedupedGetResource = deduped(() => ({ value, dispose }))
const d1 = dedupedGetResource()
expect(d1.value).toBe(value)
const d2 = dedupedGetResource()
expect(d2.value).toBe(value)
d1.dispose()
expect(dispose).not.toHaveBeenCalled()
d2.dispose()
expect(dispose).toHaveBeenCalledTimes(1)
})
it('no race condition on dispose before async acquisition', async () => {
const dispose = jest.fn()
const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))
const d1 = await dedupedGetResource()
dedupedGetResource()
d1.dispose()
expect(dispose).not.toHaveBeenCalled()
})
})

@vates/disposable/package.json

@@ -0,0 +1,29 @@
{
"private": false,
"name": "@vates/disposable",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/disposable",
"description": "Utilities for disposables",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/disposable",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/log": "^0.2.0",
"ensure-array": "^1.0.0"
}
}

@vates/toggle-scripts/README.md

@@ -0,0 +1,59 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/toggle-scripts
[![Package Version](https://badgen.net/npm/v/@vates/toggle-scripts)](https://npmjs.org/package/@vates/toggle-scripts) ![License](https://badgen.net/npm/license/@vates/toggle-scripts) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/toggle-scripts)](https://bundlephobia.com/result?p=@vates/toggle-scripts) [![Node compatibility](https://badgen.net/npm/node/@vates/toggle-scripts)](https://npmjs.org/package/@vates/toggle-scripts)
> Easily enable/disable scripts in package.json
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/toggle-scripts):
```
> npm install --save @vates/toggle-scripts
```
## Usage
```
Usage: toggle-scripts options...
Easily enable/disable scripts in package.json
Options
+<script> Enable the script <script>, ie remove the prefix `_`
-<script> Disable the script <script>, ie prefix it with `_`
Examples
toggle-scripts +postinstall +preuninstall
toggle-scripts -postinstall -preuninstall
```
For example, if you want `postinstall` hook only in dev:
```json
// package.json
{
"scripts": {
"postinstall": "<some dev only command>",
"prepublishOnly": "toggle-scripts -postinstall",
"postpublish": "toggle-scripts +postinstall"
}
}
```
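After `toggle-scripts -postinstall` runs (here via `prepublishOnly`), the script is renamed with a leading `_` so npm no longer runs it:
```json
// package.json (after publishing starts)
{
  "scripts": {
    "_postinstall": "<some dev only command>",
    "prepublishOnly": "toggle-scripts -postinstall",
    "postpublish": "toggle-scripts +postinstall"
  }
}
```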
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

@vates/toggle-scripts/USAGE.md

@@ -0,0 +1,26 @@
```
Usage: toggle-scripts options...
Easily enable/disable scripts in package.json
Options
+<script> Enable the script <script>, ie remove the prefix `_`
-<script> Disable the script <script>, ie prefix it with `_`
Examples
toggle-scripts +postinstall +preuninstall
toggle-scripts -postinstall -preuninstall
```
For example, if you want `postinstall` hook only in dev:
```json
// package.json
{
"scripts": {
"postinstall": "<some dev only command>",
"prepublishOnly": "toggle-scripts -postinstall",
"postpublish": "toggle-scripts +postinstall"
}
}
```

@vates/toggle-scripts/index.js

@@ -0,0 +1,60 @@
#!/usr/bin/env node
const fs = require('fs')
const mapKeys = (object, iteratee) => {
const result = {}
for (const key of Object.keys(object)) {
result[iteratee(key, object)] = object[key]
}
return result
}
const args = process.argv.slice(2)
if (args.length === 0) {
const { description, name, version } = require('./package.json')
const bin = 'toggle-scripts'
process.stdout.write(`Usage: ${bin} options...
${description}
Options
+<script> Enable the script <script>, ie remove the prefix \`_\`
-<script> Disable the script <script>, ie prefix it with \`_\`
Examples
${bin} +postinstall +preuninstall
${bin} -postinstall -preuninstall
${name} v${version}
`)
process.exit()
}
const plan = { __proto__: null }
for (const arg of args) {
const action = arg[0]
const script = arg.slice(1)
if (action === '+') {
plan['_' + script] = script
} else if (action === '-') {
plan[script] = '_' + script
} else {
throw new Error('invalid param: ' + arg)
}
}
const pkgPath = process.env.npm_package_json || './package.json'
const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'))
pkg.scripts = mapKeys(pkg.scripts, (name, scripts) => {
const newName = plan[name]
if (newName === undefined) {
return name
}
if (newName in scripts) {
throw new Error('script already defined: ' + name)
}
return newName
})
fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + '\n')

@vates/toggle-scripts/package.json

@@ -0,0 +1,41 @@
{
"private": false,
"name": "@vates/toggle-scripts",
"description": "Easily enable/disable scripts in package.json",
"keywords": [
"dev",
"disable",
"enable",
"lifecycle",
"npm",
"package.json",
"pinst",
"postinstall",
"script",
"scripts",
"toggle"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/toggle-scripts",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/toggle-scripts",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=6"
},
"files": [
"index.js"
],
"bin": "./index.js",
"scripts": {
"postversion": "npm publish --access public"
}
}

@xen-orchestra/async-map/README.md

@@ -4,7 +4,7 @@
[![Package Version](https://badgen.net/npm/v/@xen-orchestra/async-map)](https://npmjs.org/package/@xen-orchestra/async-map) ![License](https://badgen.net/npm/license/@xen-orchestra/async-map) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@xen-orchestra/async-map)](https://bundlephobia.com/result?p=@xen-orchestra/async-map) [![Node compatibility](https://badgen.net/npm/node/@xen-orchestra/async-map)](https://npmjs.org/package/@xen-orchestra/async-map)
> Similar to Promise.all + lodash.map but wait for all promises to be settled
> Promise.all + map for all iterables
## Install
@@ -16,10 +16,61 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/async
## Usage
```js
import asyncMap from '@xen-orchestra/async-map'
### `asyncMap(iterable, iteratee, thisArg = iterable)`
const array = await asyncMap(collection, iteratee)
Similar to `Promise.all + Array#map` for all iterables: calls `iteratee` for each item in `iterable`, and returns a promise of an array containing the awaited result of each call to `iteratee`.
It rejects as soon as the first call to `iteratee` rejects.
```js
import { asyncMap } from '@xen-orchestra/async-map'
const array = await asyncMap(iterable, iteratee, thisArg)
```
It can be used with any iterable (`Array`, `Map`, etc.):
```js
const map = new Map()
map.set('foo', 42)
map.set('bar', 3.14)
const array = await asyncMap(map, async function ([key, value]) {
// TODO: do async computation
//
// the map can be accessed via `this`
})
```
#### Use with plain objects
Plain objects are not iterable, but you can use `Object.keys`, `Object.values` or `Object.entries` to help:
```js
const object = {
foo: 42,
bar: 3.14,
}
const array = await asyncMap(
Object.entries(object),
async function ([key, value]) {
// TODO: do async computation
//
// the object can be accessed via `this` because it's been passed as third arg
},
object
)
```
### `asyncMapSettled(iterable, iteratee, thisArg = iterable)`
Similar to `asyncMap` but waits for all promises to settle before rejecting.
```js
import { asyncMapSettled } from '@xen-orchestra/async-map'
const array = await asyncMapSettled(iterable, iteratee, thisArg)
```
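A small sketch of the difference in rejection timing (tasks and delays are illustrative):
```js
import { asyncMap, asyncMapSettled } from '@xen-orchestra/async-map'

const tasks = [
  () => Promise.reject(new Error('early failure')),
  () => new Promise(resolve => setTimeout(resolve, 1000)),
]

// asyncMap rejects immediately with 'early failure'
asyncMap(tasks, task => task()).catch(console.error)

// asyncMapSettled rejects with the same error, but only after the
// second task has settled (about 1 second later)
asyncMapSettled(tasks, task => task()).catch(console.error)
```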
## Contributions

@xen-orchestra/async-map/USAGE.md

@@ -1,5 +1,56 @@
```js
import asyncMap from '@xen-orchestra/async-map'
### `asyncMap(iterable, iteratee, thisArg = iterable)`
const array = await asyncMap(collection, iteratee)
Similar to `Promise.all + Array#map` for all iterables: calls `iteratee` for each item in `iterable`, and returns a promise of an array containing the awaited result of each call to `iteratee`.
It rejects as soon as the first call to `iteratee` rejects.
```js
import { asyncMap } from '@xen-orchestra/async-map'
const array = await asyncMap(iterable, iteratee, thisArg)
```
It can be used with any iterable (`Array`, `Map`, etc.):
```js
const map = new Map()
map.set('foo', 42)
map.set('bar', 3.14)
const array = await asyncMap(map, async function ([key, value]) {
// TODO: do async computation
//
// the map can be accessed via `this`
})
```
#### Use with plain objects
Plain objects are not iterable, but you can use `Object.keys`, `Object.values` or `Object.entries` to help:
```js
const object = {
foo: 42,
bar: 3.14,
}
const array = await asyncMap(
Object.entries(object),
async function ([key, value]) {
// TODO: do async computation
//
// the object can be accessed via `this` because it's been passed as third arg
},
object
)
```
### `asyncMapSettled(iterable, iteratee, thisArg = iterable)`
Similar to `asyncMap` but waits for all promises to settle before rejecting.
```js
import { asyncMapSettled } from '@xen-orchestra/async-map'
const array = await asyncMapSettled(iterable, iteratee, thisArg)
```

@xen-orchestra/async-map/index.js

@@ -0,0 +1,71 @@
const wrapCall = (fn, arg, thisArg) => {
try {
return Promise.resolve(fn.call(thisArg, arg))
} catch (error) {
return Promise.reject(error)
}
}
/**
* Similar to Promise.all + Array#map but supports all iterables and does not trigger ESLint array-callback-return
*
* WARNING: Does not handle plain objects
*
* @template Item,This
* @param {Iterable<Item>} iterable
* @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
* @param {This} [thisArg]
* @returns {Promise<Item[]>}
*/
exports.asyncMap = function asyncMap(iterable, mapFn, thisArg = iterable) {
return Promise.all(Array.from(iterable, mapFn, thisArg))
}
/**
* Like `asyncMap` but waits for all promises to settle before rejecting
*
* @template Item,This
* @param {Iterable<Item>} iterable
* @param {(this: This, item: Item) => (Item | PromiseLike<Item>)} mapFn
* @param {This} [thisArg]
* @returns {Promise<Item[]>}
*/
exports.asyncMapSettled = function asyncMapSettled(iterable, mapFn, thisArg = iterable) {
return new Promise((resolve, reject) => {
const onError = e => {
if (result !== undefined) {
error = e
result = undefined
}
if (--n === 0) {
reject(error)
}
}
const onValue = (i, value) => {
const hasError = result === undefined
if (!hasError) {
result[i] = value
}
if (--n === 0) {
if (hasError) {
reject(error)
} else {
resolve(result)
}
}
}
let n = 0
for (const item of iterable) {
const i = n++
wrapCall(mapFn, item, thisArg).then(value => onValue(i, value), onError)
}
if (n === 0) {
return resolve([])
}
let error
let result = new Array(n)
})
}


@@ -0,0 +1,71 @@
/* eslint-env jest */
const { asyncMapSettled } = require('./')
const noop = Function.prototype
describe('asyncMapSettled', () => {
it('works', async () => {
const values = [Math.random(), Math.random()]
const spy = jest.fn(async v => v * 2)
const iterable = new Set(values)
// returns an array containing the result of each call
expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))
for (let i = 0, n = values.length; i < n; ++i) {
// each call receives the current item as its sole argument
expect(spy.mock.calls[i]).toEqual([values[i]])
// each call has `this` bound to the iterable
expect(spy.mock.instances[i]).toBe(iterable)
}
})
it('can use a specified thisArg', () => {
const thisArg = {}
const spy = jest.fn()
asyncMapSettled(['foo'], spy, thisArg)
expect(spy.mock.instances[0]).toBe(thisArg)
})
it('rejects only once all calls have settled', async () => {
const defers = []
const promise = asyncMapSettled([1, 2], () => {
let resolve, reject
// eslint-disable-next-line promise/param-names
const promise = new Promise((_resolve, _reject) => {
resolve = _resolve
reject = _reject
})
defers.push({ promise, resolve, reject })
return promise
})
let hasSettled = false
promise.catch(noop).then(() => {
hasSettled = true
})
const error = new Error()
defers[0].reject(error)
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
expect(hasSettled).toBe(false)
defers[1].resolve()
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
expect(hasSettled).toBe(true)
await expect(promise).rejects.toBe(error)
})
it('issues when latest promise rejects', async () => {
const error = new Error()
await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
})
})

@xen-orchestra/async-map/legacy.js

@@ -9,14 +9,18 @@
// (V1, K) => MaybePromise<V2>
// ): Promise<V2[]>
import map from 'lodash/map'
const map = require('lodash/map')
// Similar to map() + Promise.all() but wait for all promises to
// settle before rejecting (with the first error)
const asyncMap = (collection, iteratee) => {
/**
* Similar to map() + Promise.all() but wait for all promises to settle before
* rejecting (with the first error)
*
* @deprecated Don't support iterables, please use new implementations
*/
module.exports = function asyncMapLegacy(collection, iteratee) {
let then
if (collection != null && typeof (then = collection.then) === 'function') {
return then.call(collection, collection => asyncMap(collection, iteratee))
return then.call(collection, collection => asyncMapLegacy(collection, iteratee))
}
let errorContainer
@@ -39,5 +43,3 @@ const asyncMap = (collection, iteratee) => {
return values
})
}
export { asyncMap as default }

@xen-orchestra/async-map/package.json

@@ -1,10 +1,17 @@
{
"private": false,
"name": "@xen-orchestra/async-map",
"version": "0.0.0",
"version": "0.1.2",
"license": "ISC",
"description": "Similar to Promise.all + lodash.map but wait for all promises to be settled",
"keywords": [],
"description": "Promise.all + map for all iterables",
"keywords": [
"array",
"async",
"iterable",
"map",
"settled",
"typescript"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/async-map",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -17,13 +24,9 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"browserslist": [
">2%"
"index.js",
"legacy.js"
],
"engines": {
"node": ">=6"
@@ -31,22 +34,7 @@
"dependencies": {
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}


@@ -32,7 +32,7 @@
"dependencies": {
"@xen-orchestra/log": "^0.2.0",
"core-js": "^3.6.4",
"golike-defer": "^0.4.1",
"golike-defer": "^0.5.1",
"lodash": "^4.17.15",
"object-hash": "^2.0.1"
},

@xen-orchestra/backups-cli/README.md

@@ -12,6 +12,26 @@ Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/backu
> npm install --global @xen-orchestra/backups-cli
```
## Usage
```
> xo-backups --help
Usage:
xo-backups clean-vms [--merge] [--remove] xo-vm-backups/*
Detects and repairs issues with VM backups.
Options:
-m, --merge Merge (or continue merging) VHD files that are unused
-r, --remove Remove unused, incomplete, orphan, or corrupted files
xo-backups create-symlink-index xo-vm-backups <field path>
xo-backups info xo-vm-backups/*
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

@xen-orchestra/backups-cli/USAGE.md

@@ -0,0 +1,17 @@
```
> xo-backups --help
Usage:
xo-backups clean-vms [--merge] [--remove] xo-vm-backups/*
Detects and repairs issues with VM backups.
Options:
-m, --merge Merge (or continue merging) VHD files that are unused
-r, --remove Remove unused, incomplete, orphan, or corrupted files
xo-backups create-symlink-index xo-vm-backups <field path>
xo-backups info xo-vm-backups/*
```

@xen-orchestra/backups-cli/_asyncMap.js

@@ -1,5 +0,0 @@
const curryRight = require('lodash/curryRight')
module.exports = curryRight((iterable, fn) =>
Promise.all(Array.isArray(iterable) ? iterable.map(fn) : Array.from(iterable, fn))
)

@xen-orchestra/backups-cli/commands/clean-vms.js

@@ -6,6 +6,7 @@ let merge, remove
// -----------------------------------------------------------------------------
const assert = require('assert')
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const flatten = require('lodash/flatten')
const getopts = require('getopts')
const limitConcurrency = require('limit-concurrency-decorator').default
@@ -16,7 +17,6 @@ const { dirname, resolve } = require('path')
const { DISK_TYPE_DIFFERENCING } = require('vhd-lib/dist/_constants')
const { isValidXva } = require('@xen-orchestra/backups/isValidXva')
const asyncMap = require('../_asyncMap')
const fs = require('../_fs')
const handler = require('@xen-orchestra/fs').getHandler({ url: 'file://' })

@xen-orchestra/backups-cli/commands/create-symlink-index.js

@@ -1,8 +1,8 @@
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')
const asyncMap = require('../_asyncMap')
const { mktree, readdir2, readFile, symlink2 } = require('../_fs')
module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {

@xen-orchestra/backups-cli/commands/info.js

@@ -1,8 +1,8 @@
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const asyncMap = require('../_asyncMap')
const { readdir2, readFile, getSize } = require('../_fs')
const sha512 = str => createHash('sha512').update(str).digest('hex')

@xen-orchestra/backups-cli/index.js

@@ -5,7 +5,14 @@ require('./_composeCommands')({
get main() {
return require('./commands/clean-vms')
},
usage: '[--force] [--merge] xo-vm-backups/*',
usage: `[--merge] [--remove] xo-vm-backups/*
Detects and repairs issues with VM backups.
Options:
-m, --merge Merge (or continue merging) VHD files that are unused
-r, --remove Remove unused, incomplete, orphan, or corrupted files
`,
},
'create-symlink-index': {
get main() {

@xen-orchestra/backups-cli/package.json

@@ -6,13 +6,14 @@
"preferGlobal": true,
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/backups": "^0.1.1",
"@xen-orchestra/fs": "^0.12.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.7.0",
"@xen-orchestra/fs": "^0.13.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.15",
"promise-toolbox": "^0.16.0",
"promise-toolbox": "^0.17.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^1.0.0"
},
@@ -33,7 +34,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.3.0",
"version": "0.4.0",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

@xen-orchestra/backups/Backup.js

@@ -0,0 +1,264 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const limitConcurrency = require('limit-concurrency-decorator').default
const using = require('promise-toolbox/using')
const { compileTemplate } = require('@xen-orchestra/template')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup')
const { Task } = require('./Task')
const { VmBackup } = require('./_VmBackup')
const { XoMetadataBackup } = require('./_XoMetadataBackup')
const noop = Function.prototype
const getAdaptersByRemote = adapters => {
const adaptersByRemote = {}
adapters.forEach(({ adapter, remoteId }) => {
adaptersByRemote[remoteId] = adapter
})
return adaptersByRemote
}
const runTask = (...args) => Task.run(...args).catch(noop) // errors are handled by logs
exports.Backup = class Backup {
constructor({ config, getAdapter, getConnectedRecord, job, schedule }) {
this._config = config
this._getRecord = getConnectedRecord
this._job = job
this._schedule = schedule
this._getAdapter = Disposable.factory(function* (remoteId) {
return {
adapter: yield getAdapter(remoteId),
remoteId,
}
})
this._getSnapshotNameLabel = compileTemplate(config.snapshotNameLabelTpl, {
'{job.name}': job.name,
'{vm.name_label}': vm => vm.name_label,
})
}
run() {
const type = this._job.type
if (type === 'backup') {
return this._runVmBackup()
} else if (type === 'metadataBackup') {
return this._runMetadataBackup()
} else {
throw new Error(`No runner for the backup type ${type}`)
}
}
async _runMetadataBackup() {
const schedule = this._schedule
const job = this._job
const remoteIds = extractIdsFromSimplePattern(job.remotes)
if (remoteIds.length === 0) {
throw new Error('metadata backup job cannot run without remotes')
}
const config = this._config
const settings = {
...config.defaultSettings,
...config.metadata.defaultSettings,
...job.settings[''],
...job.settings[schedule.id],
}
const poolIds = extractIdsFromSimplePattern(job.pools)
const isEmptyPools = poolIds.length === 0
const isXoMetadata = job.xoMetadata !== undefined
if (!isXoMetadata && isEmptyPools) {
throw new Error('no metadata mode found')
}
const { retentionPoolMetadata, retentionXoMetadata } = settings
if (
(retentionPoolMetadata === 0 && retentionXoMetadata === 0) ||
(!isXoMetadata && retentionPoolMetadata === 0) ||
(isEmptyPools && retentionXoMetadata === 0)
) {
throw new Error('no retentions corresponding to the metadata modes found')
}
await using(
Disposable.all(
poolIds.map(id =>
this._getRecord('pool', id).catch(error => {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
runTask(
{
name: 'get pool record',
data: { type: 'pool', id },
},
() => Promise.reject(error)
)
})
)
),
Disposable.all(
remoteIds.map(id =>
this._getAdapter(id).catch(error => {
// See https://github.com/vatesfr/xen-orchestra/commit/6aa6cfba8ec939c0288f0fa740f6dfad98c43cbb
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id },
},
() => Promise.reject(error)
)
})
)
),
async (pools, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
if (remoteAdapters.length === 0) {
return
}
remoteAdapters = getAdaptersByRemote(remoteAdapters)
// remove pools that failed (already handled)
pools = pools.filter(_ => _ !== undefined)
const promises = []
if (pools.length !== 0 && settings.retentionPoolMetadata !== 0) {
promises.push(
asyncMap(pools, async pool =>
runTask(
{
name: `Starting metadata backup for the pool (${pool.$id}). (${job.id})`,
data: {
id: pool.$id,
pool,
poolMaster: await ignoreErrors.call(pool.$xapi.getRecord('host', pool.master)),
type: 'pool',
},
},
() =>
new PoolMetadataBackup({
config,
job,
pool,
remoteAdapters,
schedule,
settings,
}).run()
)
)
)
}
if (job.xoMetadata !== undefined && settings.retentionXoMetadata !== 0) {
promises.push(
runTask(
{
name: `Starting XO metadata backup. (${job.id})`,
data: {
type: 'xo',
},
},
() =>
new XoMetadataBackup({
config,
job,
remoteAdapters,
schedule,
settings,
}).run()
)
)
}
await Promise.all(promises)
}
)
}
async _runVmBackup() {
const job = this._job
// FIXME: proper SimpleIdPattern handling
const getSnapshotNameLabel = this._getSnapshotNameLabel
const schedule = this._schedule
const config = this._config
const { settings } = job
const scheduleSettings = {
...config.defaultSettings,
...config.vm.defaultSettings,
...settings[''],
...settings[schedule.id],
}
await using(
Disposable.all(
extractIdsFromSimplePattern(job.srs).map(id =>
this._getRecord('SR', id).catch(error => {
runTask(
{
name: 'get SR record',
data: { type: 'SR', id },
},
() => Promise.reject(error)
)
})
)
),
Disposable.all(
extractIdsFromSimplePattern(job.remotes).map(id =>
this._getAdapter(id).catch(error => {
runTask(
{
name: 'get remote adapter',
data: { type: 'remote', id },
},
() => Promise.reject(error)
)
})
)
),
async (srs, remoteAdapters) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)
// remove srs that failed (already handled)
srs = srs.filter(_ => _ !== undefined)
if (remoteAdapters.length === 0 && srs.length === 0 && scheduleSettings.snapshotRetention === 0) {
return
}
const vmIds = extractIdsFromSimplePattern(job.vms)
Task.info('vms', { vms: vmIds })
remoteAdapters = getAdaptersByRemote(remoteAdapters)
const handleVm = vmUuid =>
runTask({ name: 'backup VM', data: { type: 'VM', id: vmUuid } }, () =>
using(this._getRecord('VM', vmUuid), vm =>
new VmBackup({
config,
getSnapshotNameLabel,
job,
// remotes,
remoteAdapters,
schedule,
settings: { ...scheduleSettings, ...settings[vmUuid] },
srs,
vm,
}).run()
)
)
const { concurrency } = scheduleSettings
await asyncMapSettled(vmIds, concurrency === 0 ? handleVm : limitConcurrency(concurrency)(handleVm))
}
)
}
}
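
A minimal wiring sketch (not part of the diff): the dependency factories and the job shape below are hypothetical stand-ins for xo-server's remote and XAPI registries, which must return promise-toolbox disposables.

const { Backup } = require('./Backup') // path assumed

async function runJobOnce({ config, getAdapter, getConnectedRecord, job, schedule }) {
  // job.type selects the runner: 'backup' (VMs) or 'metadataBackup'
  const backup = new Backup({ config, getAdapter, getConnectedRecord, job, schedule })
  return backup.run()
}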

View File

@@ -0,0 +1,40 @@
const { asyncMap } = require('@xen-orchestra/async-map')
exports.DurablePartition = class DurablePartition {
// the private resource API (raw value/dispose pairs) is used here, exceptionally, to separate resource creation from its release
#partitionDisposers = {}
flushAll() {
const partitionDisposers = this.#partitionDisposers
return asyncMap(Object.keys(partitionDisposers), path => {
const disposers = partitionDisposers[path]
delete partitionDisposers[path]
return asyncMap(disposers, d => d(path).catch(() => {}))
})
}
async mount(adapter, diskId, partitionId) {
const { value: path, dispose } = await adapter.getPartition(diskId, partitionId)
const partitionDisposers = this.#partitionDisposers
if (partitionDisposers[path] === undefined) {
partitionDisposers[path] = []
}
partitionDisposers[path].push(dispose)
return path
}
async unmount(path) {
const partitionDisposers = this.#partitionDisposers
const disposers = partitionDisposers[path]
if (disposers === undefined) {
throw new Error(`No partition corresponding to the path ${path} found`)
}
await disposers.pop()()
if (disposers.length === 0) {
delete partitionDisposers[path]
}
}
}
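
A lifecycle sketch, assuming an `adapter` exposing `getPartition()` like the RemoteAdapter below; `mount()` may be called several times for the same path, and each `unmount()` releases one disposer.

const { DurablePartition } = require('./DurablePartition') // path assumed

async function withPartition(adapter, diskId, partitionId) {
  const durable = new DurablePartition()
  const path = await durable.mount(adapter, diskId, partitionId)
  try {
    // read files under `path`…
  } finally {
    await durable.unmount(path)
  }
  await durable.flushAll() // safety net: releases any mounts left open
}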

View File

@@ -0,0 +1,59 @@
const assert = require('assert')
const { formatFilenameDate } = require('./_filenameDate')
const { importDeltaVm } = require('./_deltaVm')
const { Task } = require('./Task')
exports.ImportVmBackup = class ImportVmBackup {
constructor({ adapter, metadata, srUuid, xapi }) {
this._adapter = adapter
this._metadata = metadata
this._srUuid = srUuid
this._xapi = xapi
}
async run() {
const adapter = this._adapter
const metadata = this._metadata
const isFull = metadata.mode === 'full'
let backup
if (isFull) {
backup = await adapter.readFullVmBackup(metadata)
} else {
assert.strictEqual(metadata.mode, 'delta')
backup = await adapter.readDeltaVmBackup(metadata)
}
return Task.run(
{
name: 'transfer',
},
async () => {
const xapi = this._xapi
const srRef = await xapi.call('SR.get_by_uuid', this._srUuid)
const vmRef = isFull
? await xapi.VM_import(backup, srRef)
: await importDeltaVm(backup, await xapi.getRecord('SR', srRef), {
detectBase: false,
})
await Promise.all([
xapi.call('VM.add_tags', vmRef, 'restored from backup'),
xapi.call(
'VM.set_name_label',
vmRef,
`${metadata.vm.name_label} (${formatFilenameDate(metadata.timestamp)})`
),
])
return {
size: metadata.size,
id: await xapi.getField('VM', vmRef, 'uuid'),
}
}
).catch(() => {}) // errors are handled by logs
}
}
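
A restore sketch; the adapter, SR UUID and xapi connection are assumed to be provided by the caller.

const { ImportVmBackup } = require('./ImportVmBackup') // path assumed

async function restoreNewest(adapter, vmUuid, srUuid, xapi) {
  const backups = await adapter.listVmBackups(vmUuid) // sorted by timestamp
  const metadata = backups[backups.length - 1]
  return new ImportVmBackup({ adapter, metadata, srUuid, xapi }).run() // → { id, size }
}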

View File

@@ -0,0 +1,554 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const Disposable = require('promise-toolbox/Disposable')
const fromCallback = require('promise-toolbox/fromCallback')
const fromEvent = require('promise-toolbox/fromEvent')
const pDefer = require('promise-toolbox/defer')
const pump = require('pump')
const using = require('promise-toolbox/using')
const { basename, dirname, join, normalize, resolve } = require('path')
const { createLogger } = require('@xen-orchestra/log')
const { createSyntheticStream, mergeVhd, default: Vhd } = require('vhd-lib')
const { deduped } = require('@vates/disposable/deduped')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { ZipFile } = require('yazl')
const { BACKUP_DIR } = require('./_getVmBackupDir')
const { getTmpDir } = require('./_getTmpDir')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions')
const { lvs, pvs } = require('./_lvm')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { warn } = createLogger('xo:proxy:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
const isMetadataFile = filename => filename.endsWith('.json')
const isVhdFile = filename => filename.endsWith('.vhd')
const noop = Function.prototype
const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
files.push({
realPath,
metadataPath,
})
}
}
const createSafeReaddir = (handler, methodName) => (path, options) =>
handler.list(path, options).catch(error => {
if (error?.code !== 'ENOENT') {
warn(`${methodName} ${path}`, { error })
}
return []
})
const debounceResourceFactory = factory =>
function () {
return this._debounceResource(factory.apply(this, arguments))
}
exports.RemoteAdapter = class RemoteAdapter {
constructor(handler, { debounceResource, dirMode }) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
}
get handler() {
return this._handler
}
async _deleteVhd(path) {
const handler = this._handler
const vhds = await asyncMapSettled(
await handler.list(dirname(path), {
filter: isVhdFile,
prependDir: true,
}),
async path => {
try {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
path,
}
} catch (error) {
// Do not fail on corrupted VHDs (usually uncleaned temporary files):
// they are probably inconsequential to the backup process and should
// not make it fail.
warn(`BackupNg#_deleteVhd ${path}`, { error })
}
}
)
const base = basename(path)
const child = vhds.find(_ => _ !== undefined && _.header.parentUnicodeName === base)
if (child === undefined) {
await handler.unlink(path)
return 0
}
try {
const childPath = child.path
const mergedDataSize = await mergeVhd(handler, path, handler, childPath)
await handler.rename(path, childPath)
return mergedDataSize
} catch (error) {
handler.unlink(path).catch(warn)
throw error
}
}
async _findPartition(devicePath, partitionId) {
const partitions = await listPartitions(devicePath)
const partition = partitions.find(_ => _.id === partitionId)
if (partition === undefined) {
throw new Error(`partition ${partitionId} not found`)
}
return partition
}
_getLvmLogicalVolumes = Disposable.factory(this._getLvmLogicalVolumes)
_getLvmLogicalVolumes = deduped(this._getLvmLogicalVolumes, (devicePath, pvId, vgName) => [devicePath, pvId, vgName])
_getLvmLogicalVolumes = debounceResourceFactory(this._getLvmLogicalVolumes)
async *_getLvmLogicalVolumes(devicePath, pvId, vgName) {
yield this._getLvmPhysicalVolume(devicePath, pvId && (await this._findPartition(devicePath, pvId)))
await fromCallback(execFile, 'vgchange', ['-ay', vgName])
try {
yield lvs(['lv_name', 'lv_path'], vgName)
} finally {
await fromCallback(execFile, 'vgchange', ['-an', vgName])
}
}
_getLvmPhysicalVolume = Disposable.factory(this._getLvmPhysicalVolume)
_getLvmPhysicalVolume = deduped(this._getLvmPhysicalVolume, (devicePath, partition) => [devicePath, partition?.id])
_getLvmPhysicalVolume = debounceResourceFactory(this._getLvmPhysicalVolume)
async *_getLvmPhysicalVolume(devicePath, partition) {
const args = []
if (partition !== undefined) {
args.push('-o', partition.start * 512, '--sizelimit', partition.size)
}
args.push('--show', '-f', devicePath)
const path = (await fromCallback(execFile, 'losetup', args)).trim()
try {
await fromCallback(execFile, 'pvscan', ['--cache', path])
yield path
} finally {
try {
const vgNames = await pvs('vg_name', path)
await fromCallback(execFile, 'vgchange', ['-an', ...vgNames])
} finally {
await fromCallback(execFile, 'losetup', ['-d', path])
}
}
}
_getPartition = Disposable.factory(this._getPartition)
_getPartition = deduped(this._getPartition, (devicePath, partition) => [devicePath, partition?.id])
_getPartition = debounceResourceFactory(this._getPartition)
async *_getPartition(devicePath, partition) {
const options = ['loop', 'ro']
if (partition !== undefined) {
const { size, start } = partition
options.push(`sizelimit=${size}`)
if (start !== undefined) {
options.push(`offset=${start * 512}`)
}
}
const path = yield getTmpDir()
const mount = options => {
return fromCallback(execFile, 'mount', [
`--options=${options.join(',')}`,
`--source=${devicePath}`,
`--target=${path}`,
])
}
// the `norecovery` option is supported by ext3/ext4/xfs; if mounting
// fails, the filesystem may be of another type, so retry without it
try {
await mount([...options, 'norecovery'])
} catch (error) {
await mount(options)
}
try {
yield path
} finally {
await fromCallback(execFile, 'umount', ['--lazy', path])
}
}
_listLvmLogicalVolumes(devicePath, partition, results = []) {
return using(this._getLvmPhysicalVolume(devicePath, partition), async path => {
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], path)
const partitionId = partition !== undefined ? partition.id : ''
lvs.forEach((lv, i) => {
const name = lv.lv_name
if (name !== '') {
results.push({
id: `${partitionId}/${lv.vg_name}/${name}`,
name,
size: lv.lv_size,
})
}
})
return results
})
}
_usePartitionFiles = Disposable.factory(this._usePartitionFiles)
async *_usePartitionFiles(diskId, partitionId, paths) {
const path = yield this.getPartition(diskId, partitionId)
const files = []
await asyncMap(paths, file =>
addDirectory(files, resolveSubpath(path, file), normalize('./' + file).replace(/\/+$/, ''))
)
return files
}
fetchPartitionFiles(diskId, partitionId, paths) {
const { promise, reject, resolve } = pDefer()
using(
async function* () {
const files = yield this._usePartitionFiles(diskId, partitionId, paths)
const zip = new ZipFile()
files.forEach(({ realPath, metadataPath }) => zip.addFile(realPath, metadataPath))
zip.end()
const { outputStream } = zip
resolve(outputStream)
await fromEvent(outputStream, 'end')
}.bind(this)
).catch(error => {
warn(error)
reject(error)
})
return promise
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
let mergedDataSize = 0
await asyncMapSettled(backups, ({ _filename, vhds }) =>
Promise.all([
handler.unlink(_filename),
asyncMap(Object.values(vhds), async _ => {
mergedDataSize += await this._deleteVhd(resolveRelativeFromFile(_filename, _))
}),
])
)
return mergedDataSize
}
async deleteMetadataBackup(backupId) {
const uuidReg = '\\w{8}(-\\w{4}){3}-\\w{12}'
const metadataDirReg = 'xo-(config|pool-metadata)-backups'
const timestampReg = '\\d{8}T\\d{6}Z'
const regexp = new RegExp(`^${metadataDirReg}/${uuidReg}(/${uuidReg})?/${timestampReg}`)
if (!regexp.test(backupId)) {
throw new Error(`The id (${backupId}) does not correspond to a metadata folder`)
}
await this._handler.rmtree(backupId)
}
async deleteOldMetadataBackups(dir, retention) {
const handler = this.handler
let list = await handler.list(dir)
list.sort()
list = list.filter(timestamp => /^\d{8}T\d{6}Z$/.test(timestamp)).slice(0, -retention)
await asyncMapSettled(list, timestamp => handler.rmtree(`${dir}/${timestamp}`))
}
async deleteFullVmBackups(backups) {
const handler = this._handler
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
}
async deleteVmBackup(filename) {
const metadata = JSON.parse(String(await this._handler.readFile(filename)))
metadata._filename = filename
if (metadata.mode === 'delta') {
await this.deleteDeltaVmBackups([metadata])
} else if (metadata.mode === 'full') {
await this.deleteFullVmBackups([metadata])
} else {
throw new Error(`no deleter for backup mode ${metadata.mode}`)
}
}
getDisk = Disposable.factory(this.getDisk)
getDisk = deduped(this.getDisk, diskId => [diskId])
getDisk = debounceResourceFactory(this.getDisk)
async *getDisk(diskId) {
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
const mountDir = yield getTmpDir()
await fromCallback(execFile, 'vhdimount', [diskPath, mountDir])
try {
let max = 0
let maxEntry
const entries = await readdir(mountDir)
entries.forEach(entry => {
const matches = RE_VHDI.exec(entry)
if (matches !== null) {
const value = +matches[1]
if (value > max) {
max = value
maxEntry = entry
}
}
})
if (max === 0) {
throw new Error('no disks found')
}
yield `${mountDir}/${maxEntry}`
} finally {
await fromCallback(execFile, 'fusermount', ['-uz', mountDir])
}
}
// partitionId values:
//
// - undefined: raw disk
// - `<partitionId>`: partitioned disk
// - `<pvId>/<vgName>/<lvName>`: LVM on a partitioned disk
// - `/<vgName>/<lvName>`: LVM on a raw disk
getPartition = Disposable.factory(this.getPartition)
async *getPartition(diskId, partitionId) {
const devicePath = yield this.getDisk(diskId)
if (partitionId === undefined) {
return yield this._getPartition(devicePath)
}
const isLvmPartition = partitionId.includes('/')
if (isLvmPartition) {
const [pvId, vgName, lvName] = partitionId.split('/')
const lvs = yield this._getLvmLogicalVolumes(devicePath, pvId !== '' ? pvId : undefined, vgName)
return yield this._getPartition(lvs.find(_ => _.lv_name === lvName).lv_path)
}
return yield this._getPartition(devicePath, await this._findPartition(devicePath, partitionId))
}
async listAllVmBackups() {
const handler = this._handler
const backups = { __proto__: null }
await asyncMap(await handler.list(BACKUP_DIR), async vmUuid => {
const vmBackups = await this.listVmBackups(vmUuid)
backups[vmUuid] = vmBackups
})
return backups
}
listPartitionFiles(diskId, partitionId, path) {
return using(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncMap(await readdir(path), async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
})
return entriesMap
})
}
listPartitions(diskId) {
return using(this.getDisk(diskId), async devicePath => {
const partitions = await listPartitions(devicePath)
if (partitions.length === 0) {
try {
// handle potential raw LVM physical volume
return await this._listLvmLogicalVolumes(devicePath, undefined, partitions)
} catch (error) {
return []
}
}
const results = []
await asyncMapSettled(partitions, partition =>
partition.type === LVM_PARTITION_TYPE
? this._listLvmLogicalVolumes(devicePath, partition, results)
: results.push(partition)
)
return results
})
}
async listPoolMetadataBackups() {
const handler = this._handler
const safeReaddir = createSafeReaddir(handler, 'listPoolMetadataBackups')
const backupsByPool = {}
await asyncMap(await safeReaddir(DIR_XO_POOL_METADATA_BACKUPS, { prependDir: true }), async scheduleDir =>
asyncMap(await safeReaddir(scheduleDir), async poolId => {
const backups = backupsByPool[poolId] ?? (backupsByPool[poolId] = [])
return asyncMap(await safeReaddir(`${scheduleDir}/${poolId}`, { prependDir: true }), async backupDir => {
try {
backups.push({
id: backupDir,
...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
})
} catch (error) {
warn(`listPoolMetadataBackups ${backupDir}`, {
error,
})
}
})
})
)
// delete empty entries and sort backups
Object.keys(backupsByPool).forEach(poolId => {
const backups = backupsByPool[poolId]
if (backups.length === 0) {
delete backupsByPool[poolId]
} else {
backups.sort(compareTimestamp)
}
})
return backupsByPool
}
async listVmBackups(vmUuid, predicate) {
const handler = this._handler
const backups = []
try {
const files = await handler.list(`${BACKUP_DIR}/${vmUuid}`, {
filter: isMetadataFile,
prependDir: true,
})
await asyncMap(files, async file => {
try {
const metadata = await this.readVmBackupMetadata(file)
if (predicate === undefined || predicate(metadata)) {
// inject an id usable by importVmBackupNg()
metadata.id = metadata._filename
backups.push(metadata)
}
} catch (error) {
warn(`listVmBackups ${file}`, { error })
}
})
} catch (error) {
let code
if (error == null || ((code = error.code) !== 'ENOENT' && code !== 'ENOTDIR')) {
throw error
}
}
return backups.sort(compareTimestamp)
}
async listXoMetadataBackups() {
const handler = this._handler
const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')
const backups = []
await asyncMap(await safeReaddir(DIR_XO_CONFIG_BACKUPS, { prependDir: true }), async scheduleDir =>
asyncMap(await safeReaddir(scheduleDir, { prependDir: true }), async backupDir => {
try {
backups.push({
id: backupDir,
...JSON.parse(String(await handler.readFile(`${backupDir}/metadata.json`))),
})
} catch (error) {
warn(`listXoMetadataBackups ${backupDir}`, { error })
}
})
)
return backups.sort(compareTimestamp)
}
async outputStream(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
input = await input
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await handler.createOutputStream(tmpPath, {
checksum,
dirMode: this._dirMode,
})
try {
await Promise.all([fromCallback(pump, input, output), output.checksumWritten, input.task])
await validator(tmpPath)
await handler.rename(tmpPath, path, { checksum })
} catch (error) {
await handler.unlink(tmpPath, { checksum })
throw error
}
}
async readDeltaVmBackup(metadata) {
const handler = this._handler
const { vbds, vdis, vhds, vifs, vm } = metadata
const dir = dirname(metadata._filename)
const streams = {}
await asyncMapSettled(Object.entries(vdis), async ([id, vdi]) => {
streams[`${id}.vhd`] = await createSyntheticStream(handler, join(dir, vhds[id]))
})
return {
streams,
vbds,
vdis,
version: '1.0.0',
vifs,
vm,
}
}
readFullVmBackup(metadata) {
return this._handler.createReadStream(resolve('/', dirname(metadata._filename), metadata.xva))
}
async readVmBackupMetadata(path) {
return Object.defineProperty(JSON.parse(await this._handler.readFile(path)), '_filename', { value: path })
}
}
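
A browsing sketch for the disk/partition API above; the partition IDs are hypothetical and follow the formats documented before getPartition().

// accepted partitionId formats (values hypothetical):
//   adapter.getPartition(diskId)                  // raw disk
//   adapter.getPartition(diskId, '2')             // second partition
//   adapter.getPartition(diskId, '2/vg0/lv_root') // LVM LV on partition 2
//   adapter.getPartition(diskId, '/vg0/lv_root')  // LVM LV on a raw disk
async function browse(adapter, diskId) {
  const partitions = await adapter.listPartitions(diskId)
  // entries are keyed by name, directories get a trailing slash
  return adapter.listPartitionFiles(diskId, partitions[0]?.id, '/')
}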

View File

@@ -0,0 +1,24 @@
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
const { PATH_DB_DUMP } = require('./_PoolMetadataBackup')
exports.RestoreMetadataBackup = class RestoreMetadataBackup {
constructor({ backupId, handler, xapi }) {
this._backupId = backupId
this._handler = handler
this._xapi = xapi
}
async run() {
const backupId = this._backupId
const handler = this._handler
const xapi = this._xapi
if (backupId.split('/')[0] === DIR_XO_POOL_METADATA_BACKUPS) {
return xapi.putResource(await handler.createReadStream(`${backupId}/data`), PATH_DB_DUMP, {
task: xapi.createTask('Import pool metadata'),
})
} else {
return String(await handler.readFile(`${backupId}/data.json`))
}
}
}

View File

@@ -0,0 +1,174 @@
const Zone = require('node-zone')
const { SyncThenable } = require('./_syncThenable')
const logAfterEnd = () => {
throw new Error('task has already ended')
}
// Create a serializable object from an error.
//
// Otherwise some fields might be non-enumerable and missing from logs.
const serializeError = error =>
error instanceof Error
? {
...error, // Copy enumerable properties.
code: error.code,
message: error.message,
name: error.name,
stack: error.stack,
}
: error
exports.serializeError = serializeError
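// illustration of the problem solved above:
//   JSON.stringify(new Error('boom'))                 // → '{}'
//   JSON.stringify(serializeError(new Error('boom'))) // → includes message, name, stack…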
class TaskLogger {
constructor(logFn, parentId) {
this._log = logFn
this._parentId = parentId
this._taskId = undefined
}
get taskId() {
const taskId = this._taskId
if (taskId === undefined) {
throw new Error('start the task first')
}
return taskId
}
// create a subtask
fork() {
return new TaskLogger(this._log, this.taskId)
}
info(message, data) {
return this._log({
data,
event: 'info',
message,
taskId: this.taskId,
timestamp: Date.now(),
})
}
run(message, data, fn) {
if (arguments.length === 2) {
fn = data
data = undefined
}
return SyncThenable.tryUnwrap(
SyncThenable.fromFunction(() => {
if (this._taskId !== undefined) {
throw new Error('task has already started')
}
this._taskId = Math.random().toString(36).slice(2)
return this._log({
data,
event: 'start',
message,
parentId: this._parentId,
taskId: this.taskId,
timestamp: Date.now(),
})
})
.then(fn)
.then(
result => {
const log = this._log
this._log = logAfterEnd
return SyncThenable.resolve(
log({
event: 'end',
result,
status: 'success',
taskId: this.taskId,
timestamp: Date.now(),
})
).then(() => result)
},
error => {
const log = this._log
this._log = logAfterEnd
return SyncThenable.resolve(
log({
event: 'end',
result: serializeError(error),
status: 'failure',
taskId: this.taskId,
timestamp: Date.now(),
})
).then(() => {
throw error
})
}
)
)
}
warning(message, data) {
return this._log({
data,
event: 'warning',
message,
taskId: this.taskId,
timestamp: Date.now(),
})
}
wrapFn(fn, message, data) {
const logger = this
return function () {
const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
return logger.run(evaluate(message), evaluate(data), () => fn.apply(this, arguments))
}
}
}
const $$task = Symbol('current task logger')
const getCurrent = () => Zone.current.data[$$task]
const Task = {
info(message, data) {
const task = getCurrent()
if (task !== undefined) {
return task.info(message, data)
}
},
run({ name, data, onLog }, fn) {
let parentId
if (onLog === undefined) {
const parent = getCurrent()
if (parent === undefined) {
return fn()
}
onLog = parent._log
parentId = parent.taskId
}
const task = new TaskLogger(onLog, parentId)
const zone = Zone.current.fork('task')
zone.data[$$task] = task
return task.run(name, data, zone.wrap(fn))
},
warning(message, data) {
const task = getCurrent()
if (task !== undefined) {
return task.warning(message, data)
}
},
wrapFn({ name, data, onLog }, fn) {
return function () {
const evaluate = v => (typeof v === 'function' ? v.apply(this, arguments) : v)
return Task.run({ name: evaluate(name), data: evaluate(data), onLog }, () => fn.apply(this, arguments))
}
},
}
exports.Task = Task
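
A usage sketch, assuming a `log` sink function: the root Task.run receives onLog explicitly, nested calls inherit it through the zone and are reported as subtasks.

const { Task } = require('./Task')

async function example(log) {
  await Task.run({ name: 'backup VM', data: { id: 'vm-1' }, onLog: log }, async () => {
    Task.info('starting') // logged under the current task
    await Task.run({ name: 'transfer' }, async () => {
      // inherits `log` and is reported as a subtask of 'backup VM'
    })
  })
}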

View File

@@ -0,0 +1,116 @@
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { formatFilenameDate } = require('./_filenameDate')
const { getOldEntries } = require('./_getOldEntries')
const { importDeltaVm, TAG_COPY_SRC } = require('./_deltaVm')
const { listReplicatedVms } = require('./_listReplicatedVms')
const { Task } = require('./Task')
exports.ContinuousReplicationWriter = class ContinuousReplicationWriter {
constructor(backup, sr, settings) {
this._backup = backup
this._settings = settings
this._sr = sr
this.run = Task.wrapFn(
{
name: 'export',
data: ({ deltaExport }) => ({
id: sr.uuid,
isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
type: 'SR',
}),
},
this.run
)
}
async checkBaseVdis(baseUuidToSrcVdi, baseVm) {
const sr = this._sr
const replicatedVm = listReplicatedVms(sr.$xapi, this._backup.job.id, sr.uuid, this._backup.vm.uuid).find(
vm => vm.other_config[TAG_COPY_SRC] === baseVm.uuid
)
if (replicatedVm === undefined) {
return baseUuidToSrcVdi.clear()
}
const xapi = replicatedVm.$xapi
const replicatedVdis = new Set(
await asyncMap(await replicatedVm.$getDisks(), async vdiRef => {
const otherConfig = await xapi.getField('VDI', vdiRef, 'other_config')
return otherConfig[TAG_COPY_SRC]
})
)
for (const uuid of baseUuidToSrcVdi.keys()) {
if (!replicatedVdis.has(uuid)) {
baseUuidToSrcVdi.delete(uuid)
}
}
}
async run({ timestamp, deltaExport, sizeContainers }) {
const sr = this._sr
const settings = this._settings
const { job, scheduleId, vm } = this._backup
const { uuid: srUuid, $xapi: xapi } = sr
// delete previous interrupted copies
ignoreErrors.call(
asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => xapi.VM_destroy(vm.$ref))
)
const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
const { deleteFirst } = settings
if (deleteFirst) {
await deleteOldBackups()
}
let targetVmRef
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await importDeltaVm(
{
__proto__: deltaExport,
vm: {
...deltaExport.vm,
tags: [...deltaExport.vm.tags, 'Continuous Replication'],
},
},
sr
)
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
const targetVm = await xapi.getRecord('VM', targetVmRef)
await Promise.all([
targetVm.ha_restart_priority !== '' &&
Promise.all([targetVm.set_ha_restart_priority(''), targetVm.add_tags('HA disabled')]),
targetVm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
targetVm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
targetVm.update_other_config({
'xo:backup:sr': srUuid,
// these entries need to be added in case of offline backup
'xo:backup:datetime': formatDateTime(timestamp),
'xo:backup:job': job.id,
'xo:backup:schedule': scheduleId,
'xo:backup:vm': vm.uuid,
}),
])
if (!deleteFirst) {
await deleteOldBackups()
}
}
}

View File

@@ -0,0 +1,210 @@
const assert = require('assert')
const map = require('lodash/map')
const mapValues = require('lodash/mapValues')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMap } = require('@xen-orchestra/async-map')
const { chainVhd, checkVhdChain, default: Vhd } = require('vhd-lib')
const { createLogger } = require('@xen-orchestra/log')
const { dirname } = require('path')
const { checkVhd } = require('./_checkVhd')
const { formatFilenameDate } = require('./_filenameDate')
const { getOldEntries } = require('./_getOldEntries')
const { getVmBackupDir } = require('./_getVmBackupDir')
const { packUuid } = require('./_packUuid')
const { Task } = require('./Task')
const { warn } = createLogger('xo:proxy:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter {
constructor(backup, remoteId, settings) {
this._adapter = backup.remoteAdapters[remoteId]
this._backup = backup
this._settings = settings
this.run = Task.wrapFn(
{
name: 'export',
data: ({ deltaExport }) => ({
id: remoteId,
isFull: Object.values(deltaExport.vdis).some(vdi => vdi.other_config['xo:base_delta'] === undefined),
type: 'remote',
}),
},
this.run
)
}
async checkBaseVdis(baseUuidToSrcVdi) {
const { handler } = this._adapter
const backup = this._backup
const backupDir = getVmBackupDir(backup.vm.uuid)
const vdisDir = `${backupDir}/vdis/${backup.job.id}`
await asyncMap(baseUuidToSrcVdi, async ([baseUuid, srcVdi]) => {
let found = false
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
prependDir: true,
})
await asyncMap(vhds, async path => {
try {
await checkVhdChain(handler, path)
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
found = found || vhd.footer.uuid.equals(packUuid(baseUuid))
} catch (error) {
warn('checkBaseVdis', { error })
await ignoreErrors.call(handler.unlink(path))
}
})
} catch (error) {
warn('checkBaseVdis', { error })
}
if (!found) {
baseUuidToSrcVdi.delete(baseUuid)
}
})
}
async run({ timestamp, deltaExport, sizeContainers }) {
const adapter = this._adapter
const backup = this._backup
const settings = this._settings
const { job, scheduleId, vm } = backup
const jobId = job.id
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
const oldBackups = getOldEntries(
settings.exportRetention - 1,
await adapter.listVmBackups(vm.uuid, _ => _.mode === 'delta' && _.scheduleId === scheduleId)
)
// FIXME: implement optimized multiple VHDs merging with synthetic
// delta
//
// For the time being, limit the number of deleted backups by run
// because it can take a very long time and can lead to
// interrupted backup with broken VHD chain.
//
// The old backups will be eventually merged in future runs of the
// job.
const { maxMergedDeltasPerRun } = this._settings
if (oldBackups.length > maxMergedDeltasPerRun) {
oldBackups.length = maxMergedDeltasPerRun
}
const deleteOldBackups = () =>
Task.run({ name: 'merge' }, async () => {
let size = 0
// delete sequentially from newest to oldest to avoid unnecessary merges
for (let i = oldBackups.length; i-- > 0; ) {
size += await adapter.deleteDeltaVmBackups([oldBackups[i]])
}
return {
size,
}
})
const basename = formatFilenameDate(timestamp)
const vhds = mapValues(
deltaExport.vdis,
vdi =>
`vdis/${jobId}/${
vdi.type === 'suspend'
? // doesn't make sense to group by parent for memory because we
// don't do delta for it
vdi.uuid
: vdi.$snapshot_of$uuid
}/${basename}.vhd`
)
const metadataFilename = `${backupDir}/${basename}.json`
const metadataContent = {
jobId,
mode: job.mode,
scheduleId,
timestamp,
vbds: deltaExport.vbds,
vdis: deltaExport.vdis,
version: '2.0.0',
vifs: deltaExport.vifs,
vhds,
vm,
vmSnapshot: this._backup.exportedVm,
}
const { deleteFirst } = settings
if (deleteFirst) {
await deleteOldBackups()
}
const { size } = await Task.run({ name: 'transfer' }, async () => {
await Promise.all(
map(deltaExport.vdis, async (vdi, id) => {
const path = `${backupDir}/${vhds[id]}`
const isDelta = vdi.other_config['xo:base_delta'] !== undefined
let parentPath
if (isDelta) {
const vdiDir = dirname(path)
parentPath = (
await handler.list(vdiDir, {
filter: filename => filename[0] !== '.' && filename.endsWith('.vhd'),
prependDir: true,
})
)
.sort()
.pop()
assert.notStrictEqual(parentPath, undefined, `missing parent of ${id}`)
parentPath = parentPath.slice(1) // remove leading slash
// TODO remove when this has been done before the export
await checkVhd(handler, parentPath)
}
await adapter.outputStream(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
})
if (isDelta) {
await chainVhd(handler, parentPath, handler, path)
}
// set the correct UUID in the VHD
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
vhd.footer.uuid = packUuid(vdi.uuid)
await vhd.readBlockAllocationTable() // required by writeFooter()
await vhd.writeFooter()
})
)
return {
size: Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0),
}
})
metadataContent.size = size
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
if (!deleteFirst) {
await deleteOldBackups()
}
// TODO: run cleanup?
}
}
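
A worked example of the merge throttling above, with hypothetical numbers:

// exportRetention = 3, 8 existing delta backups, maxMergedDeltasPerRun = 2:
//   getOldEntries(2, backups) // → the 6 oldest backups (retention - 1,
//                             //   because the backup being written counts too)
//   oldBackups.length = 2     // only the 2 oldest are merged this run
//   // deletion order: oldBackups[1] then oldBackups[0]
// the 4 remaining out-of-retention backups are merged by later runs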

View File

@@ -0,0 +1,85 @@
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { asyncMapSettled } = require('@xen-orchestra/async-map')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { formatFilenameDate } = require('./_filenameDate')
const { getOldEntries } = require('./_getOldEntries')
const { listReplicatedVms } = require('./_listReplicatedVms')
const { Task } = require('./Task')
exports.DisasterRecoveryWriter = class DisasterRecoveryWriter {
constructor(backup, sr, settings) {
this._backup = backup
this._settings = settings
this._sr = sr
this.run = Task.wrapFn(
{
name: 'export',
data: {
id: sr.uuid,
type: 'SR',
// necessary?
isFull: true,
},
},
this.run
)
}
async run({ timestamp, sizeContainer, stream }) {
const sr = this._sr
const settings = this._settings
const { job, scheduleId, vm } = this._backup
const { uuid: srUuid, $xapi: xapi } = sr
// delete previous interrupted copies
ignoreErrors.call(
asyncMapSettled(listReplicatedVms(xapi, scheduleId, undefined, vm.uuid), vm => xapi.VM_destroy(vm.$ref))
)
const oldVms = getOldEntries(settings.copyRetention - 1, listReplicatedVms(xapi, scheduleId, srUuid, vm.uuid))
const deleteOldBackups = () => asyncMapSettled(oldVms, vm => xapi.VM_destroy(vm.$ref))
const { deleteFirst } = settings
if (deleteFirst) {
await deleteOldBackups()
}
let targetVmRef
await Task.run({ name: 'transfer' }, async () => {
targetVmRef = await xapi.VM_import(stream, sr.$ref, vm =>
Promise.all([
vm.add_tags('Disaster Recovery'),
vm.ha_restart_priority !== '' && Promise.all([vm.set_ha_restart_priority(''), vm.add_tags('HA disabled')]),
vm.set_name_label(`${vm.name_label} - ${job.name} - (${formatFilenameDate(timestamp)})`),
])
)
return { size: sizeContainer.size }
})
const targetVm = await xapi.getRecord('VM', targetVmRef)
await Promise.all([
targetVm.update_blocked_operations(
'start',
'Start operation for this vm is blocked, clone it if you want to use it.'
),
targetVm.update_other_config({
'xo:backup:sr': srUuid,
// these entries need to be added in case of offline backup
'xo:backup:datetime': formatDateTime(timestamp),
'xo:backup:job': job.id,
'xo:backup:schedule': scheduleId,
'xo:backup:vm': vm.uuid,
}),
])
if (!deleteFirst) {
await deleteOldBackups()
}
}
}

View File

@@ -0,0 +1,90 @@
const { formatFilenameDate } = require('./_filenameDate')
const { getOldEntries } = require('./_getOldEntries')
const { getVmBackupDir } = require('./_getVmBackupDir')
const { isValidXva } = require('./isValidXva')
const { Task } = require('./Task')
exports.FullBackupWriter = class FullBackupWriter {
constructor(backup, remoteId, settings) {
this._backup = backup
this._remoteId = remoteId
this._settings = settings
this.run = Task.wrapFn(
{
name: 'export',
data: {
id: remoteId,
type: 'remote',
// necessary?
isFull: true,
},
},
this.run
)
}
async run({ timestamp, sizeContainer, stream }) {
const backup = this._backup
const remoteId = this._remoteId
const settings = this._settings
const { job, scheduleId, vm } = backup
const adapter = backup.remoteAdapters[remoteId]
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
const oldBackups = getOldEntries(
settings.exportRetention - 1,
await adapter.listVmBackups(vm.uuid, _ => _.mode === 'full' && _.scheduleId === scheduleId)
)
const deleteOldBackups = () => adapter.deleteFullVmBackups(oldBackups)
const basename = formatFilenameDate(timestamp)
const dataBasename = basename + '.xva'
const dataFilename = backupDir + '/' + dataBasename
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,
scheduleId,
timestamp,
version: '2.0.0',
vm,
vmSnapshot: this._backup.exportedVm,
xva: './' + dataBasename,
}
const { deleteFirst } = settings
if (deleteFirst) {
await deleteOldBackups()
}
await Task.run({ name: 'transfer' }, async () => {
await adapter.outputStream(dataFilename, stream, {
validator: tmpPath => {
if (handler._getFilePath !== undefined) {
return isValidXva(handler._getFilePath('/' + tmpPath))
}
},
})
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
if (!deleteFirst) {
await deleteOldBackups()
}
// TODO: run cleanup?
}
}

View File

@@ -0,0 +1,75 @@
const { asyncMap } = require('@xen-orchestra/async-map')
const { DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')
const { formatFilenameDate } = require('./_filenameDate')
const { Task } = require('./Task')
const PATH_DB_DUMP = '/pool/xmldbdump'
exports.PATH_DB_DUMP = PATH_DB_DUMP
exports.PoolMetadataBackup = class PoolMetadataBackup {
constructor({ config, job, pool, remoteAdapters, schedule, settings }) {
this._config = config
this._job = job
this._pool = pool
this._remoteAdapters = remoteAdapters
this._schedule = schedule
this._settings = settings
}
_exportPoolMetadata() {
const xapi = this._pool.$xapi
return xapi.getResource(PATH_DB_DUMP, {
task: xapi.createTask('Export pool metadata'),
})
}
async run() {
const timestamp = Date.now()
const { _job: job, _schedule: schedule, _pool: pool } = this
const poolDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${schedule.id}/${pool.$id}`
const dir = `${poolDir}/${formatFilenameDate(timestamp)}`
const stream = await this._exportPoolMetadata()
const fileName = `${dir}/data`
const metadata = JSON.stringify(
{
jobId: job.id,
jobName: job.name,
pool,
poolMaster: pool.$master,
scheduleId: schedule.id,
scheduleName: schedule.name,
timestamp,
},
null,
2
)
const metaDataFileName = `${dir}/metadata.json`
await asyncMap(
Object.entries(this._remoteAdapters),
([remoteId, adapter]) =>
Task.run(
{
name: `Starting metadata backup for the pool (${pool.$id}) for the remote (${remoteId}). (${job.id})`,
data: {
id: remoteId,
type: 'remote',
},
},
async () => {
// forkStreamUnpipe must be called synchronously: do not await a promise between obtaining the stream and forking it
await adapter.outputStream(fileName, forkStreamUnpipe(stream), { checksum: false })
await adapter.handler.outputFile(metaDataFileName, metadata, {
dirMode: this._config.dirMode,
})
await adapter.deleteOldMetadataBackups(poolDir, this._settings.retentionPoolMetadata)
}
).catch(() => {}) // errors are handled by logs
)
}
}

View File

@@ -0,0 +1,350 @@
const findLast = require('lodash/findLast')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const keyBy = require('lodash/keyBy')
const mapValues = require('lodash/mapValues')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createLogger } = require('@xen-orchestra/log')
const { formatDateTime } = require('@xen-orchestra/xapi')
const { ContinuousReplicationWriter } = require('./_ContinuousReplicationWriter')
const { DeltaBackupWriter } = require('./_DeltaBackupWriter')
const { DisasterRecoveryWriter } = require('./_DisasterRecoveryWriter')
const { exportDeltaVm } = require('./_deltaVm')
const { forkStreamUnpipe } = require('./_forkStreamUnpipe')
const { FullBackupWriter } = require('./_FullBackupWriter')
const { getOldEntries } = require('./_getOldEntries')
const { Task } = require('./Task')
const { watchStreamSize } = require('./_watchStreamSize')
const { debug, warn } = createLogger('xo:proxy:backups:VmBackup')
const forkDeltaExport = deltaExport =>
Object.create(deltaExport, {
streams: {
value: mapValues(deltaExport.streams, forkStreamUnpipe),
},
})
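// illustration: the fork shares the export metadata by prototype but gets its
// own streams
//   const copy = forkDeltaExport(deltaExport)
//   copy.vm === deltaExport.vm           // true (inherited)
//   copy.streams === deltaExport.streams // false (forked per consumer)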
exports.VmBackup = class VmBackup {
constructor({ config, getSnapshotNameLabel, job, remoteAdapters, remotes, schedule, settings, srs, vm }) {
this.config = config
this.job = job
this.remoteAdapters = remoteAdapters
this.remotes = remotes
this.scheduleId = schedule.id
this.timestamp = undefined
// VM currently backed up
this.vm = vm
const { tags } = this.vm
// VM (snapshot) that is really exported
this.exportedVm = undefined
this._fullVdisRequired = undefined
this._getSnapshotNameLabel = getSnapshotNameLabel
this._isDelta = job.mode === 'delta'
this._jobId = job.id
this._jobSnapshots = undefined
this._xapi = vm.$xapi
// Base VM for the export
this._baseVm = undefined
// Settings for this specific run (job, schedule, VM)
if (tags.includes('xo-memory-backup')) {
settings.checkpointSnapshot = true
}
if (tags.includes('xo-offline-backup')) {
settings.offlineSnapshot = true
}
this._settings = settings
// Create writers
{
const writers = []
this._writers = writers
const [BackupWriter, ReplicationWriter] = this._isDelta
? [DeltaBackupWriter, ContinuousReplicationWriter]
: [FullBackupWriter, DisasterRecoveryWriter]
const allSettings = job.settings
Object.keys(remoteAdapters).forEach(remoteId => {
const targetSettings = {
...settings,
...allSettings[remoteId],
}
if (targetSettings.exportRetention !== 0) {
writers.push(new BackupWriter(this, remoteId, targetSettings))
}
})
srs.forEach(sr => {
const targetSettings = {
...settings,
...allSettings[sr.uuid],
}
if (targetSettings.copyRetention !== 0) {
writers.push(new ReplicationWriter(this, sr, targetSettings))
}
})
}
}
// ensure the VM itself does not have any backup metadata which would be
// copied on manual snapshots and interfere with the backup jobs
async _cleanMetadata() {
const { vm } = this
if ('xo:backup:job' in vm.other_config) {
await vm.update_other_config({
'xo:backup:datetime': null,
'xo:backup:deltaChainLength': null,
'xo:backup:exported': null,
'xo:backup:job': null,
'xo:backup:schedule': null,
'xo:backup:vm': null,
})
}
}
async _snapshot() {
const { vm } = this
const xapi = this._xapi
const settings = this._settings
const doSnapshot = this._isDelta || vm.power_state === 'Running' || settings.snapshotRetention !== 0
if (doSnapshot) {
await Task.run({ name: 'snapshot' }, async () => {
if (!settings.bypassVdiChainsCheck) {
await vm.$assertHealthyVdiChains()
}
const snapshotRef = await vm[settings.checkpointSnapshot ? '$checkpoint' : '$snapshot'](
this._getSnapshotNameLabel(vm)
)
this.timestamp = Date.now()
await xapi.setFieldEntries('VM', snapshotRef, 'other_config', {
'xo:backup:datetime': formatDateTime(this.timestamp),
'xo:backup:job': this._jobId,
'xo:backup:schedule': this.scheduleId,
'xo:backup:vm': vm.uuid,
})
this.exportedVm = await xapi.getRecord('VM', snapshotRef)
return this.exportedVm.uuid
})
} else {
this.exportedVm = vm
this.timestamp = Date.now()
}
}
async _copyDelta() {
const { exportedVm } = this
const baseVm = this._baseVm
const deltaExport = await exportDeltaVm(exportedVm, baseVm, {
fullVdisRequired: this._fullVdisRequired,
})
const sizeContainers = mapValues(deltaExport.streams, watchStreamSize)
const timestamp = Date.now()
await asyncMap(this._writers, async writer => {
try {
await writer.run({
deltaExport: forkDeltaExport(deltaExport),
sizeContainers,
timestamp,
})
} catch (error) {
warn('copy failure', {
error,
target: writer.target,
vm: this.vm,
})
}
})
this._baseVm = exportedVm
if (baseVm !== undefined) {
await exportedVm.update_other_config(
'xo:backup:deltaChainLength',
String(+(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1)
)
}
// not the case with offline backup, where the VM itself is exported
if (exportedVm.is_a_snapshot) {
await exportedVm.update_other_config('xo:backup:exported', 'true')
}
const size = Object.values(sizeContainers).reduce((sum, { size }) => sum + size, 0)
const end = Date.now()
const duration = end - timestamp
debug('transfer complete', {
duration,
speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
size,
})
}
async _copyFull() {
const { compression } = this.job
const stream = await this._xapi.VM_export(this.exportedVm.$ref, {
compress: Boolean(compression) && (compression === 'native' ? 'gzip' : 'zstd'),
useSnapshot: false,
})
const sizeContainer = watchStreamSize(stream)
const timestamp = Date.now()
await asyncMap(this._writers, async writer => {
try {
await writer.run({
sizeContainer,
stream: forkStreamUnpipe(stream),
timestamp,
})
} catch (error) {
warn('copy failure', {
error,
target: writer.target,
vm: this.vm,
})
}
})
const { size } = sizeContainer
const end = Date.now()
const duration = end - timestamp
debug('transfer complete', {
duration,
speed: duration !== 0 ? (size * 1e3) / 1024 / 1024 / duration : 0,
size,
})
}
async _fetchJobSnapshots() {
const jobId = this._jobId
const vmRef = this.vm.$ref
const xapi = this._xapi
const snapshotsRef = await xapi.getField('VM', vmRef, 'snapshots')
const snapshotsOtherConfig = await asyncMap(snapshotsRef, ref => xapi.getField('VM', ref, 'other_config'))
const snapshots = []
snapshotsOtherConfig.forEach((other_config, i) => {
if (other_config['xo:backup:job'] === jobId) {
snapshots.push({ other_config, $ref: snapshotsRef[i] })
}
})
snapshots.sort((a, b) => (a.other_config['xo:backup:datetime'] < b.other_config['xo:backup:datetime'] ? -1 : 1))
this._jobSnapshots = snapshots
}
async _removeUnusedSnapshots() {
// TODO: handle all schedules (no longer existing schedules default to 0 retention)
const { scheduleId } = this
const scheduleSnapshots = this._jobSnapshots.filter(_ => _.other_config['xo:backup:schedule'] === scheduleId)
const baseVmRef = this._baseVm?.$ref
const xapi = this._xapi
await asyncMap(getOldEntries(this._settings.snapshotRetention, scheduleSnapshots), ({ $ref }) => {
if ($ref !== baseVmRef) {
return xapi.VM_destroy($ref)
}
})
}
async _selectBaseVm() {
const xapi = this._xapi
let baseVm = findLast(this._jobSnapshots, _ => 'xo:backup:exported' in _.other_config)
if (baseVm === undefined) {
return
}
const fullInterval = this._settings.fullInterval
const deltaChainLength = +(baseVm.other_config['xo:backup:deltaChainLength'] ?? 0) + 1
if (!(fullInterval === 0 || fullInterval > deltaChainLength)) {
return
}
const srcVdis = keyBy(await xapi.getRecords('VDI', await this.vm.$getDisks()), '$ref')
// resolve full record
baseVm = await xapi.getRecord('VM', baseVm.$ref)
const baseUuidToSrcVdi = new Map()
await asyncMap(await baseVm.$getDisks(), async baseRef => {
const snapshotOf = await xapi.getField('VDI', baseRef, 'snapshot_of')
const srcVdi = srcVdis[snapshotOf]
if (srcVdi !== undefined) {
baseUuidToSrcVdi.set(await xapi.getField('VDI', baseRef, 'uuid'), srcVdi)
}
})
const presentBaseVdis = new Map(baseUuidToSrcVdi)
const writers = this._writers
for (let i = 0, n = writers.length; presentBaseVdis.size !== 0 && i < n; ++i) {
await writers[i].checkBaseVdis(presentBaseVdis, baseVm)
}
if (presentBaseVdis.size === 0) {
return
}
const fullVdisRequired = new Set()
baseUuidToSrcVdi.forEach((srcVdi, baseUuid) => {
if (!presentBaseVdis.has(baseUuid)) {
fullVdisRequired.add(srcVdi.uuid)
}
})
this._baseVm = baseVm
this._fullVdisRequired = fullVdisRequired
}
async run() {
await this._fetchJobSnapshots()
if (this._isDelta) {
await this._selectBaseVm()
}
await this._cleanMetadata()
await this._removeUnusedSnapshots()
const { _settings: settings, vm } = this
const isRunning = vm.power_state === 'Running'
const startAfter = isRunning && (settings.offlineBackup ? 'backup' : settings.offlineSnapshot && 'snapshot')
if (startAfter) {
await vm.$callAsync('clean_shutdown')
}
try {
await this._snapshot()
if (startAfter === 'snapshot') {
ignoreErrors.call(vm.$callAsync('start', false, false))
}
if (this._writers.length !== 0) {
await (this._isDelta ? this._copyDelta() : this._copyFull())
}
} finally {
if (startAfter) {
ignoreErrors.call(vm.$callAsync('start', false, false))
}
await this._fetchJobSnapshots()
await this._removeUnusedSnapshots()
}
}
}

View File

@@ -0,0 +1,62 @@
const { asyncMap } = require('@xen-orchestra/async-map')
const { DIR_XO_CONFIG_BACKUPS } = require('./RemoteAdapter')
const { formatFilenameDate } = require('./_filenameDate')
const { Task } = require('./Task')
exports.XoMetadataBackup = class XoMetadataBackup {
constructor({ config, job, remoteAdapters, schedule, settings }) {
this._config = config
this._job = job
this._remoteAdapters = remoteAdapters
this._schedule = schedule
this._settings = settings
}
async run() {
const timestamp = Date.now()
const { _job: job, _schedule: schedule } = this
const scheduleDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
const dir = `${scheduleDir}/${formatFilenameDate(timestamp)}`
const data = job.xoMetadata
const fileName = `${dir}/data.json`
const metadata = JSON.stringify(
{
jobId: job.id,
jobName: job.name,
scheduleId: schedule.id,
scheduleName: schedule.name,
timestamp,
},
null,
2
)
const metaDataFileName = `${dir}/metadata.json`
await asyncMap(
Object.entries(this._remoteAdapters),
([remoteId, adapter]) =>
Task.run(
{
name: `Starting XO metadata backup for the remote (${remoteId}). (${job.id})`,
data: {
id: remoteId,
type: 'remote',
},
},
async () => {
const handler = adapter.handler
const dirMode = this._config.dirMode
await handler.outputFile(fileName, data, { dirMode })
await handler.outputFile(metaDataFileName, metadata, {
dirMode,
})
await adapter.deleteOldMetadataBackups(scheduleDir, this._settings.retentionXoMetadata)
}
).catch(() => {}) // errors are handled by logs
)
}
}

View File

@@ -0,0 +1,20 @@
const cancelable = require('promise-toolbox/cancelable')
const CancelToken = require('promise-toolbox/CancelToken')
// Similar to `Promise.all` + `map` but passes a cancel token to the callback
//
// If any of the executions fails, the cancel token is triggered and the
// returned promise rejects with the first error.
exports.cancelableMap = cancelable(async function cancelableMap($cancelToken, iterable, callback) {
const { cancel, token } = CancelToken.source([$cancelToken])
try {
return await Promise.all(
Array.from(iterable, function (item) {
return callback.call(this, token, item)
})
)
} catch (error) {
await cancel()
throw error
}
})
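
A usage sketch; `exportVdi` is a hypothetical callback that honors the token.

const { cancelableMap } = require('./_cancelableMap')

async function exportAll(vdiRefs, exportVdi) {
  // the first failure cancels the other exports before rethrowing
  return cancelableMap(vdiRefs, (token, ref) => exportVdi(ref, { cancelToken: token }))
}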

View File

@@ -0,0 +1,5 @@
const Vhd = require('vhd-lib').default
exports.checkVhd = async function checkVhd(handler, path) {
await new Vhd(handler, path).readHeaderAndFooter()
}

View File

@@ -0,0 +1,343 @@
const compareVersions = require('compare-versions')
const defer = require('golike-defer').default
const find = require('lodash/find')
const groupBy = require('lodash/groupBy')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { createVhdStreamWithLength } = require('vhd-lib')
const { cancelableMap } = require('./_cancelableMap')
const TAG_BASE_DELTA = 'xo:base_delta'
exports.TAG_BASE_DELTA = TAG_BASE_DELTA
const TAG_COPY_SRC = 'xo:copy_of'
exports.TAG_COPY_SRC = TAG_COPY_SRC
const ensureArray = value => (value === undefined ? [] : Array.isArray(value) ? value : [value])
exports.exportDeltaVm = async function exportDeltaVm(
vm,
baseVm,
{
cancelToken = CancelToken.none,
// Set of UUIDs of VDIs that must be exported as full.
fullVdisRequired = new Set(),
disableBaseTags = false,
} = {}
) {
// refs of VM's VDIs → base's VDIs.
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
let vdi, snapshotOf
if ((vdi = vbd.$VDI) && (snapshotOf = vdi.$snapshot_of) && !fullVdisRequired.has(snapshotOf.uuid)) {
baseVdis[vdi.snapshot_of] = vdi
}
})
const streams = {}
const vdis = {}
const vbds = {}
await cancelableMap(cancelToken, vm.$VBDs, async (cancelToken, vbd) => {
let vdi
if (vbd.type !== 'Disk' || !(vdi = vbd.$VDI)) {
// Ignore this VBD.
return
}
// If the VDI name starts with `[NOBAK]`, do not export it.
if (vdi.name_label.startsWith('[NOBAK]')) {
// FIXME: find a way to not create the VDI snapshot in the
// first place.
//
// The snapshot must not exist otherwise it could break the
// next export.
ignoreErrors.call(vdi.$destroy())
return
}
vbds[vbd.$ref] = vbd
const vdiRef = vdi.$ref
if (vdiRef in vdis) {
// This VDI has already been managed.
return
}
// Look for a snapshot of this vdi in the base VM.
const baseVdi = baseVdis[vdi.snapshot_of]
vdis[vdiRef] = {
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: baseVdi && !disableBaseTags ? baseVdi.uuid : undefined,
},
$snapshot_of$uuid: vdi.$snapshot_of?.uuid,
$SR$uuid: vdi.$SR.uuid,
}
streams[`${vdiRef}.vhd`] = await vdi.$exportContent({
baseRef: baseVdi?.$ref,
cancelToken,
format: 'vhd',
})
})
const suspendVdi = vm.$suspend_VDI
if (suspendVdi !== undefined) {
const vdiRef = suspendVdi.$ref
vdis[vdiRef] = {
...suspendVdi,
$SR$uuid: suspendVdi.$SR.uuid,
}
streams[`${vdiRef}.vhd`] = await suspendVdi.$exportContent({
cancelToken,
format: 'vhd',
})
}
const vifs = {}
vm.$VIFs.forEach(vif => {
const network = vif.$network
vifs[vif.$ref] = {
...vif,
$network$uuid: network.uuid,
$network$name_label: network.name_label,
$network$VLAN: network.$PIFs[0]?.VLAN,
}
})
return Object.defineProperty(
{
version: '1.1.0',
vbds,
vdis,
vifs,
vm: {
...vm,
other_config:
baseVm && !disableBaseTags
? {
...vm.other_config,
[TAG_BASE_DELTA]: baseVm.uuid,
}
: omit(vm.other_config, TAG_BASE_DELTA),
},
},
'streams',
{
configurable: true,
value: streams,
writable: true,
}
)
}
exports.importDeltaVm = defer(async function importDeltaVm(
$defer,
deltaVm,
sr,
{ cancelToken = CancelToken.none, detectBase = true, mapVdisSrs = {} } = {}
) {
const { version } = deltaVm
if (compareVersions(version, '1.0.0') < 0) {
throw new Error(`Unsupported delta backup version: ${version}`)
}
const vmRecord = deltaVm.vm
const xapi = sr.$xapi
let baseVm
if (detectBase) {
const remoteBaseVmUuid = vmRecord.other_config[TAG_BASE_DELTA]
if (remoteBaseVmUuid) {
baseVm = find(xapi.objects.all, obj => (obj = obj.other_config) && obj[TAG_COPY_SRC] === remoteBaseVmUuid)
if (!baseVm) {
throw new Error(`could not find the base VM (copy of ${remoteBaseVmUuid})`)
}
}
}
const baseVdis = {}
baseVm &&
baseVm.$VBDs.forEach(vbd => {
const vdi = vbd.$VDI
if (vdi !== undefined) {
baseVdis[vbd.VDI] = vbd.$VDI
}
})
const vdiRecords = deltaVm.vdis
// 0. Create suspend_VDI
let suspendVdi
if (vmRecord.power_state === 'Suspended') {
const vdi = vdiRecords[vmRecord.suspend_VDI]
suspendVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
sr: mapVdisSrs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => suspendVdi.$destroy())
}
// 1. Create the VM.
const vmRef = await xapi.VM_create(
{
...vmRecord,
affinity: undefined,
blocked_operations: {
...vmRecord.blocked_operations,
start: 'Importing…',
},
ha_always_run: false,
is_a_template: false,
name_label: '[Importing…] ' + vmRecord.name_label,
other_config: {
...vmRecord.other_config,
[TAG_COPY_SRC]: vmRecord.uuid,
},
},
{
bios_strings: vmRecord.bios_strings,
suspend_VDI: suspendVdi?.$ref,
}
)
$defer.onFailure.call(xapi, 'VM_destroy', vmRef)
// 2. Delete all VBDs which may have been created by the import.
await asyncMap(await xapi.getField('VM', vmRef, 'VBDs'), ref => ignoreErrors.call(xapi.call('VBD.destroy', ref)))
// 3. Create VDIs & VBDs.
const vbdRecords = deltaVm.vbds
const vbds = groupBy(vbdRecords, 'VDI')
const newVdis = {}
await asyncMap(Object.keys(vdiRecords), async vdiRef => {
const vdi = vdiRecords[vdiRef]
let newVdi
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
if (remoteBaseVdiUuid) {
const baseVdi = find(baseVdis, vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid)
if (!baseVdi) {
throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
}
newVdi = await xapi.getRecord('VDI', await baseVdi.$clone())
$defer.onFailure(() => newVdi.$destroy())
await newVdi.update_other_config(TAG_COPY_SRC, vdi.uuid)
} else if (vdiRef === vmRecord.suspend_VDI) {
// the suspend VDI has already been created
newVdi = suspendVdi
} else {
newVdi = await xapi.getRecord(
'VDI',
await xapi.VDI_create({
...vdi,
other_config: {
...vdi.other_config,
[TAG_BASE_DELTA]: undefined,
[TAG_COPY_SRC]: vdi.uuid,
},
SR: mapVdisSrs[vdi.uuid] ?? sr.$ref,
})
)
$defer.onFailure(() => newVdi.$destroy())
}
const vdiVbds = vbds[vdiRef]
if (vdiVbds !== undefined) {
await asyncMap(Object.values(vdiVbds), vbd =>
xapi.VBD_create({
...vbd,
VDI: newVdi.$ref,
VM: vmRef,
})
)
}
newVdis[vdiRef] = newVdi
})
const networksByNameLabelByVlan = {}
let defaultNetwork
Object.values(xapi.objects.all).forEach(object => {
if (object.$type === 'network') {
const pif = object.$PIFs[0]
if (pif === undefined) {
// ignore network
return
}
const vlan = pif.VLAN
const networksByNameLabel = networksByNameLabelByVlan[vlan] || (networksByNameLabelByVlan[vlan] = {})
defaultNetwork = networksByNameLabel[object.name_label] = object
}
})
const { streams } = deltaVm
await Promise.all([
// Import VDI contents.
cancelableMap(cancelToken, Object.entries(newVdis), async (cancelToken, [id, vdi]) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
if (typeof stream === 'function') {
stream = await stream()
}
if (stream.length === undefined) {
stream = await createVhdStreamWithLength(stream)
}
await vdi.$importContent(stream, { cancelToken, format: 'vhd' })
}
}),
// Wait for VDI export tasks (if any) termination.
Promise.all(Object.values(streams).map(stream => stream.task)),
// Create VIFs.
asyncMap(Object.values(deltaVm.vifs), vif => {
let network = vif.$network$uuid && xapi.getObjectByUuid(vif.$network$uuid, undefined)
if (network === undefined) {
const { $network$VLAN: vlan = -1 } = vif
const networksByNameLabel = networksByNameLabelByVlan[vlan]
if (networksByNameLabel !== undefined) {
network = networksByNameLabel[vif.$network$name_label]
if (network === undefined) {
network = networksByNameLabel[Object.keys(networksByNameLabel)[0]]
}
} else {
network = defaultNetwork
}
}
if (network) {
return xapi.VIF_create({
...vif,
network: network.$ref,
VM: vmRef,
})
}
}),
])
await Promise.all([
deltaVm.vm.ha_always_run && xapi.setField('VM', vmRef, 'ha_always_run', true),
xapi.setField('VM', vmRef, 'name_label', deltaVm.vm.name_label),
])
return vmRef
})
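A minimal usage sketch of the exported function (the module path, `deltaVm` payload and `sr` record are assumptions for illustration, not part of this diff):

```js
// inside an async function, with `deltaVm` coming from a previous delta export
// and `sr` being an SR record from a connected xen-api client
const { importDeltaVm } = require('@xen-orchestra/backups/_deltaVm') // path is an assumption

const vmRef = await importDeltaVm(deltaVm, sr, {
  detectBase: true, // match the base VM via the TAG_BASE_DELTA / TAG_COPY_SRC other_config tags
  mapVdisSrs: {}, // optional per-VDI-UUID → SR ref overrides
})
```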

View File

@@ -1,4 +1,4 @@
function extractIdsFromSimplePattern(pattern) {
exports.extractIdsFromSimplePattern = function extractIdsFromSimplePattern(pattern) {
if (pattern === undefined) {
return []
}
@@ -27,4 +27,3 @@ function extractIdsFromSimplePattern(pattern) {
throw new Error('invalid pattern')
}
exports.extractIdsFromSimplePattern = extractIdsFromSimplePattern

View File

@@ -0,0 +1,28 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
// from the original one
exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
proxy.destroy(error)
}
})
eos(proxy, _ => {
stream.forks--
stream.unpipe(proxy)
if (stream.forks === 0) {
stream.destroy(new Error('no more consumers for this stream'))
}
})
return proxy
}
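A usage sketch, assuming one source stream feeding two independent consumers:

```js
const { forkStreamUnpipe } = require('./_forkStreamUnpipe') // path is an assumption

// each fork is an independent PassThrough: an error on one fork merely
// unpipes it, and the source is destroyed once the last fork is gone
forkStreamUnpipe(source).pipe(destination1)
forkStreamUnpipe(source).pipe(destination2)
```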

View File

@@ -0,0 +1,4 @@
// returns all entries except the `retention` most recent ones
exports.getOldEntries = function getOldEntries(retention, entries) {
return entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries
}
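Concretely, with entries ordered from oldest to newest (illustrative values):

```js
const { getOldEntries } = require('./_getOldEntries') // path is an assumption

getOldEntries(2, ['a', 'b', 'c', 'd']) // → ['a', 'b'] (keeps the 2 newest)
getOldEntries(0, ['a', 'b', 'c', 'd']) // → ['a', 'b', 'c', 'd'] (keeps nothing)
getOldEntries(2, undefined) // → []
```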

View File

@@ -0,0 +1,20 @@
const Disposable = require('promise-toolbox/Disposable')
const { join } = require('path')
const { mkdir, rmdir } = require('fs-extra')
const { tmpdir } = require('os')
const MAX_ATTEMPTS = 3
exports.getTmpDir = async function getTmpDir() {
for (let i = 0; true; ++i) {
const path = join(tmpdir(), Math.random().toString(36).slice(2))
try {
await mkdir(path)
return new Disposable(path, () => rmdir(path))
} catch (error) {
if (i === MAX_ATTEMPTS) {
throw error
}
}
}
}
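The returned value is a promise-toolbox `Disposable`, so it is meant to be consumed with `using`, as done elsewhere in this changeset; a sketch:

```js
const using = require('promise-toolbox/using')
const { getTmpDir } = require('./_getTmpDir') // path is an assumption

// inside an async function: the directory is removed when the handler
// settles, whether it resolves or throws
await using(getTmpDir(), async path => {
  // work inside the temporary directory
})
```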

View File

@@ -0,0 +1,6 @@
const BACKUP_DIR = 'xo-vm-backups'
exports.BACKUP_DIR = BACKUP_DIR
exports.getVmBackupDir = function getVmBackupDir(uuid) {
return `${BACKUP_DIR}/${uuid}`
}

View File

@@ -0,0 +1,52 @@
const fromCallback = require('promise-toolbox/fromCallback')
const { createLogger } = require('@xen-orchestra/log')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')
const { debug } = createLogger('xo:proxy:api')
const IGNORED_PARTITION_TYPES = {
// https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
0x05: true,
0x0f: true,
0x15: true,
0x5e: true,
0x5f: true,
0x85: true,
0x91: true,
0x9b: true,
0xc5: true,
0xcf: true,
0xd5: true,
0x82: true, // swap
}
const LVM_PARTITION_TYPE = 0x8e
exports.LVM_PARTITION_TYPE = LVM_PARTITION_TYPE
const parsePartxLine = createParser({
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
valueTransform: (value, key) => (key === 'start' || key === 'size' || key === 'type' ? +value : value),
})
// returns an empty array in case of a non-partitioned disk
exports.listPartitions = async function listPartitions(devicePath) {
const parts = await fromCallback(execFile, 'partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
devicePath,
]).catch(error => {
// partx returns 1 since v2.33 when failing to read partitions.
//
// Prior versions are correctly handled by the nominal case.
debug('listPartitions', { error })
return ''
})
return parts
.split(/\r?\n/)
.map(parsePartxLine)
.filter(({ type }) => type != null && !(type in IGNORED_PARTITION_TYPES))
}
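For reference, `partx --pairs` emits one `KEY="value"` line per partition; a hypothetical MBR line and the record `parsePartxLine` derives from it:

```js
// hypothetical input line (values are illustrative):
//   NR="1" START="2048" SIZE="1073741824" NAME="" UUID="0007cd5b-01" TYPE="0x83"
// resulting record (start/size/type coerced to numbers, UUID renamed to id):
//   { nr: '1', start: 2048, size: 1073741824, name: '', id: '0007cd5b-01', type: 0x83 }
```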

View File

@@ -0,0 +1,30 @@
const getReplicatedVmDatetime = vm => {
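// fall back to the datetime suffix of the name label, e.g. a replicated
// "vm name (20180328T120000Z)" yields "20180328T120000Z" (illustrative example)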
const { 'xo:backup:datetime': datetime = vm.name_label.slice(-17, -1) } = vm.other_config
return datetime
}
const compareReplicatedVmDatetime = (a, b) => (getReplicatedVmDatetime(a) < getReplicatedVmDatetime(b) ? -1 : 1)
exports.listReplicatedVms = function listReplicatedVms(xapi, scheduleOrJobId, srUuid, vmUuid) {
const { all } = xapi.objects
const vms = {}
for (const key in all) {
const object = all[key]
const oc = object.other_config
if (
object.$type === 'VM' &&
!object.is_a_snapshot &&
!object.is_a_template &&
'start' in object.blocked_operations &&
(oc['xo:backup:job'] === scheduleOrJobId || oc['xo:backup:schedule'] === scheduleOrJobId) &&
oc['xo:backup:sr'] === srUuid &&
(oc['xo:backup:vm'] === vmUuid ||
// 2018-03-28, JFT: to catch VMs replicated before this fix
oc['xo:backup:vm'] === undefined)
) {
vms[object.$id] = object
}
}
return Object.values(vms).sort(compareReplicatedVmDatetime)
}

View File

@@ -0,0 +1,29 @@
const fromCallback = require('promise-toolbox/fromCallback')
const { createParser } = require('parse-pairs')
const { execFile } = require('child_process')
// ===================================================================
const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
const makeFunction = command => async (fields, ...args) => {
const info = await fromCallback(execFile, command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
'--unbuffered',
'--units',
'b',
'-o',
String(fields),
...args,
])
return info
.trim()
.split(/\r?\n/)
.map(Array.isArray(fields) ? parse : line => parse(line)[fields])
}
exports.lvs = makeFunction('lvs')
exports.pvs = makeFunction('pvs')

View File

@@ -0,0 +1,5 @@
const PARSE_UUID_RE = /-/g
exports.packUuid = function packUuid(uuid) {
return Buffer.from(uuid.replace(PARSE_UUID_RE, ''), 'hex')
}
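For instance, with an illustrative UUID, the 36-character string becomes a 16-byte buffer:

```js
packUuid('0e64c0b5-91b8-4ce3-80cf-b92dd7e1971c')
// → <Buffer 0e 64 c0 b5 91 b8 4c e3 80 cf b9 2d d7 e1 97 1c>
```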

View File

@@ -0,0 +1,46 @@
function fulfilledThen(cb) {
return typeof cb === 'function' ? SyncThenable.fromFunction(cb, this.value) : this
}
function rejectedThen(_, cb) {
return typeof cb === 'function' ? SyncThenable.fromFunction(cb, this.value) : this
}
class SyncThenable {
static resolve(value) {
if (value != null && typeof value.then === 'function') {
return value
}
return new this(false, value)
}
static fromFunction(fn, ...arg) {
try {
return this.resolve(fn(...arg))
} catch (error) {
return this.reject(error)
}
}
static reject(reason) {
return new this(true, reason)
}
// unwrap if it's a SyncThenable
static tryUnwrap(value) {
if (value instanceof this) {
if (value.then === rejectedThen) {
throw value.value
}
return value.value
}
return value
}
constructor(rejected, value) {
this.then = rejected ? rejectedThen : fulfilledThen
this.value = value
}
}
exports.SyncThenable = SyncThenable
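The point is to thread `then`-style callbacks without ever deferring to the microtask queue; a sketch:

```js
const { SyncThenable } = require('./_syncThenable') // path is an assumption

const value = SyncThenable.tryUnwrap(SyncThenable.resolve(2).then(n => n * 21))
// value === 42, computed fully synchronously; a rejected chain would
// make tryUnwrap throw instead
```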

View File

@@ -1,11 +1,8 @@
exports.watchStreamSize = stream => {
exports.watchStreamSize = function watchStreamSize(stream) {
const container = { size: 0 }
const isPaused = stream.isPaused()
stream.on('data', data => {
container.size += data.length
})
if (isPaused) {
stream.pause()
}
stream.pause()
return container
}
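A usage sketch of `watchStreamSize` — the stream is paused right away, so no data flows (and none is counted) before a consumer pipes it:

```js
const container = watchStreamSize(stream)
stream.pipe(destination)
// once the stream has ended, container.size holds the total bytes observed
```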

View File

@@ -0,0 +1,34 @@
const mapValues = require('lodash/mapValues')
const { dirname } = require('path')
function formatVmBackup(backup) {
return {
disks:
backup.vhds === undefined
? []
: Object.keys(backup.vhds).map(vdiId => {
const vdi = backup.vdis[vdiId]
return {
id: `${dirname(backup._filename)}/${backup.vhds[vdiId]}`,
name: vdi.name_label,
uuid: vdi.uuid,
}
}),
id: backup.id,
jobId: backup.jobId,
mode: backup.mode,
scheduleId: backup.scheduleId,
size: backup.size,
timestamp: backup.timestamp,
vm: {
name_description: backup.vm.name_description,
name_label: backup.vm.name_label,
},
}
}
// format all backups as returned by RemoteAdapter#listAllVmBackups()
exports.formatVmBackups = function formatVmBackups(backupsByVM) {
return mapValues(backupsByVM, backups => backups.map(formatVmBackup))
}

View File

@@ -1,3 +0,0 @@
// returns all entries but the last retention-th
exports.getOldEntries = (retention, entries) =>
entries === undefined ? [] : retention > 0 ? entries.slice(0, -retention) : entries

View File

@@ -8,16 +8,35 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.1",
"version": "0.7.0",
"engines": {
"node": ">=8.10"
"node": ">=14.5"
},
"scripts": {
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/disposable": "^0.1.0",
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^3.6.0",
"d3-time-format": "^3.0.0",
"fs-extra": "^9.0.0"
"end-of-stream": "^1.4.4",
"ensure-array": "^1.0.0",
"fs-extra": "^9.0.0",
"golike-defer": "^0.5.1",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.20",
"node-zone": "^0.4.0",
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.17.0",
"vhd-lib": "^1.0.0",
"yazl": "^2.5.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^0.4.4"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -0,0 +1,23 @@
const { DIR_XO_CONFIG_BACKUPS, DIR_XO_POOL_METADATA_BACKUPS } = require('./RemoteAdapter')
exports.parseMetadataBackupId = function parseMetadataBackupId(backupId) {
const [dir, ...rest] = backupId.split('/')
if (dir === DIR_XO_CONFIG_BACKUPS) {
const [scheduleId, timestamp] = rest
return {
type: 'xoConfig',
scheduleId,
timestamp,
}
} else if (dir === DIR_XO_POOL_METADATA_BACKUPS) {
const [scheduleId, poolUuid, timestamp] = rest
return {
type: 'pool',
poolUuid,
scheduleId,
timestamp,
}
}
throw new Error(`not supported backup dir (${dir})`)
}
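Concretely, the two accepted id shapes (assuming the directory constants resolve to `xo-config-backups` and `xo-pool-metadata-backups`; segment values are illustrative):

```js
parseMetadataBackupId('xo-config-backups/<scheduleId>/<timestamp>')
// → { type: 'xoConfig', scheduleId: '<scheduleId>', timestamp: '<timestamp>' }

parseMetadataBackupId('xo-pool-metadata-backups/<scheduleId>/<poolUuid>/<timestamp>')
// → { type: 'pool', poolUuid: '<poolUuid>', scheduleId: '<scheduleId>', timestamp: '<timestamp>' }
```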

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env node
const defer = require('golike-defer').default
const { NULL_REF, Xapi } = require('xen-api')
const { Ref, Xapi } = require('xen-api')
const pkg = require('./package.json')
@@ -11,7 +11,7 @@ Xapi.prototype.getVmDisks = async function (vm) {
...vm.VBDs.map(async vbdRef => {
const vbd = await this.getRecord('VBD', vbdRef)
let vdiRef
if (vbd.type === 'Disk' && (vdiRef = vbd.VDI) !== NULL_REF) {
if (vbd.type === 'Disk' && Ref.isNotEmpty((vdiRef = vbd.VDI))) {
disks[vbd.userdevice] = await this.getRecord('VDI', vdiRef)
}
}),

View File

@@ -17,8 +17,8 @@
},
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.4.1",
"xen-api": "^0.29.0"
"golike-defer": "^0.5.1",
"xen-api": "^0.30.0"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -28,7 +28,6 @@
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],

View File

@@ -18,7 +18,6 @@
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],

View File

@@ -18,7 +18,6 @@
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.12.1",
"version": "0.13.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -14,7 +14,6 @@
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
@@ -25,7 +24,7 @@
"@marsaud/smb2": "^0.17.2",
"@sindresorhus/df": "^3.1.1",
"@sullux/aws-sdk": "^1.0.5",
"@xen-orchestra/async-map": "^0.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"aws-sdk": "^2.686.0",
"decorator-synchronized": "^0.5.0",
"execa": "^5.0.0",
@@ -33,7 +32,7 @@
"get-stream": "^6.0.0",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.16.0",
"promise-toolbox": "^0.17.0",
"readable-stream": "^3.0.6",
"through2": "^4.0.2",
"tmp": "^0.2.1",

View File

@@ -3,8 +3,7 @@
// $FlowFixMe
import getStream from 'get-stream'
import asyncMap from '@xen-orchestra/async-map'
import CancelToken from 'promise-toolbox/CancelToken'
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import limit from 'limit-concurrency-decorator'
import path, { basename } from 'path'
import synchronized from 'decorator-synchronized'
@@ -120,6 +119,42 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
// TODO: remove method
async createOutputStream(file: File, { checksum = false, dirMode, ...options }: Object = {}): Promise<LaxWritable> {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
dirMode,
flags: 'wx',
...options,
}),
this._timeout
)
if (!checksum) {
return streamP
}
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
// $FlowFixMe
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
createReadStream(
file: File,
{ checksum = false, ignoreMissingChecksum = false, ...options }: Object = {}
@@ -176,13 +211,12 @@ export default class RemoteHandlerAbstract {
async outputStream(
path: string,
input: Readable | Promise<Readable>,
{ checksum = true, dirMode, cancelToken = CancelToken.none }: { checksum?: boolean, dirMode?: number } = {}
{ checksum = true, dirMode }: { checksum?: boolean, dirMode?: number } = {}
): Promise<void> {
path = normalizePath(path)
return this._outputStream(await input, normalizePath(path), {
return this._outputStream(normalizePath(path), await input, {
checksum,
dirMode,
cancelToken,
})
}
@@ -443,51 +477,13 @@ export default class RemoteHandlerAbstract {
return this._outputFile(file, data, { flags })
}
async _createOutputStreamChecksum(file: File, { checksum = false, ...options }: Object = {}): Promise<LaxWritable> {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
flags: 'wx',
...options,
}),
this._timeout
)
if (!checksum) {
return streamP
}
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
// $FlowFixMe
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
async _outputStream(
input: Readable,
path: string,
{ checksum, dirMode, cancelToken = CancelToken.none }: { checksum?: boolean, dirMode?: number }
) {
async _outputStream(path: string, input: Readable, { checksum, dirMode }: { checksum?: boolean, dirMode?: number }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await this._createOutputStreamChecksum(tmpPath, { checksum })
const output = await this.createOutputStream(tmpPath, {
checksum,
dirMode,
})
try {
cancelToken.promise.then(reason => {
input.destroy(reason)
})
input.pipe(output)
await fromEvent(output, 'finish')
await output.checksumWritten
@@ -526,7 +522,7 @@ export default class RemoteHandlerAbstract {
}
const files = await this._list(dir)
await asyncMap(files, file =>
await asyncMapSettled(files, file =>
this._unlink(`${dir}/${file}`).catch(error => {
if (error.code === 'EISDIR') {
return this._rmtree(`${dir}/${file}`)

View File

@@ -30,6 +30,18 @@ describe('closeFile()', () => {
})
})
describe('createOutputStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createOutputStream: () => new Promise(() => {}),
})
const promise = testHandler.createOutputStream('File')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({

View File

@@ -3,9 +3,11 @@
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'
import { getHandler } from './'
import { getHandler } from '.'
// https://gist.github.com/julien-f/3228c3f34fdac01ade09
const unsecureRandomBytes = n => {
@@ -80,9 +82,10 @@ handlers.forEach(url => {
})
})
describe('#outputStream()', () => {
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
await handler.outputStream('dir/file', createTestDataStream())
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})

View File

@@ -1,5 +1,6 @@
// @flow
import execa from 'execa'
import { parse } from 'xo-remote-parser'
import type RemoteHandler from './abstract'
import RemoteHandlerLocal from './local'
@@ -25,10 +26,7 @@ try {
}
export const getHandler = (remote: Remote, ...rest: any): RemoteHandler => {
// FIXME: should be done in xo-remote-parser.
const type = remote.url.split('://')[0]
const Handler = HANDLERS[type]
const Handler = HANDLERS[parse(remote.url).type]
if (!Handler) {
throw new Error('Unhandled remote type')
}

View File

@@ -5,7 +5,6 @@ import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
import { createChecksumStream } from './checksum'
import CancelToken from 'promise-toolbox/CancelToken'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -15,13 +14,10 @@ const MAX_PART_SIZE = 1024 * 1024 * 1024 * 5 // 5GB
const MAX_PARTS_COUNT = 10000
const MAX_OBJECT_SIZE = 1024 * 1024 * 1024 * 1024 * 5 // 5TB
const IDEAL_FRAGMENT_SIZE = Math.ceil(MAX_OBJECT_SIZE / MAX_PARTS_COUNT) // the smallest fragment size that still allows a 5TB upload in 10000 fragments, about 524MB
const USE_SSL = true
export default class S3Handler extends RemoteHandlerAbstract {
constructor(remote, _opts) {
super(remote)
const { host, path, username, password } = parse(remote.url)
// https://www.zenko.io/blog/first-things-first-getting-started-scality-s3-server/
const { host, path, username, password, protocol, region } = parse(remote.url)
const params = {
accessKeyId: username,
apiVersion: '2006-03-01',
@@ -33,10 +29,14 @@ export default class S3Handler extends RemoteHandlerAbstract {
timeout: 600000,
},
}
if (!USE_SSL) {
if (protocol === 'http') {
params.httpOptions.agent = new http.Agent()
params.sslEnabled = false
}
if (region !== undefined) {
params.region = region
}
this._s3 = aws(params).s3
const splitPath = path.split('/').filter(s => s.length)
@@ -52,10 +52,7 @@ export default class S3Handler extends RemoteHandlerAbstract {
return { Bucket: this._bucket, Key: this._dir + file }
}
async _outputStream(input, path, { checksum, cancelToken = CancelToken.none }) {
cancelToken.promise.then(reason => {
input.destroy(reason)
})
async _outputStream(path, input, { checksum }) {
let inputStream = input
if (checksum) {
const checksumStream = createChecksumStream()
@@ -66,20 +63,10 @@ export default class S3Handler extends RemoteHandlerAbstract {
input.on('error', forwardError)
inputStream = checksumStream
}
await this._s3.upload(
{
...this._createParams(path),
Body: inputStream,
},
{ partSize: IDEAL_FRAGMENT_SIZE, queueSize: 1 }
)
await this._s3.putObject({ ...this._createParams(path), Body: inputStream })
if (checksum) {
const checksum = await inputStream.checksum
const params = {
...this._createParams(path + '.checksum'),
Body: checksum,
}
await this._s3.upload(params)
await this._s3.putObject({ ...this._createParams(path + '.checksum'), Body: checksum })
}
await input.task
}
@@ -278,26 +265,4 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {}
// https://stackoverflow.com/a/48955582/72637
async _rmtree(dir) {
const listParams = {
Bucket: this._bucket,
Prefix: this._dir + dir,
}
let listedObjects = {}
do {
listedObjects = await this._s3.listObjectsV2({
...listParams,
ContinuationToken: listedObjects.NextContinuationToken,
})
if (listedObjects.Contents.length === 0) {
return
}
await this._s3.deleteObjects({
Bucket: this._bucket,
Delete: { Objects: listedObjects.Contents.map(({ Key }) => ({ Key })) },
})
} while (listedObjects.IsTruncated)
}
}

View File

@@ -18,7 +18,6 @@
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"configure.js",
"dist/",
@@ -32,7 +31,7 @@
},
"dependencies": {
"lodash": "^4.17.4",
"promise-toolbox": "^0.16.0"
"promise-toolbox": "^0.17.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -18,7 +18,6 @@
},
"preferGlobal": false,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],

View File

@@ -0,0 +1,24 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map
/examples/
example.js
example.js.map
*.example.js
*.example.js.map
/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map
/test/
/tests/
*.spec.js
*.spec.js.map
__snapshots__/

View File

@@ -0,0 +1,46 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @xen-orchestra/proxy-cli
[![Package Version](https://badgen.net/npm/v/@xen-orchestra/proxy-cli)](https://npmjs.org/package/@xen-orchestra/proxy-cli) ![License](https://badgen.net/npm/license/@xen-orchestra/proxy-cli) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@xen-orchestra/proxy-cli)](https://bundlephobia.com/result?p=@xen-orchestra/proxy-cli) [![Node compatibility](https://badgen.net/npm/node/@xen-orchestra/proxy-cli)](https://npmjs.org/package/@xen-orchestra/proxy-cli)
## Install
Installation of the [npm package](https://npmjs.org/package/@xen-orchestra/proxy-cli):
```
> npm install --global @xen-orchestra/proxy-cli
```
## Usage
```
$ xo-proxy-cli --help
Usage:
xo-proxy-cli <method> [<param>=<value>]...
Call a method of the API and display its result.
xo-proxy-cli [--file | -f] <file>
Read a CSON or JSON file containing an object with `method` and `params`
properties and call the API method.
The file can also contain an array containing multiple calls, which will be
run in sequence.
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,15 @@
```
$ xo-proxy-cli --help
Usage:
xo-proxy-cli <method> [<param>=<value>]...
Call a method of the API and display its result.
xo-proxy-cli [--file | -f] <file>
Read a CSON or JSON file containing an object with `method` and `params`
properties and call the API method.
The file can also contain an array containing multiple calls, which will be
run in sequence.
```

View File

@@ -0,0 +1,69 @@
{
"private": false,
"name": "@xen-orchestra/proxy-cli",
"version": "0.2.0",
"license": "AGPL-3.0-or-later",
"description": "",
"keywords": [
"backup",
"proxy",
"xen-orchestra",
"xo"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/proxy-cli",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/proxy-cli",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {
"xo-proxy-cli": "dist/index.js"
},
"files": [
"dist/"
],
"engines": {
"node": ">=8.10"
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@vates/read-chunk": "^0.1.1",
"ansi-colors": "^4.1.1",
"app-conf": "^0.9.0",
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",
"http-request-plus": "^0.9.1",
"json-rpc-protocol": "^0.13.1",
"promise-toolbox": "^0.15.1",
"pump": "^3.0.0",
"pumpify": "^2.0.1",
"split2": "^3.1.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.7.4",
"@babel/plugin-proposal-optional-chaining": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"postversion": "npm publish --access public",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
}
}

View File

@@ -0,0 +1,191 @@
#!/usr/bin/env node
import assert from 'assert'
import colors from 'ansi-colors'
import contentType from 'content-type'
import CSON from 'cson-parser'
import fromCallback from 'promise-toolbox/fromCallback'
import fs from 'fs'
import getopts from 'getopts'
import hrp from 'http-request-plus'
import pump from 'pump'
import split2 from 'split2'
import pumpify from 'pumpify'
import { extname, join } from 'path'
import { format, parse } from 'json-rpc-protocol'
import { inspect } from 'util'
import { load as loadConfig } from 'app-conf'
import { readChunk } from '@vates/read-chunk'
import pkg from '../package.json'
const FORMATS = {
__proto__: null,
cson: CSON.parse,
json: JSON.parse,
}
const parseValue = value => (value.startsWith('json:') ? JSON.parse(value.slice(5)) : value)
async function main(argv) {
const config = await loadConfig('xo-proxy', {
appDir: join(__dirname, '..'),
ignoreUnknownFormats: true,
})
const { hostname = 'localhost', port } = config?.http?.listen?.https ?? {}
const { _: args, file, help, host, raw, token } = getopts(argv, {
alias: { file: 'f', help: 'h' },
boolean: ['help', 'raw'],
default: {
token: config.authenticationToken,
},
stopEarly: true,
string: ['file', 'host', 'token'],
})
if (help || (file === '' && args.length === 0)) {
return console.log(
'%s',
`Usage:
xo-proxy-cli <method> [<param>=<value>]...
Call a method of the API and display its result.
xo-proxy-cli [--file | -f] <file>
Read a CSON or JSON file containing an object with \`method\` and \`params\`
properties and call the API method.
The file can also contain an array containing multiple calls, which will be
run in sequence.
${pkg.name} v${pkg.version}`
)
}
// sequence path of the current call
const callPath = []
const baseRequest = {
headers: {
'content-type': 'application/json',
cookie: `authenticationToken=${token}`,
},
pathname: '/api/v1',
protocol: 'https:',
rejectUnauthorized: false,
}
if (host !== '') {
baseRequest.host = host
} else {
baseRequest.hostname = hostname
baseRequest.port = port
}
const call = async ({ method, params }) => {
if (callPath.length !== 0) {
process.stderr.write(`\n${colors.bold(`--- call #${callPath.join('.')}`)} ---\n\n`)
}
const response = await hrp.post(baseRequest, {
body: format.request(0, method, params),
})
const { stdout } = process
const responseType = contentType.parse(response).type
if (responseType === 'application/octet-stream') {
if (stdout.isTTY) {
throw new Error('binary data, pipe to a file!')
}
await fromCallback(pump, response, stdout)
return
}
assert.strictEqual(responseType, 'application/json')
const lines = pumpify.obj(response, split2())
const firstLine = await readChunk(lines)
try {
const result = await parse.result(firstLine)
if (
result !== null &&
typeof result === 'object' &&
Object.keys(result).length === 1 &&
result.$responseType === 'ndjson'
) {
let line
while ((line = await readChunk(lines)) !== null) {
stdout.write(inspect(JSON.parse(line), { colors: true, depth: null }))
stdout.write('\n')
}
} else if (raw && typeof result === 'string') {
stdout.write(result)
} else {
stdout.write(inspect(result, { colors: true, depth: null }))
stdout.write('\n')
}
} catch (error) {
if (!(error?.code === 10 && 'errors' in error.data)) {
throw error
}
// we should be able to do better but the messages returned by ajv are not
// precise enough
//
// see https://github.com/epoberezkin/ajv/issues/1099
throw error.data.errors
}
}
const seq = async seq => {
const j = callPath.length
for (let i = 0, n = seq.length; i < n; ++i) {
callPath[j] = i + 1
await visit(seq[i])
}
callPath.pop()
}
const visit = node => {
if (Array.isArray(node)) {
return seq(node)
}
return call(node)
}
if (file !== '') {
const data = fs.readFileSync(file, 'utf8')
const ext = extname(file).slice(1).toLowerCase()
const parse = FORMATS[ext]
if (parse === undefined) {
throw new Error(`unsupported file: ${file}`)
}
await visit(parse(data))
} else {
const method = args[0]
const params = {}
for (let i = 1, n = args.length; i < n; ++i) {
const param = args[i]
const j = param.indexOf('=')
if (j === -1) {
throw new Error(`invalid param format: ${param}`)
}
params[param.slice(0, j)] = parseValue(param.slice(j + 1))
}
await call({ method, params })
}
}
main(process.argv.slice(2)).then(
() => {
process.exit(0)
},
error => {
console.error('exception in main:', error)
process.exit(1)
}
)

View File

@@ -0,0 +1 @@
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -0,0 +1,24 @@
/benchmark/
/benchmarks/
*.bench.js
*.bench.js.map
/examples/
example.js
example.js.map
*.example.js
*.example.js.map
/fixture/
/fixtures/
*.fixture.js
*.fixture.js.map
*.fixtures.js
*.fixtures.js.map
/test/
/tests/
*.spec.js
*.spec.js.map
__snapshots__/

View File

@@ -0,0 +1,18 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @xen-orchestra/proxy
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[AGPL-3.0-or-later](https://spdx.org/licenses/AGPL-3.0-or-later) © [Vates SAS](https://vates.fr)

View File


@@ -0,0 +1,80 @@
# Vendor config: DO NOT TOUCH!
#
# See sample.config.toml to override.
# This secret string is used to authenticate clients to the API.
#
# It must be defined to a non-empty string for the proxy to run.
authenticationToken = ''
datadir = '/var/lib/xo-proxy/data'
resourceCacheDelay = '5m'
[api]
keepAliveInterval = 10e3
[backups]
# Mode to use for newly created backup directories
#
# https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation
dirMode = 0o700
snapshotNameLabelTpl = '[XO Backup {job.name}] {vm.name_label}'
[backups.defaultSettings]
reportWhen = 'failure'
[backups.metadata.defaultSettings]
retentionPoolMetadata = 0
retentionXoMetadata = 0
[backups.vm.defaultSettings]
bypassVdiChainsCheck = false
checkpointSnapshot = false
concurrency = 2
deleteFirst = false
exportRetention = 0
fullInterval = 0
offlineBackup = false
offlineSnapshot = false
snapshotRetention = 0
timeout = 0
vmTimeout = 0
# This is a work-around.
#
# See https://github.com/vatesfr/xen-orchestra/pull/4674
maxMergedDeltasPerRun = 2
# Each `http.listen.<name>` entry defines a specific listening configuration for
# the HTTP server.
#
# `<name>` can be freely chosen.
[http.listen.https]
# Generate self-signed certificate if missing
autoCert = true
cert = '/var/lib/xo-proxy/certificate.pem'
key = '/var/lib/xo-proxy/key.pem'
port = 443
[remoteOptions]
mountsDir = '/run/xo-proxy/mounts'
# timeout in milliseconds (set to 0 to disable)
timeout = 600e3
# see https://github.com/vatesfr/xen-orchestra/issues/3419
# useSudo = false
[remotes]
disableFileRemotes = true
[xapiOptions]
# VDIs with the `[NOBAK]` flag can be ignored while snapshotting a halted VM.
#
# This is disabled by default for the time being but will be turned on after enough testing.
ignoreNobakVdis = false
maxUncoalescedVdis = 1
watchEvents = ['network', 'PIF', 'pool', 'SR', 'task', 'VBD', 'VDI', 'VIF', 'VM']

View File

@@ -0,0 +1,191 @@
## Transport
The API is based on line-delimited [JSON-RPC
2.0](https://www.jsonrpc.org/specification) over HTTP.
### Authentication
A valid authentication token should be attached as a cookie to all HTTP
requests:
```http
POST /api/v1 HTTP/1.1
Cookie: authenticationToken=TN2YBOMYtXB_hHtf4wTzm9p5tTuqq2i15yeuhcz2xXM
```
The server will respond to an invalid token with a `401 Unauthorized` status.
The server can request the client to update its token with a `Set-Cookie` header:
```http
HTTP/1.1 200 OK
Set-Cookie: authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs
```
### Remote Procedure Call
#### Request
A call is a JSON-RPC request over a POST HTTP request:
```http
POST /api/v1 HTTP/1.1
Host: proxy1.xo.company.tld
Content-Type: application/json
Content-Length: 69
```
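The body is a plain JSON-RPC 2.0 request; for example, calling `system.listMethods` (documented in the methods section below):

```json
{ "jsonrpc": "2.0", "id": 0, "method": "system.listMethods", "params": {} }
```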
#### Response
##### JSON-RPC response
Plain JSON-RPC response
```http
HTTP/1.1 200 OK
Content-Type: application/json
```
NDJSON Response
```http
HTTP/1.1 200 OK
Content-Type: application/json
```
##### Binary response
```http
HTTP/1.1 200 OK
Content-Type: application/octet-stream
```
## Methods
```ts
interface Remote {
url: string
options?: string
}
declare namespace system {
function listMethods(): string[]
function methodSignature(_: { name: string }): { params: { [string]: object } }
}
declare namespace event {
interface Event {
class: 'Task'
operation: 'add' | 'mod' | 'del'
snapshot: Task
}
function from(_: {
token: string = ''
timeout?: number
}): {
events: Event[]
token: string
}
}
declare namespace backup {
type SimpleIdPattern = { id: string | { __or: string[] } }
interface BackupJob {
id: string
compression?: 'native' | 'zstd' | ''
mode: Mode
name: string
remotes?: SimpleIdPattern
settings: $Dict<Settings>
srs?: SimpleIdPattern
type: 'backup'
vms: Pattern
}
interface MetadataBackupJob {
id: string
name: string
pools?: SimpleIdPattern
remotes: SimpleIdPattern
settings: Settings
type: 'metadataBackup'
xoMetadata?: string
}
interface Schedule {
id: string
}
interface Xapi {
allowUnauthorized: boolean
credentials: object
url: string
}
function importVmBackup(_: {
backupId: string
remote: Remote
srUuid: string
xapi: Xapi
streamLogs: boolean = false
}): string
function listPoolMetadataBackups(_: {
remotes: { [id: string]: Remote }
}): { [remoteId: string]: { [poolUuid: string]: object[] } }
function listVmBackups(_: {
remotes: { [remoteId: string]: Remote }
}): { [remoteId: string]: { [vmUuid: string]: object[] } }
function listXoMetadataBackups(_: { remotes: { [id: string]: Remote } }): { [remoteId: string]: object[] }
function run(_: {
job: BackupJob | MetadataBackupJob
remotes: { [id: string]: Remote }
schedule: Schedule
xapis?: { [id: string]: Xapi }
recordToXapi?: { [recordUuid: string]: string }
streamLogs: boolean = false
}): string
function restoreMetadataBackup(_: { backupId: string; remote: Remote; xapi: Xapi }): ReadableStream
}
declare namespace task {
type Status = 'canceled' | 'failure' | 'interrupted' | 'pending' | 'skipped' | 'success'
interface Task {
data: any
end?: number
id: string
start: number
status: Status
tasks?: Task[]
}
function cancel(_: { taskId: string })
function destroy(_: { taskId: string })
function get(_: { taskId: string }): string
function getAll(): Task[]
}
declare namespace remote {
function test(Remote): object
}
```

View File

@@ -0,0 +1,102 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.11.5",
"license": "AGPL-3.0-or-later",
"description": "",
"keywords": [
"backup",
"proxy",
"xen-orchestra",
"xo"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/proxy",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@xen-orchestra/proxy",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {
"xo-proxy": "dist/index.js"
},
"files": [
"config.toml",
"dist/",
"scripts/"
],
"engines": {
"node": ">=12"
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@vates/compose": "^2.0.0",
"@vates/decorate-with": "^0.0.1",
"@vates/disposable": "^0.1.0",
"@vates/parse-duration": "^0.1.0",
"@xen-orchestra/backups": "^0.7.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.13.0",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/self-signed": "^0.1.0",
"@xen-orchestra/xapi": "^0.4.4",
"ajv": "^6.10.0",
"app-conf": "^0.9.0",
"async-iterator-to-stream": "^1.1.0",
"compare-versions": "^3.4.0",
"fs-extra": "^8.1.0",
"get-stream": "^5.1.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.11.0",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.5.0",
"koa": "^2.5.1",
"koa-compress": "^3.0.0",
"koa-helmet": "^5.1.0",
"koa-router": "^7.4.0",
"lodash": "^4.17.10",
"ms": "^2.1.2",
"node-zone": "^0.4.0",
"parse-pairs": "^1.0.0",
"promise-toolbox": "^0.17.0",
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.30.0",
"xo-common": "^0.6.0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/plugin-proposal-class-properties": "^7.1.0",
"@babel/plugin-proposal-decorators": "^7.0.0",
"@babel/plugin-proposal-nullish-coalescing-operator": "^7.7.4",
"@babel/plugin-proposal-optional-chaining": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@vates/toggle-scripts": "^1.0.0",
"babel-plugin-transform-dev": "^2.0.1",
"cross-env": "^7.0.2",
"index-modules": "^0.3.0",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"_postinstall": "./scripts/systemd-service-installer",
"postpack": "toggle-scripts -postinstall -preuninstall",
"prebuild": "yarn run clean && index-modules --cjs-lazy src/app/mixins",
"predev": "yarn run prebuild",
"prepack": "toggle-scripts +postinstall +preuninstall",
"prepublishOnly": "yarn run build",
"_preuninstall": "./scripts/systemd-service-installer",
"start": "./dist/index.js"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
}
}

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env sh
set -eu
if [ "$(id -u)" -ne 0 ]
then
exit
fi
# $npm_package_name is not good enough here because it's a scoped package
NAME=xo-proxy
SERVICE_FILE=$(pwd)/$NAME.service
if [ "$npm_lifecycle_event" = postinstall ]
then
printf %s "[Unit]
Description=$NAME
After=network-online.target
[Service]
ExecStart=$npm_config_prefix/bin/$NAME
Restart=always
SyslogIdentifier=$NAME
[Install]
WantedBy=multi-user.target
" > "$SERVICE_FILE"
systemctl enable --now "$SERVICE_FILE"
elif [ "$npm_lifecycle_event" = "preuninstall" ]
then
systemctl disable --now "$SERVICE_FILE"
rm -f "$SERVICE_FILE"
fi

View File

@@ -0,0 +1 @@
export class Profile {}

View File

@@ -0,0 +1,52 @@
import camelCase from 'lodash/camelCase'
import { createDebounceResource } from '@vates/disposable/debounceResource'
import mixins from './mixins'
const { defineProperties, defineProperty, keys } = Object
const noop = Function.prototype
const MIXIN_CYCLIC_DESCRIPTOR = {
configurable: true,
get() {
throw new Error('cyclic dependency')
},
}
export default class App {
constructor(opts) {
// add lazy property for each of the mixin, this allows mixins to depend on
// one another without any special ordering
const descriptors = {}
keys(mixins).forEach(name => {
const Mixin = mixins[name]
name = camelCase(name)
descriptors[name] = {
configurable: true,
get: () => {
defineProperty(this, name, MIXIN_CYCLIC_DESCRIPTOR)
const instance = new Mixin(this, opts)
defineProperty(this, name, {
value: instance,
})
return instance
},
}
})
defineProperties(this, descriptors)
// access all mixin properties to trigger their creation
keys(descriptors).forEach(name => {
noop(this[name])
})
const debounceResource = createDebounceResource()
this.config.watchDuration('resourceCacheDelay', delay => {
debounceResource.defaultDelay = delay
})
this.hooks.once('stop', debounceResource.flushAll)
this.debounceResource = debounceResource
}
}

View File

@@ -0,0 +1,266 @@
import { format, parse, MethodNotFound } from 'json-rpc-protocol'
import * as errors from 'xo-common/api-errors'
import Ajv from 'ajv'
import asyncIteratorToStream from 'async-iterator-to-stream'
import compress from 'koa-compress'
import forOwn from 'lodash/forOwn'
import getStream from 'get-stream'
import helmet from 'koa-helmet'
import Koa from 'koa'
import once from 'lodash/once'
import Router from 'koa-router'
import Zone from 'node-zone'
import { createLogger } from '@xen-orchestra/log'
import { version as serverVersion } from '../../../package.json'
const { debug, warn } = createLogger('xo:proxy:api')
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
for await (const data of iterable) {
yield JSON.stringify(data) + '\n'
}
})
export default class Api {
constructor(app, { httpServer }) {
this._ajv = new Ajv({ allErrors: true })
this._methods = { __proto__: null }
const router = new Router({ prefix: '/api/v1' }).post('/', async ctx => {
// Before Node 13.0 there was an inactivity timeout of 2 mins, which may
// not be enough for the API.
ctx.req.setTimeout(0)
const profile = await app.authentication.findProfile({
authenticationToken: ctx.cookies.get('authenticationToken'),
})
if (profile === undefined) {
ctx.status = 401
return
}
let body = await getStream(ctx.req)
try {
body = parse(body)
} catch (error) {
ctx.body = format.error(null, error)
return
}
const zone = Zone.current.fork('POST /api')
zone.data.profile = profile
let result
try {
result = await zone.run(() => this._call(body.method, body.params))
} catch (error) {
const { method, params } = body
warn('call error', { method, params, error })
ctx.set('Content-Type', 'application/json')
ctx.body = format.error(body.id, error)
return
}
if (typeof result?.pipe === 'function' && !result._readableState?.objectMode) {
ctx.body = result
return
}
ctx.set('Content-Type', 'application/json')
const isAsyncIterable =
result !== null &&
typeof result === 'object' &&
(typeof result[Symbol.iterator] === 'function' || typeof result[Symbol.asyncIterator] === 'function')
if (isAsyncIterable) {
const stream = ndJsonStream(body.id, result)
ctx.body = stream
const keepAliveInterval = app.config.get('api.keepAliveInterval')
if (keepAliveInterval !== 0) {
// In the wild, long-term HTTP requests with periods of inactivity often
// break; send some data at regular intervals to keep the connection open.
const stopTimer = clearInterval.bind(
undefined,
setInterval(() => stream.push(' '), keepAliveInterval)
)
stream.on('end', stopTimer).on('error', stopTimer)
}
} else {
ctx.body = format.response(body.id, result !== undefined ? result : true)
}
})
const koa = new Koa()
.on('error', warn)
.use(helmet())
.use(compress())
.use(router.routes())
.use(router.allowedMethods())
httpServer.on('request', koa.callback())
this.addMethods({
system: {
getMethodsInfo: [
function* () {
const methods = this._methods
for (const name in methods) {
const { description, params = {} } = methods[name]
yield { description, name, params }
}
}.bind(this),
{
description: 'returns the signatures of all available API methods',
},
],
getServerVersion: [
() => serverVersion,
{
description: 'returns the version of xo-server',
},
],
listMethods: [
function* () {
const methods = this._methods
for (const name in methods) {
yield name
}
}.bind(this),
{
description: 'returns the name of all available API methods',
},
],
methodSignature: [
({ method: name }) => {
const method = this._methods[name]
if (method === undefined) {
throw errors.noSuchObject('method', name)
}
const { description, params = {} } = method
return { description, name, params }
},
{
description: 'returns the signature of an API method',
params: {
method: { type: 'string' },
},
},
],
},
test: {
range: [
function* ({ start = 0, stop, step }) {
if (step === undefined) {
step = start > stop ? -1 : 1
}
if (step > 0) {
for (; start < stop; start += step) {
yield start
}
} else {
for (; start > stop; start += step) {
yield start
}
}
},
{
params: {
start: { optional: true, type: 'number' },
step: { optional: true, type: 'number' },
stop: { type: 'number' },
},
},
],
},
})
}
addMethod(name, method, { description, params = {} } = {}) {
const methods = this._methods
if (name in methods) {
throw new Error(`API method ${name} already exists`)
}
const ajv = this._ajv
const validate = ajv.compile({
// we want additional properties to be disabled by default
additionalProperties: params['*'] || false,
properties: params,
// we want params to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
required: Object.keys(params).filter(name => {
const param = params[name]
const required = !param.optional
delete param.optional
return required
}),
type: 'object',
})
const m = params => {
if (!validate(params)) {
throw errors.invalidParameters(validate.errors)
}
return method(params)
}
m.description = description
m.params = params
methods[name] = m
return once(() => {
delete methods[name]
})
}
addMethods(methods) {
let base = ''
const removes = []
const addMethod = (method, name) => {
name = base + name
if (typeof method === 'function') {
removes.push(this.addMethod(name, method))
return
} else if (Array.isArray(method)) {
removes.push(this.addMethod(name, ...method))
return
}
const oldBase = base
base = name + '.'
forOwn(method, addMethod)
base = oldBase
}
try {
forOwn(methods, addMethod)
} catch (error) {
// Remove all added methods.
forOwn(removes, remove => remove())
// Forward the error
throw error
}
return once => forOwn(removes, remove => remove())
}
_call(method, params = {}) {
debug(`call: ${method}()`, { method, params })
const fn = this._methods[method]
if (fn === undefined) {
throw new MethodNotFound(method)
}
return fn(params)
}
}
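A sketch of registering a method against this mixin from another part of the app (names are illustrative); note how `optional: true` feeds the computed `required` list above:

```js
// hypothetical usage, e.g. from another mixin holding a reference to `app`
const remove = app.api.addMethod(
  'example.echo',
  ({ message, uppercase = false }) => (uppercase ? message.toUpperCase() : message),
  {
    description: 'returns the given message',
    params: {
      message: { type: 'string' },
      uppercase: { type: 'boolean', optional: true },
    },
  }
)
// later: remove() unregisters the method
```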

View File

@@ -0,0 +1,159 @@
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import fromEvent from 'promise-toolbox/fromEvent'
import JsonRpcWebsocketClient from 'jsonrpc-websocket-client'
import parsePairs from 'parse-pairs'
import using from 'promise-toolbox/using'
import { createLogger } from '@xen-orchestra/log/dist'
import { deduped } from '@vates/disposable/deduped'
import { execFile, spawn } from 'child_process'
import { readFile } from 'fs-extra'
const TUNNEL_SERVICE = 'xoa-support-tunnel.service'
const { debug, warn } = createLogger('xo:proxy:appliance')
const getUpdater = deduped(async function () {
const updater = new JsonRpcWebsocketClient('ws://localhost:9001')
await updater.open()
return new Disposable(updater, () => updater.close())
})
const callUpdate = params =>
using(
getUpdater(),
updater =>
new Promise((resolve, reject) => {
updater
.on('error', reject)
.on('notification', ({ method, params }) => {
if (method === 'print') {
debug('updater.update: ' + params.content)
} else if (method === 'end') {
resolve(params)
} else if (method === 'server-error') {
reject(new Error(params.message))
} else if (method !== 'connected') {
warn('update.update, unhandled message', {
method,
params,
})
}
})
.notify('update', params)
})
)
async function checkAppliance() {
const child = spawn('xoa', ['check'], {
all: true,
env: {
...process.env,
// don't inherit this var from xo-server or the output will be polluted
DEBUG: '',
FORCE_COLOR: '1',
},
})
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
child.stdout.on('data', onData)
child.stderr.on('data', onData)
await fromEvent(child, 'exit')
return Buffer.concat(chunks, length).toString()
}
async function closeSupportTunnel() {
await fromCallback(execFile, 'systemctl', ['stop', TUNNEL_SERVICE])
}
async function getApplianceInfo() {
const pairs = parsePairs(await readFile('/etc/os-release', 'utf8'))
return {
build: pairs.XOA_BUILD,
os: pairs.ID,
osVersion: pairs.VERSION_ID,
}
}
async function getStateSupportTunnel() {
const isActive =
(await fromEvent(
spawn('systemctl', ['is-active', '--quiet', TUNNEL_SERVICE], {
stdio: 'ignore',
}),
'exit'
)) === 0
const isActiveOrFailed =
isActive ||
(await fromEvent(
spawn('systemctl', ['is-failed', '--quiet', TUNNEL_SERVICE], {
stdio: 'ignore',
}),
'exit'
)) === 0
return {
open: isActive,
stdout: isActiveOrFailed ? await fromCallback(readFile, '/tmp/xoa-support-tunnel.out', 'utf8') : '',
}
}
async function openSupportTunnel() {
await fromCallback(execFile, 'systemctl', ['start', TUNNEL_SERVICE])
}
export default class Appliance {
constructor(app) {
app.api.addMethods({
appliance: {
check: checkAppliance,
getInfo: [
getApplianceInfo,
{
description: 'returns various information about the appliance itself',
},
],
supportTunnel: {
close: [
closeSupportTunnel,
{
description: 'close the support tunnel',
},
],
getState: [
getStateSupportTunnel,
{
description: 'getState the support tunnel',
},
],
open: [
openSupportTunnel,
{
description: 'open the support tunnel',
},
],
},
updater: {
getLocalManifest: () => using(getUpdater(), _ => _.call('getLocalManifest')),
getState: () => callUpdate(),
upgrade: () => callUpdate({ upgrade: true }),
},
},
})
}
// A proxy can be bound to a unique license
getSelfLicense() {
return using(getUpdater(), _ => _.call('getSelfLicenses').then(licenses => licenses[0]))
}
}

View File

@@ -0,0 +1,43 @@
import xdg from 'xdg-basedir'
import { createLogger } from '@xen-orchestra/log'
import { execFileSync } from 'child_process'
import { outputFileSync } from 'fs-extra'
import { Profile } from '../_Profile'
const { warn } = createLogger('xo:proxy:authentication')
const isValidToken = t => typeof t === 'string' && t.length !== 0
export default class Authentication {
constructor(_, { appName, config: { authenticationToken: token } }) {
if (!isValidToken(token)) {
token = JSON.parse(execFileSync('xenstore-read', ['vm-data/xo-proxy-authenticationToken']))
if (!isValidToken(token)) {
throw new Error('missing authenticationToken in configuration')
}
try {
// save this token in the automatically handled conf file
outputFileSync(
// this file must take precedence over normal user config
`${xdg.config}/${appName}/config.z-auto.json`,
JSON.stringify({ authenticationToken: token }),
{ mode: 0o600 }
)
execFileSync('xenstore-rm', ['vm-data/xo-proxy-authenticationToken'])
} catch (error) {
warn('failed to remove token from XenStore', { error })
}
}
this._token = token
}
async findProfile(credentials) {
if (credentials?.authenticationToken === this._token) {
return new Profile()
}
}
}

View File

@@ -0,0 +1,412 @@
import defer from 'golike-defer'
import Disposable from 'promise-toolbox/Disposable'
import fromCallback from 'promise-toolbox/fromCallback'
import using from 'promise-toolbox/using'
import { asyncMap } from '@xen-orchestra/async-map'
import { Backup } from '@xen-orchestra/backups/Backup'
import { compose } from '@vates/compose'
import { createLogger } from '@xen-orchestra/log'
import { decorateWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped'
import { DurablePartition } from '@xen-orchestra/backups/DurablePartition'
import { execFile } from 'child_process'
import { formatVmBackups } from '@xen-orchestra/backups/formatVmBackups'
import { ImportVmBackup } from '@xen-orchestra/backups/ImportVmBackup'
import { Readable } from 'stream'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter'
import { RestoreMetadataBackup } from '@xen-orchestra/backups/RestoreMetadataBackup'
import { Task } from '@xen-orchestra/backups/Task'
import { Xapi } from '@xen-orchestra/xapi'
const noop = Function.prototype
const { warn } = createLogger('xo:proxy:backups')
const runWithLogs = (runner, args) =>
new Readable({
objectMode: true,
read() {
this._read = noop
runner(args, log => this.push(log)).then(
() => this.push(null),
error => this.emit('error', error)
)
},
})
export default class Backups {
constructor(app) {
this._app = app
// clean up any LVM volumes that might not have been properly
// unmounted
app.hooks.on('start', async () => {
await Promise.all([fromCallback(execFile, 'losetup', ['-D']), fromCallback(execFile, 'vgchange', ['-an'])])
await fromCallback(execFile, 'pvscan', ['--cache'])
})
let run = ({ recordToXapi, remotes, xapis, ...rest }) =>
new Backup({
...rest,
// don't change config during backup execution
config: app.config.get('backups'),
// pass getAdapter in order to mutualize the adapter resources usage
getAdapter: remoteId => this.getAdapter(remotes[remoteId]),
getConnectedRecord: Disposable.factory(async function* getConnectedRecord(type, uuid) {
const xapiId = recordToXapi[uuid]
if (xapiId === undefined) {
throw new Error('no XAPI associated to ' + uuid)
}
const xapi = yield this.getXapi(xapis[xapiId])
return xapi.getRecordByUuid(type, uuid)
}).bind(this),
}).run()
const runningJobs = { __proto__: null }
run = (run => {
return async function (params) {
const jobId = params.job.id
if (jobId === undefined) {
return run.apply(this, arguments)
}
if (jobId in runningJobs) {
const error = new Error('job is already running')
error.jobId = jobId
throw error
}
runningJobs[jobId] = true
try {
return await run.apply(this, arguments)
} finally {
delete runningJobs[jobId]
}
}
})(run)
run = (run =>
async function () {
if (!__DEV__) {
const license = await app.appliance.getSelfLicense()
if (license === undefined || license.expires < Date.now()) {
throw new Error('the proxy license is not valid')
}
}
return run.apply(this, arguments)
})(run)
run = (run => async (params, onLog) => {
if (onLog === undefined) {
return run(params)
}
const { job, schedule } = params
try {
await Task.run(
{
name: 'backup run',
data: {
jobId: job.id,
jobName: job.name,
mode: job.mode,
reportWhen: job.settings['']?.reportWhen,
scheduleId: schedule.id,
},
onLog,
},
() => run(params)
)
} catch (error) {
// do not rethrow, everything is handled via logging
}
})(run)
app.api.addMethods({
backup: {
deleteMetadataBackup: [
({ backupId, remote }) => using(this.getAdapter(remote), adapter => adapter.deleteMetadataBackup(backupId)),
{
description: 'delete Metadata backup',
params: {
backupId: { type: 'string' },
remote: { type: 'object' },
},
},
],
deleteVmBackup: [
({ filename, remote }) => using(this.getAdapter(remote), adapter => adapter.deleteVmBackup(filename)),
{
description: 'delete VM backup',
params: {
filename: { type: 'string' },
remote: { type: 'object' },
},
},
],
fetchPartitionFiles: [
({ disk: diskId, remote, partition: partitionId, paths }) =>
using(this.getAdapter(remote), adapter => adapter.fetchPartitionFiles(diskId, partitionId, paths)),
{
description: 'fetch files from partition',
params: {
disk: { type: 'string' },
partition: { type: 'string', optional: true },
paths: { type: 'array', items: { type: 'string' } },
remote: { type: 'object' },
},
},
],
importVmBackup: [
defer(($defer, { backupId, remote, srUuid, streamLogs = false, xapi: xapiOpts }) =>
using(this.getAdapter(remote), this.getXapi(xapiOpts), async (adapter, xapi) => {
const metadata = await adapter.readVmBackupMetadata(backupId)
const run = () => new ImportVmBackup({ adapter, metadata, srUuid, xapi }).run()
return streamLogs
? runWithLogs(
async (args, onLog) =>
Task.run(
{
data: {
jobId: metadata.jobId,
srId: srUuid,
time: metadata.timestamp,
},
name: 'restore',
onLog,
},
run
).catch(() => {}) // errors are handled by logs
)
: run()
})
),
{
description: 'create a new VM from a backup',
params: {
backupId: { type: 'string' },
remote: { type: 'object' },
srUuid: { type: 'string' },
streamLogs: { type: 'boolean', optional: true },
xapi: { type: 'object' },
},
},
],
listDiskPartitions: [
({ disk: diskId, remote }) => using(this.getAdapter(remote), adapter => adapter.listPartitions(diskId)),
{
description: 'list disk partitions',
params: {
disk: { type: 'string' },
remote: { type: 'object' },
},
},
],
listPartitionFiles: [
({ disk: diskId, remote, partition: partitionId, path }) =>
using(this.getAdapter(remote), adapter => adapter.listPartitionFiles(diskId, partitionId, path)),
{
description: 'list partition files',
params: {
disk: { type: 'string' },
partition: { type: 'string', optional: true },
path: { type: 'string' },
remote: { type: 'object' },
},
},
],
listPoolMetadataBackups: [
async ({ remotes }) => {
const backupsByRemote = {}
await asyncMap(Object.entries(remotes), async ([remoteId, remote]) => {
try {
await using(this.getAdapter(remote), async adapter => {
backupsByRemote[remoteId] = await adapter.listPoolMetadataBackups()
})
} catch (error) {
warn('listPoolMetadataBackups', { error, remote })
}
})
return backupsByRemote
},
{
description: 'list pool metadata backups',
params: {
remotes: {
type: 'object',
additionalProperties: { type: 'object' },
},
},
},
],
listVmBackups: [
async ({ remotes }) => {
const backups = {}
await asyncMap(Object.keys(remotes), async remoteId => {
try {
await using(this.getAdapter(remotes[remoteId]), async adapter => {
backups[remoteId] = formatVmBackups(await adapter.listAllVmBackups())
})
} catch (error) {
warn('listVmBackups', { error, remote: remotes[remoteId] })
}
})
return backups
},
{
description: 'list VM backups',
params: {
remotes: {
type: 'object',
additionalProperties: { type: 'object' },
},
},
},
],
listRunningJobs: [
() => Object.keys(runningJobs),
{
description: 'returns a list of running jobs',
},
],
listXoMetadataBackups: [
async ({ remotes }) => {
const backupsByRemote = {}
await asyncMap(Object.entries(remotes), async ([remoteId, remote]) => {
try {
await using(this.getAdapter(remote), async adapter => {
backupsByRemote[remoteId] = await adapter.listXoMetadataBackups()
})
} catch (error) {
warn('listXoMetadataBackups', { error, remote })
}
})
return backupsByRemote
},
{
description: 'list XO metadata backups',
params: {
remotes: {
type: 'object',
additionalProperties: { type: 'object' },
},
},
},
],
restoreMetadataBackup: [
({ backupId, remote, xapi: xapiOptions }) =>
using(app.remotes.getHandler(remote), xapiOptions && this.getXapi(xapiOptions), (handler, xapi) =>
runWithLogs(
async (args, onLog) =>
Task.run(
{
name: 'metadataRestore',
data: JSON.parse(String(await handler.readFile(`${backupId}/metadata.json`))),
onLog,
},
() =>
new RestoreMetadataBackup({
backupId,
handler,
xapi,
}).run()
).catch(() => {}) // errors are handled by logs
)
),
{
description: 'restore a metadata backup',
params: {
backupId: { type: 'string' },
remote: { type: 'object' },
xapi: { type: 'object', optional: true },
},
},
],
run: [
({ streamLogs = false, ...rest }) => (streamLogs ? runWithLogs(run, rest) : run(rest)),
{
description: 'run a backup job',
params: {
job: { type: 'object' },
remotes: { type: 'object' },
schedule: { type: 'object' },
xapis: { type: 'object', optional: true },
recordToXapi: { type: 'object', optional: true },
streamLogs: { type: 'boolean', optional: true },
},
},
],
},
})
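// Partitions mounted through the methods below are "durable": they outlive a
// single API call, so flush them all when the application stops.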
const durablePartition = new DurablePartition()
app.hooks.once('stop', () => durablePartition.flushAll())
app.api.addMethods({
backup: {
mountPartition: [
async ({ disk, partition, remote }) =>
using(this.getAdapter(remote), adapter => durablePartition.mount(adapter, disk, partition)),
{
description: 'mount a partition',
params: {
disk: { type: 'string' },
partition: { type: 'string', optional: true },
remote: { type: 'object' },
},
},
],
unmountPartition: [
async ({ path }) => durablePartition.unmount(path),
{
description: 'unmount a partition',
params: {
path: { type: 'string' },
},
},
],
},
})
}
// FIXME: invalidate cache on remote option change
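// Decorator stack (applied bottom-up): Disposable.factory turns the generator
// into a disposable factory, deduped shares one adapter per remote URL, and
// compose(debounceResource) delays disposal so quick successive uses hit the cache.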
@decorateWith(compose, function (resource) {
return this._app.debounceResource(resource)
})
@decorateWith(deduped, remote => [remote.url])
@decorateWith(Disposable.factory)
*getAdapter(remote) {
const app = this._app
return new RemoteAdapter(yield app.remotes.getHandler(remote), {
debounceResource: app.debounceResource.bind(app),
dirMode: app.config.get('backups.dirMode'),
})
}
// FIXME: invalidate cache on options change
@decorateWith(compose, function (resource) {
return this._app.debounceResource(resource)
})
@decorateWith(deduped, ({ url }) => [url])
@decorateWith(Disposable.factory)
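// Disposable generator: connects, yields the ready client to the consumer,
// then resumes in the finally block to disconnect once the consumer is done.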
async *getXapi({ credentials: { username: user, password }, ...opts }) {
const xapi = new Xapi({
...this._app.config.get('xapiOptions'),
...opts,
auth: {
user,
password,
},
})
await xapi.connect()
try {
await xapi.objectsFetched
yield xapi
} finally {
await xapi.disconnect()
}
}
}
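The constructor above layers three concerns onto `run` through repeated `run = (run => …)(run)` reassignments: each IIFE captures the previous function and returns a new one that adds a single behavior before delegating. A minimal self-contained sketch of the same pattern, with illustrative names not taken from the codebase:

// Base behavior (illustrative).
let run = async job => `done: ${job.id}`

// Layer 1 — concurrency guard.
const running = new Set()
run = (run =>
  async function (job) {
    if (running.has(job.id)) {
      throw new Error('job is already running')
    }
    running.add(job.id)
    try {
      return await run.apply(this, arguments)
    } finally {
      running.delete(job.id)
    }
  })(run)

// Layer 2 — logging; calls flow outermost → innermost → base behavior.
run = (run =>
  async function (job) {
    console.log('start', job.id)
    try {
      return await run.apply(this, arguments)
    } finally {
      console.log('end', job.id)
    }
  })(run)

run({ id: 'backup-1' }).then(console.log)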


@@ -0,0 +1,71 @@
import get from 'lodash/get'
import identity from 'lodash/identity'
import { createLogger } from '@xen-orchestra/log'
import { parseDuration } from '@vates/parse-duration'
import { watch } from 'app-conf'
const { warn } = createLogger('xo:proxy:config')
export default class Config {
constructor(app, { appDir, appName, config }) {
this._config = config
const watchers = (this._watchers = new Set())
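// On start, watch the config files; watch() resolves to a stop function,
// which is registered as a 'stop' hook so watching ends on shutdown.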
app.hooks.on('start', async () => {
app.hooks.on(
'stop',
await watch({ appDir, appName, ignoreUnknownFormats: true }, (error, config) => {
if (error != null) {
return warn(error)
}
this._config = config
watchers.forEach(watcher => {
watcher(config)
})
})
)
})
}
get(path) {
const value = get(this._config, path)
if (value === undefined) {
throw new TypeError('missing config entry: ' + path)
}
return value
}
getDuration(path) {
return parseDuration(this.get(path))
}
watch(path, cb) {
// internal arg
const processor = arguments.length > 2 ? arguments[2] : identity
let prev
const watcher = config => {
try {
const value = processor(get(config, path))
if (value !== prev) {
prev = value
cb(value)
}
} catch (error) {
warn('watch', { error, path })
}
}
// ensure sync initialization
watcher(this._config)
const watchers = this._watchers
watchers.add(watcher)
return () => watchers.delete(watcher)
}
watchDuration(path, cb) {
return this.watch(path, cb, parseDuration)
}
}
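Note that `watch` invokes the new watcher synchronously with the current config before registering it, so a subscriber receives the current value immediately rather than waiting for the first change. A hedged usage sketch, assuming a `config` instance of this class and an illustrative path:

// Hypothetical consumer of Config#watch — path and handler body are illustrative.
const unsubscribe = config.watch('backups.dirMode', dirMode => {
  console.log('backups.dirMode is now', dirMode)
})
// The callback has already fired once with the current value; it fires again
// on every change. Call the returned function to stop watching:
unsubscribe()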


@@ -0,0 +1,49 @@
import assert from 'assert'
import emitAsync from '@xen-orchestra/emit-async'
import EventEmitter from 'events'
import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:hooks')
const runHook = async (emitter, hook) => {
debug(`${hook} start…`)
await emitAsync.call(
emitter,
{
onError: error => warn(`${hook} failure`, { error }),
},
hook
)
debug(`${hook} finished`)
}
export default class Hooks extends EventEmitter {
// Run *clean* async listeners.
//
// They normalize existing data, clear invalid entries, etc.
clean() {
return runHook(this, 'clean')
}
_status = 'stopped'
// Run *start* async listeners.
//
// They initialize the application.
async start() {
assert.strictEqual(this._status, 'stopped')
this._status = 'starting'
await runHook(this, 'start')
this.emit((this._status = 'started'))
}
// Run *stop* async listeners.
//
// They close connections, unmount file systems, save states, etc.
async stop() {
assert.strictEqual(this._status, 'started')
this._status = 'stopping'
await runHook(this, 'stop')
this.emit((this._status = 'stopped'))
}
}
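A short usage sketch of the lifecycle above, with illustrative listener bodies; `start()` asserts the 'stopped' status, runs every 'start' listener via emitAsync, then emits 'started' (and `stop()` behaves symmetrically):

// Hypothetical lifecycle wiring — listener bodies are illustrative.
const hooks = new Hooks()
hooks.on('start', async () => {
  // open connections, load state…
})
hooks.on('stop', async () => {
  // close connections, persist state…
})
await hooks.start() // runs all 'start' listeners, then emits 'started'
await hooks.stop() // runs all 'stop' listeners, then emits 'stopped'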


@@ -0,0 +1,55 @@
import Disposable from 'promise-toolbox/Disposable'
import using from 'promise-toolbox/using'
import { compose } from '@vates/compose'
import { decorateWith } from '@vates/decorate-with'
import { deduped } from '@vates/disposable/deduped'
import { getHandler } from '@xen-orchestra/fs'
export default class Remotes {
constructor(app) {
this._app = app
app.api.addMethods({
remote: {
getInfo: [
({ remote }) => using(this.getHandler(remote), handler => handler.getInfo()),
{
params: {
remote: { type: 'object' },
},
},
],
test: [
({ remote }) =>
using(this.getHandler(remote), handler => handler.test()).catch(error => ({
success: false,
error: error.message ?? String(error),
})),
{
params: {
remote: { type: 'object' },
},
},
],
},
})
}
// FIXME: invalidate cache on remote option change
@decorateWith(compose, function (resource) {
return this._app.debounceResource(resource)
})
@decorateWith(deduped, remote => [remote.url])
async getHandler(remote) {
const { config } = this._app
const handler = getHandler(remote, config.get('remoteOptions'))
if (config.get('remotes.disableFileRemotes') && handler.type === 'file') {
throw new Error('Local remotes are disabled in proxies')
}
await handler.sync()
return new Disposable(handler, () => handler.forget())
}
}
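Because `getHandler` resolves to a `Disposable`, call sites consume it with promise-toolbox's `using`, which guarantees the `forget()` disposer runs even when the callback throws. A hedged sketch, assuming a `remotes` instance of this class and an illustrative remote object:

// Hypothetical call site — the remote object shape is illustrative.
import using from 'promise-toolbox/using'

await using(remotes.getHandler({ url: 'nfs://host/share' }), async handler => {
  console.log(await handler.getInfo())
})
// using() disposes the handler (calling forget()) once the block exits.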

Some files were not shown because too many files have changed in this diff.