Compare commits

...

220 Commits

Author SHA1 Message Date
Florent Beauchamp
dd0b71bca0 fix: rework optionnal depency 2022-09-22 14:07:52 +02:00
Florent Beauchamp
dc571a5811 fix(Backup/file restore): don't load fuse-vhd on platform not supported by fuse-native 2022-09-21 16:59:58 +02:00
Mathieu
f1ab62524c fix(xo-web/SR): fix "VDIs to coalesce" in SR advanced tab (#6429)
See https://xcp-ng.org/forum/topic/6334/coalesce-not-showing-anymore/3
Introduced by a9c1239149
2022-09-21 16:21:23 +02:00
rajaa-b
ce78d22bb8 fix(xo-web/tasks): fix tasks being displayed to all users (#6422)
See zammad#9509
Introduced by e246c8ee47
2022-09-21 11:25:14 +02:00
rajaa-b
99a1dbeae1 fix(xo-web/tasks): fix tasks filter (#6424)
See zammad#9423
2022-09-21 11:02:03 +02:00
Julien Fontanet
2a71e28253 docs(backups): add cache for a VM 2022-09-20 14:54:47 +01:00
Florent BEAUCHAMP
46fe3be322 feat: implement file restore on top of FUSE instead of vhdimount (#6409)
It brings file restore to VhdDirectory (and related features like encryption and compression).
2022-09-20 11:04:24 +02:00
Florent BEAUCHAMP
9da65b6c7c feat(backups): write and merge block concurrency are now configurable (#6416) 2022-09-16 14:54:33 +02:00
Julien Fontanet
ad02700b51 fix(backups/RemoteAdapter#_getPartition): mount with norecovery option 2022-09-15 11:06:59 +02:00
Julien Fontanet
8fd10bace7 chore: update deps 2022-09-15 00:06:07 +02:00
Julien Fontanet
9d09a3adf5 feat(backups/deleteVmBackups): run cleanVm in parallel 2022-09-12 11:23:46 +02:00
Julien Fontanet
4350f0cd1a fix(backups/deleteVmBackups): don't fail on cleanVm error
Seen when investigating zammad#8842
2022-09-12 11:23:44 +02:00
Julien Fontanet
5dc993255c fix(backups/DeltaBackupWriter#checkBaseVdis): don't warn on missing dir 2022-09-11 14:04:24 +02:00
Julien Fontanet
e9188a9864 feat(proxy/api): more raw errors
Follow up of ae373c3e7
2022-09-11 13:40:27 +02:00
Julien Fontanet
42dd70c2f7 chore(backups/RemoteAdapter): add more cache related debug 2022-09-10 14:16:54 +02:00
Julien Fontanet
191c124130 feat(backups): update VM backups cache (#6411) 2022-09-10 14:16:29 +02:00
Julien Fontanet
2742f948c2 feat(CHANGELOG): move releases before 2022 in other file
Because the main CHANGELOG is getting too big to be displayed on GitHub.
2022-09-10 13:21:15 +02:00
Julien Fontanet
455a3ba677 fix(CHANGELOG): fix version of 5.74.2 and 5.74.3 2022-09-10 13:14:57 +02:00
Julien Fontanet
1961da9aed feat(xo-server): 5.102.3 2022-09-09 18:29:48 +02:00
Julien Fontanet
e82d9d7a74 fix(xo-server/isValidAuthenticationToken): call _getAuthenticationToken
Introduced by d52dcd070
2022-09-09 17:37:44 +02:00
Florent BEAUCHAMP
dfb3166bed fix(backups): add healthcheck to full backup (#6401) 2022-09-09 16:35:38 +02:00
Florent BEAUCHAMP
5a54f7f302 feat(backups/cleanVm): invalidate cache on backup deletion (#6402) 2022-09-09 16:27:12 +02:00
Julien Fontanet
6002a497fe feat(xo-server): 5.102.2 2022-09-09 15:50:37 +02:00
Julien Fontanet
3fa8b6332e chore: update to app-conf@2.3.0 2022-09-09 12:59:59 +02:00
Manon Mercier
1b521b87c5 docs(configuration): add certificates in title to ease search (#6212) 2022-09-09 10:44:32 +02:00
rajaa-b
8b7d2aab6b feat(xo-server#_startVm): add a message for 'NO_HOSTS_AVAILABLE' error (#6408) 2022-09-09 10:43:22 +02:00
Julien Fontanet
b0006f91f4 fix(xo-server/vm.import): allow additional props for data param
Fixes zammad#9521

Introduced by d7f29e736
2022-09-08 11:10:51 +02:00
Julien Fontanet
31aaa968ec docs(users/SAML): remove confusing warning
See zammad#9420
2022-09-07 09:59:11 +02:00
Mathieu
facb4593f0 feat: release 7.74.2 (#6407) 2022-09-06 15:04:27 +02:00
Mathieu
d1a30363b4 feat: patch release (#6406) 2022-09-06 14:12:46 +02:00
Julien Fontanet
eac5347f32 fix(CHANGELOG): remove CHANGELOG.unreleased comments 2022-09-06 11:56:00 +02:00
Julien Fontanet
2006665fe8 feat(CHANGELOG): release 5.74.1 2022-09-06 11:55:06 +02:00
Julien Fontanet
26a3862d61 chore(CHANGELOG.unreleased): clearer error message
Introduced by 2371109b6
2022-09-06 11:53:53 +02:00
Mathieu
2371109b6f fix(xo-server): handle unfetched VDIs in pool.$ha_statefiles (#6404)
Introduced by 4dc7575d5

Fixes zammad#9498
2022-09-06 11:37:13 +02:00
Mathieu
243bffebbd feat(xo-server-auth-saml): support multiline cert (#6403)
Fixes https://xcp-ng.org/forum/topic/6174/saml-auth-with-azure-ad/10
2022-09-06 10:55:02 +02:00
Julien Fontanet
e69ae7b0db chore(CHANGELOG): integrate released changes 2022-09-03 11:37:43 +02:00
Julien Fontanet
5aff7b94d8 feat(xo-web): 5.103.0 2022-09-03 11:37:10 +02:00
Julien Fontanet
a65058ddd5 feat(xo-server): 5.102.0 2022-09-03 11:36:28 +02:00
Julien Fontanet
b1e81d84c6 feat(@xen-orchestra/proxy): 0.26.1 2022-09-03 11:35:27 +02:00
Julien Fontanet
96e60f7e4f feat(@xen-orchestra/mixins): 0.8.0 2022-09-03 11:34:58 +02:00
Julien Fontanet
5e59c617e8 feat(vhd-lib): 4.0.1 2022-09-03 11:33:54 +02:00
Julien Fontanet
69ad0ade6e feat(@xen-orchestra/fs): 3.1.0 2022-09-03 11:32:23 +02:00
Julien Fontanet
37cdbc19ef fix(xo-web): fix signout
Introduced by 281a1cc54
2022-09-02 18:45:10 +02:00
Julien Fontanet
6cbce81faa feat(xo-server): respect disabled setting for VM console
See #6319
2022-09-02 11:32:57 +02:00
Julien Fontanet
8c14906a60 fix(xo-server-recover-account): connect Redis client (#6398) 2022-09-02 11:01:42 +02:00
Florent BEAUCHAMP
62591e1f6f fix(vhd-lib/merge): reduce concurrency to protect slower backends (#6400) 2022-09-02 11:00:53 +02:00
Julien Fontanet
ea4a888c5e fix(xo-server/vm.create): allow additional props for VDIs and existingDisks
Fixes https://xcp-ng.org/forum/post/52561
2022-09-02 10:26:38 +02:00
Julien Fontanet
281a1cc549 feat(xo-server): validate auth token on HTTP request 2022-09-01 17:19:30 +02:00
Julien Fontanet
d52dcd0708 feat(xo-server): validate auth token on HTTP request 2022-09-01 17:15:39 +02:00
Florent BEAUCHAMP
d8e01b2867 fix(fs/s3#copy): normalize error: no such key → ENOENT (#6388) 2022-09-01 12:51:44 +02:00
Florent BEAUCHAMP
dca3f39156 feat(xo-web): remote level encryption (#6321)
Co-authored-by: mathieuRA <contact@mathieu-raisin.fr>
2022-09-01 11:34:48 +02:00
Julien Fontanet
31e964fe0f fix(xo-server/backupNg.{create,edit,run}Job): allow settings other than concurrency
Introduced by d7f29e736
2022-09-01 00:25:12 +02:00
Julien Fontanet
39d973c43f fix(xo-server/api): allow additional props on objects without prop definitions
Fixes #6395

Introduced by d7f29e736
2022-09-01 00:24:12 +02:00
Julien Fontanet
55f921959d fix(xo-server/api/adaptJsonSchema): fix additionalProperties test
Introduced by d7f29e736
2022-09-01 00:22:49 +02:00
Julien Fontanet
6598090662 fix(xo-server/api): keep previous params format
Introduced by d7f29e736

Avoid breaking `xo-cli --list-commands`.
2022-09-01 00:21:59 +02:00
Julien Fontanet
d7f29e7363 chore(xo-server/api): use Ajv instead of schema-inspector
- standard JSON schema
- faster
- maintained

The new implementation also pre-compiles schemas, which means that params validation for each call is faster and that incorrect schemas are detected at startup.
2022-08-31 16:46:17 +02:00
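A minimal sketch of the pre-compilation idea, assuming Ajv's standard `compile` API (the schema and function names below are illustrative, not taken from xo-server):

```js
'use strict'

const Ajv = require('ajv')

const ajv = new Ajv()

// compiled once at startup: an invalid schema throws here, not at call time
const validateParams = ajv.compile({
  type: 'object',
  properties: {
    name: { type: 'string' },
    memory: { type: 'integer', minimum: 0 },
  },
  required: ['name'],
})

// reused for every API call: only the fast compiled function runs per call
function checkParams(params) {
  if (!validateParams(params)) {
    throw new Error(ajv.errorsText(validateParams.errors))
  }
}

checkParams({ name: 'vm1', memory: 1024 })
```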
Julien Fontanet
82df6089c3 chore: refresh yarn.lock 2022-08-31 15:59:04 +02:00
rajaa-b
80cc66964e feat(xo-web/proxies): ability to bind licence to existing proxy (#6348)
See Zammad#7457
2022-08-31 15:40:34 +02:00
Florent BEAUCHAMP
7883d38622 fix(vhd-lib/VhdDirectory/mergeBlock): write BAT on block creation (#6300) 2022-08-31 15:35:10 +02:00
Julien Fontanet
2cb5169b6d feat(fs/Local): stack traces v2 (#6363)
- better support of lock/release
- handle sync exceptions as well
- save stacks in `syncStack` instead of replacing existing ones
2022-08-31 15:30:08 +02:00
Julien Fontanet
9ad2c07984 feat: release 5.74.0 2022-08-31 15:09:17 +02:00
Mathieu
a9c1239149 feat(xo-server/xo-web/health): detect invalid vhd-parent VDIs (#6356) 2022-08-31 11:35:35 +02:00
Mathieu
cb1223f72e feat: technical release (#6387) 2022-08-30 15:36:30 +02:00
Mathieu
4dc7575d5b feat(xo-web/storage): display SR used for the HA state files (#6384)
Fixes #6339
2022-08-29 17:02:50 +02:00
Julien Fontanet
276d1ce60a feat(backups/Task): add original log to *log after end* error 2022-08-29 10:05:25 +02:00
Julien Fontanet
58ab32a623 feat(backups/_forkStreamUnpipe): add more debug 2022-08-26 10:49:52 +02:00
Julien Fontanet
c1846e6ff3 fix(xen-api/{get,put}Resource): add sync stack traces support
Follows 857a9f3ef
2022-08-25 17:06:37 +02:00
Julien Fontanet
826de17111 feat(backups/VmBackup#_callWriters): add more debug 2022-08-25 16:43:45 +02:00
Julien Fontanet
8a09ea8bc1 feat(backups/VmBackup#_callWriters): unify single/multiple code
The behavior should be the same even if there is a single writer
2022-08-25 16:43:18 +02:00
Florent Beauchamp
1297c925ad feat: server side of backup encryption 2022-08-23 12:04:16 +02:00
Julien Fontanet
74d15e1a92 chore: format with Prettier 2022-08-23 11:56:26 +02:00
Julien Fontanet
ae373c3e77 feat(proxy/api): returns raw errors
Similar to dd5e11e83
2022-08-23 11:08:25 +02:00
Pierre Donias
e9b90caa3a fix(complex-matcher): properly alias RegExp export as RegExpNode (#6375)
Fixes #6365
Introduced by 9ef2c7da4c
2022-08-22 10:44:07 +02:00
Florent BEAUCHAMP
b89e77a6a4 fix: various VHD related test fixes (#6302) 2022-08-12 16:23:59 +02:00
Florent Beauchamp
61691ac46b fix(vhd-lib/VhdDirectory#mergeBlock): fix rename condition
Introduced by fd752fee8
2022-08-12 16:18:25 +02:00
Florent Beauchamp
512b96af24 fix(backups/cleanVm): fix path of merge state
Introduced by ad149740b1
2022-08-12 16:07:44 +02:00
Mathieu
d369593979 fix(xo-web): from ignoreBackup to bypassBackupCheck (#6362)
Introduced by 837b06ef2b
2022-08-12 15:01:22 +02:00
Julien Fontanet
2f38e0564b fix(fs/Local#lock): correctly assign release
Introduced by 4bed4195a
2022-08-11 17:13:40 +02:00
Julien Fontanet
5e8dd4e4bc fix(vhd-lib/mergeVhdChain): inverse condition to use VhdSynthetic
Introduced by 76813737e
2022-08-11 17:04:23 +02:00
Julien Fontanet
8f9f1f566d fix(proxy/api): typo in Array#includes
Introduced by 08cdcf411
2022-08-11 14:20:57 +02:00
olegur
d7870b8860 small docs typo fix 2022-08-11 14:16:12 +02:00
Julien Fontanet
97fa23f890 chore(xo-web): use vm.convertToTemplate instead of alias vm.convert 2022-08-11 11:22:54 +02:00
Julien Fontanet
f839887da8 chore(xo-server/api): remove unused alias vdi.delete_ 2022-08-11 11:22:54 +02:00
Julien Fontanet
15bfaa15ca chore(xo-server/api): remove unused alias network.delete_ 2022-08-11 11:22:54 +02:00
Julien Fontanet
4a3183ffa0 chore(xo-server/api): remove unused method 2022-08-11 11:22:54 +02:00
Pierre Donias
18d03a076b fix(xo-web/backup/restore): don't use UNSAFE_componentWillReceiveProps (#6364)
Introduced by 7d6e832226

The `UNSAFE_componentWillReceiveProps` method was only introduced in React 16, but we are using React 15.
`.eslintrc`: ask ESLint to check the React version so that it doesn't suggest changing the method to `UNSAFE_componentWillReceiveProps`.
2022-08-11 11:19:07 +02:00
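A minimal sketch of the ESLint setting described above, assuming eslint-plugin-react's `settings.react.version` option (the project's actual `.eslintrc` content is not reproduced here):

```js
// .eslintrc.js (illustrative)
module.exports = {
  settings: {
    react: {
      // tell eslint-plugin-react which React version is in use, so its
      // lifecycle rules don't suggest the UNSAFE_* names from React 16+
      version: '15.0',
    },
  },
}
```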
Julien Fontanet
4bed4195ac feat(fs/Local#lock): attempt to reacquire in case of compromission
Related to zammad#8826
2022-08-10 17:41:16 +02:00
Julien Fontanet
a963878af5 fix(fs/Local#lock): never fail on release
Related to zammad#8826

Also, log properly if the log is compromised.
2022-08-10 17:32:36 +02:00
Julien Fontanet
d6c3dc87e0 feat(xo-server): avoid warning if client WS has been closed
Fixes part of zammad#8826
2022-08-10 16:53:38 +02:00
Julien Fontanet
5391a9a5ad chore(CHANGELOG.unreleased): bump fs in major
Introduced by b50e95802
2022-08-10 16:52:39 +02:00
Julien Fontanet
b50e95802c feat(fs): remove JS based SMB handler
It's not well tested nor maintained.
2022-08-10 16:28:05 +02:00
Julien Fontanet
75a9799e96 feat(xo-server): make http.listen config an object
It remains compatible with previous configurations.

It now aligns with xo-proxy's config and is easier to overload with other config files.
2022-08-10 14:05:16 +02:00
Julien Fontanet
dbb9e4d60f feat(fs/Local): add stack traces to native fs methods 2022-08-09 11:25:49 +02:00
Julien Fontanet
d27b6bd49d fix(xo-server/collection/redis#{add,update}): cast to string before inserting in db
Fixes https://xcp-ng.org/forum/post/51933
Fixes #6359

Introduced by 36b94f745
2022-08-07 13:27:25 +02:00
Julien Fontanet
c5d2726faa chore(xo-server/collection/redis): remove unused constructor param
Introduced by 36b94f745
2022-08-07 13:01:51 +02:00
Julien Fontanet
a2a98c490f feat(xo-server/db-cli): improve help message 2022-08-07 12:48:34 +02:00
Julien Fontanet
e2dc1d98f1 feat(xo-server/db-cli repl): now has a collection ready for each namespace 2022-08-07 12:46:52 +02:00
Julien Fontanet
658c26d3c9 fix(xo-server/collection/redis#{add,update}): fix ignore id field 2022-08-06 13:23:46 +02:00
Julien Fontanet
612095789a feat(xo-server/db-cli): repl command 2022-08-06 12:31:56 +02:00
Julien Fontanet
7418d9f670 fix(xo-server/collection/redis#{add,update}): save all fields
Fixes https://xcp-ng.org/forum/post/51916

Introduced by 36b94f745

`Redis#hSet` accepts an object instead of a sequence of key/value pairs.

The previous commit corrupted the database by deleting all but one field of each added/updated object.
2022-08-06 12:30:08 +02:00
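A minimal sketch of the two `hSet` call forms mentioned above, using the node-redis v4 API (key and fields are illustrative):

```js
'use strict'

const { createClient } = require('redis')

async function main() {
  const client = createClient()
  await client.connect()

  // object form: writes every field of the hash in one call
  await client.hSet('xo:server:example', { host: '192.0.2.1', label: 'lab' })

  // field/value form: only writes the fields explicitly passed
  await client.hSet('xo:server:example', 'label', 'lab')

  await client.quit()
}

main().catch(console.error)
```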
Julien Fontanet
f344c58a62 feat(xo-server/db-cli): ensure errors are printed 2022-08-06 11:30:00 +02:00
Julien Fontanet
36b94f745d fix(xo-server): redis@4 usage
Introduced by 9fab15537
2022-08-05 16:53:22 +02:00
Julien Fontanet
08cdcf4112 feat(proxy/api): method results can be documented/validated 2022-08-05 16:15:21 +02:00
Julien Fontanet
76813737ef feat(vhd-cli/merge): replace mergeVhd by mergeVhdChain 2022-08-05 15:04:44 +02:00
Julien Fontanet
53d15d6a77 chore(vhd-lib/merge): remove unnecessary concat 2022-08-05 15:04:44 +02:00
Julien Fontanet
dd01b62b87 feat(vhd-lib/mergeVhd): no longer exported from the index
BREAKING CHANGE
2022-08-05 15:04:44 +02:00
Julien Fontanet
9fab15537b chore: update deps 2022-08-05 14:25:09 +02:00
Florent BEAUCHAMP
d87db05b2b feat: release 5.73.1 (#6352) 2022-08-04 17:47:50 +02:00
Florent BEAUCHAMP
f1f32c962c feat: technical release (#6351) 2022-08-04 16:05:29 +02:00
Florent Beauchamp
ad149740b1 feat(backups/cleanVm,vhd-lib): support resuming merge of VHD chains
The whole chain is now stored in the merge state.
2022-08-04 15:25:31 +02:00
Florent Beauchamp
9a4e938b91 fix(backups/cleanVm): fix parent/child order when resuming merge 2022-08-04 15:25:31 +02:00
Julien Fontanet
a226760b07 fix(xo-web/css): fix double slash in Font Awesome import
Fixes #6350
2022-08-04 10:23:06 +02:00
Yannick Achy
a11450c3a7 docs(xoa): NTP configuration (#6342)
Co-authored-by: yannick Achy <yannick.achy@vates.fr>
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-08-04 09:24:00 +02:00
Julien Fontanet
e0cab4f937 feat(vhd-lib/merge): augment errors with VHD paths 2022-08-03 16:41:59 +02:00
Julien Fontanet
468250f291 fix(vhd-lib/merge): fix mergeState.currentBlock
Fixes zammad#8794 and zammad#8168

Introduced by 97d94b795

When the concurrency is one (or in case of a race condition), `Math.min(...merging)` could be called with `merging` empty.

This led to a non-finite value (`Infinity`), which was stored as `null` in the JSON merge state.
2022-08-03 14:56:49 +02:00
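A small illustration of the failure mode described above (values are hypothetical; the real `merging` set tracks in-flight block merges):

```js
const merging = new Set() // no block currently being merged

// spreading an empty collection calls Math.min with no arguments
const currentBlock = Math.min(...merging) // => Infinity

// non-finite numbers are serialized as null in JSON, which is what
// ended up in the .merge.json state file
JSON.stringify({ currentBlock }) // => '{"currentBlock":null}'
```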
Julien Fontanet
d04b93c17e feat(vhd-cli info): explicit header/footer objects 2022-08-03 14:49:14 +02:00
Florent BEAUCHAMP
911556a1aa fix(backups/cleanVm): the child, not the parent is linked to a backup (#6331) 2022-08-03 14:06:35 +02:00
Per-Ole
c7d3230eef feat(xo-server-auth-saml): allow customizing callback URL (#6278)
This allows entering the full path of the callback URL. As stated in issue #6108, the SAML plugin won't work with Azure AD because the callback URL needs to be HTTPS. This was solved by @jens-rabe.
2022-08-03 10:12:32 +02:00
Julien Fontanet
b63086bf09 fix(xo-web): use complex-matcher classes to build filters
Using strings directly breaks with special characters.
2022-08-02 21:36:41 +02:00
Florent BEAUCHAMP
a4118a5676 docs(backups): describes file hierarchy with VHD directories (#6337) 2022-08-02 11:12:24 +02:00
Julien Fontanet
26e7e6467c fix(xo-server,xo-web): prevent backup concurrency from being <=0
Fixes #6338
2022-07-31 20:04:47 +02:00
Julien Fontanet
1c9552fa58 docs(mixins/SslCertificate): don't recommends using staging
This confuses users, and XO is not currently able to detect that this value has changed and generate a new certificate.
2022-07-29 19:40:29 +02:00
Julien Fontanet
9875cb5575 docs(mixins/SslCertificate): explicits that cert/key are required 2022-07-29 19:26:33 +02:00
Julien Fontanet
d1c6bb8829 fix(mixins/SslCertificate): remove unnecessary warnings 2022-07-29 19:24:49 +02:00
Julien Fontanet
ef7005a291 fix(backups/cleanVms): remove useless log 2022-07-29 15:42:46 +02:00
Pierre Donias
8068b83ffe feat: release 5.73.0 (#6336) 2022-07-29 10:48:59 +02:00
Pierre Donias
f01a89710c feat: technical release (#6335) 2022-07-29 10:08:26 +02:00
Julien Fontanet
38ced81ada fix(backups,backups-cli): correctly pass loggers to cleanVm
Introduced by c0b0ba433
2022-07-29 08:54:31 +02:00
Julien Fontanet
9834632d59 fix(vhd-lib/merge): delete merge state after cleanup
Related to https://xcp-ng.org/forum/post/51529

In case the clean-up fails, the merge will be retried in the future.
2022-07-28 21:39:36 +02:00
Julien Fontanet
bb4504dd50 fix(vhd-lib/merge): don't delete children if renaming parent failed
Related to https://xcp-ng.org/forum/post/51529
2022-07-28 21:39:36 +02:00
Julien Fontanet
8864c2f2db fix(backups/cleanVm): wait for unreferenced VHDs to be deleted
Introduced by 249f63849
2022-07-28 21:26:22 +02:00
Julien Fontanet
19208472e6 feat(backups/cleanVm): improve log messages
Introduced by c0b0ba433
2022-07-28 21:26:22 +02:00
Florent BEAUCHAMP
10c77ba3cc feat(mixins/SslCertificate): Let's Encrypt support (#6320) 2022-07-28 18:13:12 +02:00
Pierre Donias
cd28fd4945 feat: technical release (#6332) 2022-07-28 15:57:40 +02:00
Mathieu
6778d6aa4a fix(xo-web/VM): display a confirmation modal to bypass blockedOperation (#6295) 2022-07-28 15:01:22 +02:00
Pierre Donias
433851d771 fix(xo-server/xapi-object-to-xo): handle guest metrics reporting empty IP field (#6328)
See https://xcp-ng.org/forum/topic/4810/netbox-plugin-error-ipaddr-the-address-has-neither-ipv6-nor-ipv4-format/27?_=1658735770330
2022-07-27 12:03:22 +02:00
Julien Fontanet
d157fd3528 feat(mixins/HttpProxy): enable by default 2022-07-26 10:59:37 +02:00
Julien Fontanet
9150823c37 fix(xo-server/unregisterProxy): don't try to unbind license if method unavailable 2022-07-25 18:05:08 +02:00
Julien Fontanet
07c3a44441 fix(xo-server/registerProxy): db.add does not return a model
Introduced by 8a71f8473
2022-07-25 18:01:30 +02:00
Julien Fontanet
051bbf9449 fix(xo-server/callProxyMethod): use stored address before XAPI ones 2022-07-25 17:58:40 +02:00
Julien Fontanet
22ea1c0e2a fix(xo-server/proxy.update): return proxy with its URL 2022-07-25 17:54:31 +02:00
Julien Fontanet
6432a44860 chore(fs/createOutputStream): remove deprecated method
Deprecated since 407586e2d
2022-07-25 16:00:42 +02:00
Julien Fontanet
493d861de3 chore(xo-server,proxy): fix linting errors 2022-07-25 13:26:26 +02:00
Julien Fontanet
82452e9616 feat(xo-server/RestApi): add raw VDI import 2022-07-21 16:28:22 +02:00
Julien Fontanet
2fbeaa618a fix(xapi/SR_importVdi): import as VDH, not raw
Fixes #6327
2022-07-21 15:18:48 +02:00
Julien Fontanet
6c08afaa0e fix(xapi/VDI_importContent): format is not optional 2022-07-21 15:18:48 +02:00
Julien Fontanet
af4cc1f574 fix(xo-cli): extract ws error message
Fixes #6022
2022-07-21 12:36:58 +02:00
Julien Fontanet
2fb27b26cd feat(xo-server): refresh HTTP proxy on config change 2022-07-21 10:42:30 +02:00
Pierre Donias
11e09e1f87 fix(xo-web/home/vm): show error toaster when deleting VMs failed (#6323) 2022-07-21 09:42:16 +02:00
Julien Fontanet
9ccb5f8aa9 feat(xo-server): inject proxy in env (#6322)
Fixes zammad#8073

Related to #6320

- brings `no_proxy` support
- implicit support for other libs
2022-07-20 15:27:57 +02:00
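A minimal sketch of the idea, assuming the conventional proxy environment variables (the exact variables set by xo-server are not shown here):

```js
// once the configured proxy is written to the process environment,
// libraries that honour these conventional variables pick it up implicitly
process.env.HTTP_PROXY = 'http://proxy.example.org:3128'
process.env.HTTPS_PROXY = 'http://proxy.example.org:3128'
process.env.no_proxy = 'localhost,127.0.0.1,.internal.example.org'
```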
Pierre Donias
af87d6a0ea docs(contributing): update contribution steps (#6318)
See https://xcp-ng.org/forum/topic/6070/netbox-plugin-enhancements
2022-07-11 17:01:57 +02:00
Julien Fontanet
d847f45cb3 feat: release 5.72.1 2022-07-11 10:37:01 +02:00
Julien Fontanet
38c615609a feat(xo-web): 5.100.0 2022-07-11 10:36:03 +02:00
Julien Fontanet
144cc4b82f feat(xo-server-audit): 0.10.0 2022-07-11 10:36:03 +02:00
Julien Fontanet
d24ab141e9 feat(xo-server): 5.98.1 2022-07-11 10:36:03 +02:00
Julien Fontanet
8505374fcf feat(@xen-orchestra/proxy): 0.23.5 2022-07-11 10:36:03 +02:00
Julien Fontanet
e53d961fc3 feat(@xen-orchestra/backups-cli): 0.7.5 2022-07-11 10:36:03 +02:00
Julien Fontanet
dc8ca7a8ee feat(vhd-lib): 3.3.2 2022-07-11 10:36:03 +02:00
Julien Fontanet
3d1b87d9dc feat(@xen-orchestra/backups): 0.27.0 2022-07-11 10:35:58 +02:00
Julien Fontanet
01fa2af5cd chore: refresh yarn.lock 2022-07-11 10:11:00 +02:00
Julien Fontanet
20a89ca45a feat(xo-server-audit): ignore more methods 2022-07-09 10:41:36 +02:00
Julien Fontanet
16ca2f8da9 fix(xo-web/vm/console): SSH/RDP URLs
Introduced by 2b0f1b6aa and e9f82558e.
2022-07-07 17:06:49 +02:00
Florent BEAUCHAMP
30fe9764ad fix(backups,vhd-lib): merge with VhdSynthetic (#6317) 2022-07-07 16:57:15 +02:00
rajaa-b
e246c8ee47 fix(xo-web/tasks): tasks with no bound objects not displayed (#6315)
See https://xcp-ng.org/forum/topic/6038/not-seeing-tasks-any-more-as-admin
Introduced by dae37c6a50
2022-07-06 10:52:37 +02:00
Julien Fontanet
ba03a48498 chore(xo-server): update to hashy@0.11.1
Fixes https://xcp-ng.org/forum/post/50866

Introduced by 49890a09b7

Fixes argon2id support which is the new default algorithm since argon2@0.28.7
2022-07-06 09:44:48 +02:00
Julien Fontanet
b96dd0160a feat(async-each): change default concurrency to 10
BREAKING CHANGE
2022-07-05 12:00:07 +02:00
Julien Fontanet
49890a09b7 chore: update dev deps 2022-07-05 11:13:50 +02:00
Julien Fontanet
dfce56cee8 feat(async-each): add basic JsDoc typing 2022-07-04 17:37:52 +02:00
Julien Fontanet
a6fee2946a feat(async-each): concurrency 0 means no limit
It's identical to `Infinity` but has broader support (e.g. in JSON).
2022-07-04 17:22:47 +02:00
Julien Fontanet
34c849ee89 fix(vhd-lib/VhdAbstract#readBlock): return type 2022-07-04 10:57:44 +02:00
Mathieu
c7192ed3bf feat(xo-web): display maintenance mode badge next to the SR name (#6313) 2022-07-01 16:22:45 +02:00
Julien Fontanet
4d3dc0c5f7 feat: release 5.72.0 2022-06-30 16:47:32 +02:00
Julien Fontanet
9ba4afa073 chore(CHANGELOG): integrate released changes 2022-06-30 15:49:25 +02:00
Julien Fontanet
3ea4422d13 feat(xo-web): 5.99.0 2022-06-30 15:47:22 +02:00
Julien Fontanet
de2e314f7d feat(xo-server): 5.98.0 2022-06-30 15:46:58 +02:00
Julien Fontanet
2380fb42fe feat(@xen-orchestra/proxy): 0.23.4 2022-06-30 15:46:14 +02:00
Julien Fontanet
95b76076a3 feat(xo-remote-parser): 0.9.1 2022-06-30 15:45:29 +02:00
Julien Fontanet
b415d4c34c feat(vhd-lib): 3.3.1 2022-06-30 15:44:21 +02:00
Julien Fontanet
2d82b6dd6e feat(@xen-orchestra/xapi): 1.4.0 2022-06-30 15:38:10 +02:00
Mathieu
16b1935f12 feat(xo-server,xo-web/SR): display maintenance mode button (#6308)
Fixes #6215
2022-06-30 15:31:28 +02:00
Florent BEAUCHAMP
50ec614b2a feat(xo-web/remotes): ability to set useVhdDirectory in remote params (#6273) 2022-06-30 15:28:42 +02:00
rajaa-b
9e11a0af6e feat(xapi/VM_import): translate checksum error (#6304) 2022-06-30 12:08:36 +02:00
Florent BEAUCHAMP
0c3e42e0b9 fix(vhd-lib): fix VhdDirectory merge on non-S3 remote (#6310) 2022-06-30 11:40:21 +02:00
Julien Fontanet
36b31bb0b3 chore(vhd-lib/merge): minor comment improvement 2022-06-29 15:29:20 +02:00
Mathieu
c03c41450b feat: technical release (#6311) 2022-06-29 15:27:14 +02:00
Florent BEAUCHAMP
dfc2b5d88b feat(Backup): use vhd directory setting of remote (#6303) 2022-06-29 10:51:13 +02:00
Florent BEAUCHAMP
87e3e3ffe3 fix(xo-remote-parser): properly handle undefined options (#6309) 2022-06-29 10:26:50 +02:00
Rajaa.BARHTAOUI
dae37c6a50 feat(xo-web/tasks): show tasks for Self Service users (#6217)
See zammad#5436
2022-06-28 18:35:58 +02:00
Mathieu
c7df11cc6f feat(xo-web/user): user tokens management through XO interface (#6276) 2022-06-28 17:57:59 +02:00
Julien Fontanet
87f1f208c3 feat(vhd-cli): 0.8.0 2022-06-28 16:52:27 +02:00
Julien Fontanet
ba8c5d740e feat(vhd-cli info): list method with multiple VHDs 2022-06-27 16:24:43 +02:00
Julien Fontanet
c275d5d999 chore(vhd-cli): remove build step 2022-06-27 16:24:43 +02:00
Mathieu
cfc53c9c94 feat(xo-web/proxies): copy proxy URL (#6287) 2022-06-27 15:41:32 +02:00
Julien Fontanet
87df917157 feat(vhd-lib/merge): human readable UUID check
Introduced by a1bcd35e2
2022-06-27 14:10:15 +02:00
Julien Fontanet
395d87d290 chore(xo-common): remove build step 2022-06-23 17:24:54 +02:00
Julien Fontanet
aff8ec08ad chore(template): remove build step 2022-06-23 17:24:54 +02:00
Julien Fontanet
4d40b56d85 fix(xo-server/file restore): ignore non-regular files/dirs (#6305)
Fixes zammad#7648

This also ignores symlinks (both broken and valid).
2022-06-23 16:37:56 +02:00
Julien Fontanet
667d0724c3 docs(configuration/custom ca): fix systemd path
Introduced by 03a66e469
2022-06-22 11:32:24 +02:00
Julien Fontanet
a49395553a docs(configuration/custom ca): fix systemd path
Introduced by 03a66e469
2022-06-22 11:30:09 +02:00
Julien Fontanet
cce09bd9cc docs(configuration/custom ca): add note regarding XO Proxy 2022-06-22 10:44:25 +02:00
Julien Fontanet
03a66e4690 docs(configuration/custom ca): use separate systemd file
This is better as it avoids conflicts with existing config and is compatible with the way XO Proxy service is handled.
2022-06-22 10:44:25 +02:00
Florent BEAUCHAMP
fd752fee80 feat(backups,vhd-lib): implement copyless merge (#6271) 2022-06-22 10:36:57 +02:00
Julien Fontanet
8a71f84733 chore(xo-server): remove Model wrapping 2022-06-22 10:10:39 +02:00
Julien Fontanet
9ef2c7da4c chore(complex-matcher): remove build step 2022-06-22 09:55:59 +02:00
Julien Fontanet
8975073416 fix(xapi): add missing file
Introduced by b12c17947

Thanks @Danp2.
2022-06-22 00:07:32 +02:00
Julien Fontanet
d1c1378c9d feat(xo-server-db): minimal CLI to browser the DB 2022-06-21 18:11:44 +02:00
Julien Fontanet
7941284a1d feat(xo-server/collection/Redis): set of all indexes 2022-06-21 17:47:56 +02:00
Julien Fontanet
af2d17b7a5 feat(xo-server/collection/Redis): set of all namespaces 2022-06-21 17:29:19 +02:00
Julien Fontanet
3ca2b01d9a feat(xo-server/collection/Redis): assert namespace doesnt contain _ or : 2022-06-21 17:24:10 +02:00
Julien Fontanet
67193a2ab7 chore(xo-server/collection/Redis): replace prefix by namespace 2022-06-21 17:23:25 +02:00
Julien Fontanet
9757aa36de chore(xo-server/collection/Redis): _id field was never used 2022-06-21 17:23:18 +02:00
Julien Fontanet
29854a9f87 feat(xo-server): new sr.{enable,disable}MaintenanceMode methods 2022-06-21 15:07:09 +02:00
Julien Fontanet
b12c179470 feat(xapi): new SR_{enable,disable}MaintenanceMode methods 2022-06-21 15:07:09 +02:00
Julien Fontanet
bbef15e4e4 feat(xo-server/proxy.get{,All}); return associated URL(s) (#6291) 2022-06-21 11:33:25 +02:00
Florent BEAUCHAMP
c483929a0d fix(ova import): drain disk entry completly (#6284) 2022-06-20 16:09:20 +02:00
Julien Fontanet
1741f395dd chore(xo-server/deleteAuthenticationTokens): optimization
Don't use xo-server/deleteAuthenticationToken to avoid fetching the records twice.
2022-06-19 11:37:42 +02:00
Julien Fontanet
0f29262797 chore(value-matcher): remove build step 2022-06-19 11:28:11 +02:00
Julien Fontanet
31ed477b96 feat(xo-server/token.delete): available for non-admins 2022-06-17 11:59:29 +02:00
Julien Fontanet
9e5de5413d feat(xo-server/Collection#remove): accept a pattern 2022-06-17 11:59:29 +02:00
Florent BEAUCHAMP
0f297a81a4 feat(xo-remote-parser): additional parameters in URL (#6270) 2022-06-16 23:14:34 +02:00
239 changed files with 11315 additions and 8982 deletions


@@ -4,7 +4,6 @@ about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**

.gitignore (2 changes)

@@ -10,8 +10,6 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat


@@ -14,7 +14,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contains the following options:
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `1`
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: wether to stop iteration of first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`


@@ -32,7 +32,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contains the following options:
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `1`
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: wether to stop iteration of first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`
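A short usage sketch based on the documented options above, assuming the package is imported as `@vates/async-each` (items and iteratee body are illustrative):

```js
'use strict'

const { asyncEach } = require('@vates/async-each')

async function main() {
  const urls = ['https://example.org/a', 'https://example.org/b']

  await asyncEach(
    urls,
    async url => {
      // called once per item, with at most `concurrency` calls in flight
      console.log('fetching', url)
    },
    { concurrency: 10, stopOnError: false }
  )
}

main().catch(console.error)
```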


@@ -9,7 +9,16 @@ class AggregateError extends Error {
}
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []


@@ -36,7 +36,7 @@ describe('asyncEach', () => {
it('works', async () => {
const iteratee = jest.fn(async () => {})
await asyncEach.call(thisArg, iterable, iteratee)
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
@@ -66,7 +66,7 @@ describe('asyncEach', () => {
}
})
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
})
@@ -91,7 +91,9 @@ describe('asyncEach', () => {
}
})
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
await expect(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal })).rejects.toThrow(
'asyncEach aborted'
)
expect(iteratee).toHaveBeenCalledTimes(2)
})
})


@@ -24,7 +24,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"version": "1.0.0",
"engines": {
"node": ">=8.10"
},


@@ -35,7 +35,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "1.0.1",
"scripts": {
"postversion": "npm publish --access public",
"test": "tap --branches=72"

@vates/fuse-vhd/.npmignore (symbolic link)

@@ -0,0 +1 @@
../../scripts/npmignore

@vates/fuse-vhd/index.js (new file, 66 lines)

@@ -0,0 +1,66 @@
'use strict'
const LRU = require('lru-cache')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
// build a s stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
mtime: st.mtime || new Date(),
atime: st.atime || new Date(),
ctime: st.ctime || new Date(),
size: st.size !== undefined ? st.size : 0,
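// mode values below are the decimal forms of octal 040755 (directory), 0100644 (regular file) and 0120755 (symlink)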
mode: st.mode === 'dir' ? 16877 : st.mode === 'file' ? 33188 : st.mode === 'link' ? 41453 : st.mode,
uid: st.uid !== undefined ? st.uid : process.getuid(),
gid: st.gid !== undefined ? st.gid : process.getgid(),
})
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const Fuse = require('fuse-native')
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({
max: 16, // each cached block is 2MB in size
})
await vhd.readBlockAllocationTable()
const fuse = new Fuse(mountDir, {
async readdir(path, cb) {
if (path === '/') {
return cb(null, ['vhd0'])
}
cb(Fuse.ENOENT)
},
async getattr(path, cb) {
if (path === '/') {
return cb(
null,
stat({
mode: 'dir',
size: 4096,
})
)
}
if (path === '/vhd0') {
return cb(
null,
stat({
mode: 'file',
size: vhd.footer.currentSize,
})
)
}
cb(Fuse.ENOENT)
},
read(path, fd, buf, len, pos, cb) {
if (path === '/vhd0') {
return vhd.readRawData(pos, len, cache, buf).then(cb)
}
throw new Error(`read file ${path} not exists`)
},
})
return new Disposable(
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})
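A usage sketch for the exported `mount` factory, assuming a remote `handler` from `@xen-orchestra/fs` and promise-toolbox's `Disposable.use` (paths are illustrative):

```js
'use strict'

const { Disposable } = require('promise-toolbox')
const { mount } = require('@vates/fuse-vhd')

// `handler` is assumed to be an @xen-orchestra/fs handler for the remote
async function withRestoredDisk(handler, diskPath) {
  await Disposable.use(mount(handler, diskPath, '/tmp/vhd-mount'), async () => {
    // while the disposable is held, the merged content of the VHD chain is
    // exposed as a single raw file: /tmp/vhd-mount/vhd0
  })
}
```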


@@ -0,0 +1,32 @@
{
"name": "@vates/fuse-vhd",
"version": "0.0.1",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/fuse-vhd",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
},
"dependencies": {
"@xen-orchestra/log": "^0.3.0",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.0.1"
},
"optionalDependencies": {
"fuse-native": "^2.2.6"
},
"scripts": {
"postversion": "npm publish --access public"
}
}


@@ -19,7 +19,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.2",
"version": "1.0.0",
"engines": {
"node": ">=8.10"
},


@@ -26,7 +26,13 @@ module.exports = async function main(args) {
await asyncMap(_, async vmDir => {
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
await adapter.cleanVm(vmDir, {
fixMetadata: fix,
remove,
merge,
logInfo: (...args) => console.log(...args),
logWarn: (...args) => console.warn(...args),
})
} catch (error) {
console.error('adapter.cleanVm', vmDir, error)
}


@@ -7,8 +7,8 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.1.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.3",
"version": "0.7.7",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",


@@ -245,7 +245,7 @@ exports.Backup = class Backup {
})
)
),
() => settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined,
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
async (srs, remoteAdapters, healthCheckSr) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)


@@ -15,26 +15,28 @@ const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
const { asyncEach } = require('@vates/async-each')
const { mount } = require('@vates/fuse-vhd')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { warn } = createLogger('xo:backups:RemoteAdapter')
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -44,16 +46,13 @@ const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
files.push({
realPath,
metadataPath,
@@ -75,12 +74,16 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
constructor(
handler,
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
this._useGetDiskLegacy = useGetDiskLegacy
}
get handler() {
@@ -128,7 +131,9 @@ class RemoteAdapter {
}
async *_getPartition(devicePath, partition) {
const options = ['loop', 'ro']
// the norecovery option is necessary because if the partition is dirty,
// mount will try to fix it which is impossible if because the device is read-only
const options = ['loop', 'ro', 'norecovery']
if (partition !== undefined) {
const { size, start } = partition
@@ -225,11 +230,30 @@ class RemoteAdapter {
return promise
}
#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -257,6 +281,8 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -277,14 +303,13 @@ class RemoteAdapter {
full !== undefined && this.deleteFullVmBackups(full),
])
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
await asyncMap(new Set(files.map(file => dirname(file))), dir =>
// - don't merge in main process, unused VHDs will be merged in the next backup run
// - don't error in case this fails:
// - if lock is already being held, a backup is running and cleanVm will be ran at the end
// - otherwise, there is nothing more we can do, orphan file will be cleaned in the future
this.cleanVm(dir, { remove: true, logWarn: warn }).catch(noop)
)
}
#getCompressionType() {
@@ -292,14 +317,15 @@ class RemoteAdapter {
}
#useVhdDirectory() {
return this.handler.type === 's3'
return this.handler.useVhdDirectory()
}
#useAlias() {
return this.#useVhdDirectory()
}
async *getDisk(diskId) {
async *#getDiskLegacy(diskId) {
const RE_VHDI = /^vhdi(\d+)$/
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
@@ -329,6 +355,30 @@ class RemoteAdapter {
}
}
async *getDisk(diskId) {
if (this._useGetDiskLegacy) {
yield* this.#getDiskLegacy(diskId)
return
}
const handler = this._handler
// this is a disposable
const mountDir = yield getTmpDir()
try {
// this is also a disposable
yield mount(handler, diskId, mountDir)
} catch (error) {
// fallback in case of missing dependency
if (error.code === 'MODULE_NOT_FOUND') {
yield* this.#getDiskLegacy(diskId)
} else {
throw error
}
}
// this will yield disk path to caller
yield `${mountDir}/vhd0`
}
// partitionId values:
//
// - undefined: raw disk
@@ -379,18 +429,25 @@ class RemoteAdapter {
listPartitionFiles(diskId, partitionId, path) {
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncMap(await readdir(path), async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
await asyncEach(
await readdir(path),
async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
}
})
},
{ concurrency: 1 }
)
return entriesMap
})
@@ -455,11 +512,46 @@ class RemoteAdapter {
return backupsByPool
}
#getVmBackupsCache(vmUuid) {
return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
async #readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
if (error.code !== 'ENOENT') {
warn('#readCache', { error, path })
}
}
}
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
const cache = await this.#readCache(path)
if (cache !== undefined) {
fn(cache)
await this.#writeCache(path, cache)
}
}
async #writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
warn('#writeCache', { error, path })
}
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
const handler = this._handler
const backups = {}
@@ -495,41 +587,26 @@ class RemoteAdapter {
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
const path = this.#getVmBackupsCache(vmUuid)
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
const cache = await this.#readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(dir)
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeVmBackupsCache(path, backups)
this.#writeCache(path, backups)
return backups
}
async #writeVmBackupsCache(cacheFile, backups) {
try {
const text = JSON.stringify(backups)
const zipped = await fromCallback(zlib.gzip, text)
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
} catch (error) {
warn('writeVmBackupsCache', { cacheFile, error })
}
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
@@ -568,13 +645,35 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
async writeVmBackupMetadata(vmUuid, metadata) {
const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
await this.handler.outputFile(path, JSON.stringify(metadata), {
dirMode: this._dirMode,
})
// will not throw
this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,
// these values are required in the cache
_filename: path,
id: path,
}
})
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16,
concurrency: writeBlockConcurrency,
compression: this.#getCompressionType(),
async validator() {
await input.task


@@ -3,8 +3,10 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = () => {
throw new Error('task has already ended')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
}
const noop = Function.prototype


@@ -128,42 +128,49 @@ class VmBackup {
}
// calls fn for each function, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, warnMessage, parallel = true) {
async _callWriters(fn, step, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
if (n === 1) {
const [writer] = writers
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { duration: step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only one that are not already in their own sub tasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
return
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await fn(writer)
await callWriter(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
// these two steps are the only one that are not already in their own sub tasks
if (warnMessage === 'writer.checkBaseVdis()' || warnMessage === 'writer.beforeBackup()') {
Task.warning(
`the writer ${writer.constructor.name} has failed the step ${warnMessage} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
}
}


@@ -35,7 +35,7 @@ afterEach(async () => {
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
async function generateVhd(path, opts = {}) {
let vhd
@@ -78,15 +78,15 @@ test('It remove broken vhd', async () => {
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
expect((await handler.list(basePath)).length).toEqual(1)
let loggued = ''
const onLog = message => {
const logInfo = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
await adapter.cleanVm('/', { remove: false, logInfo, logWarn: logInfo, lock: false })
expect(loggued).toEqual(`VHD check error`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
// really remove it
await adapter.cleanVm('/', { remove: true, onLog })
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
expect((await handler.list(basePath)).length).toEqual(0)
})
@@ -118,15 +118,13 @@ test('it remove vhd with missing or multiple ancestors', async () => {
)
// clean
let loggued = ''
const onLog = message => {
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
// we don't test the filew on disk, since they will all be marker as unused and deleted without a metadata.json file
})
@@ -159,14 +157,12 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
})
let loggued = ''
const onLog = message => {
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
// a missing vhd cause clean to remove all vhds
await handler.writeFile(
@@ -183,8 +179,8 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
})
@@ -220,16 +216,16 @@ test('it merges delta of non destroyed chain', async () => {
})
let loggued = []
const onLog = message => {
const logInfo = message => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
await adapter.cleanVm('/', { remove: true, logInfo, logWarn: logInfo, lock: false })
expect(loggued[0]).toEqual(`incorrect backup size in metadata`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
await adapter.cleanVm('/', { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
const [merging] = loggued
expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
expect(merging).toEqual(`merging VHD chain`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children after the merge
@@ -275,7 +271,7 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true })
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
// merging is already tested in vhd-lib, don't retest it here (and theses vhd are as empty as my stomach at 12h12)
// only check deletion
@@ -382,7 +378,7 @@ describe('tests multiple combination ', () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true })
await adapter.cleanVm('/', { remove: true, merge: true, logWarn: () => {}, lock: false })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
// size should be the size of children + grand children + clean after the merge
@@ -418,7 +414,7 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true })
await adapter.cleanVm('/', { remove: true, logWarn: () => {}, lock: false })
expect(await handler.list(basePath)).toEqual([])
})
@@ -433,7 +429,11 @@ test('check Aliases should work alone', async () => {
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
// only ok have suvived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))


@@ -1,22 +1,27 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
// we only check size of all the vhd are VhdFiles
function shouldComputeVhdsSize(vhds) {
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
return vhds.every(vhd => vhd instanceof VhdFile)
}
@@ -24,73 +29,49 @@ const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(vhds)) {
if (shouldComputeVhdsSize(handler, vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is [ ancestor, child1, ..., childn]
// 1. Create a VhdSynthetic from all children
// 2. Merge the VhdSynthetic into the ancestor
// 3. Delete all (now) unused VHDs
// 4. Rename the ancestor with the merged data to the latest child
//
// VhdSynthetic
// |
// /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, childn ]
// | \___________________/ ^
// | | |
// | unused VHDs |
// | |
// \___________rename_____________/
async function mergeVhdChain(chain, { handler, logInfo, remove, merge }) {
assert(chain.length >= 2)
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) {
logInfo(`merging children into parent`, { childrenCount: children.length, parent })
logInfo(`merging VHD chain`, { chain })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
logInfo(`merging children in progress`, { children, parent, doneCount: done, totalCount: total})
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
}
}, 10e3)
const mergedSize = await mergeVhd(handler, parent, handler, children, {
onProgress({ done: d, total: t }) {
done = d
total = t
},
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
logInfo(`the VHD child is already merged`, { child })
if (remove) {
logInfo(`deleting merged VHD child`, { child })
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir) => {
const listVhds = async (handler, vmDir, logWarn) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
@@ -110,12 +91,23 @@ const listVhds = async (handler, vmDir) => {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
list.forEach(file => {
await asyncMap(list, async file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
} else {
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non-resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
}
})
}
@@ -131,15 +123,15 @@ async function checkAliases(
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
const aliasFound = []
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
logWarn('alias references non VHD target', { path, target })
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { path, target })
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(path)
await handler.unlink(alias)
}
continue
}
@@ -152,13 +144,13 @@ async function checkAliases(
// error during dispose should not trigger a deletion
}
} catch (error) {
logWarn('missing or broken alias target', { target, path, error })
logWarn('missing or broken alias target', { alias, target, error })
if (remove) {
try {
await VhdAbstract.unlink(handler, path)
await VhdAbstract.unlink(handler, alias)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { target, path, error })
logWarn('error deleting alias target', { alias, target, error })
}
}
}
@@ -168,17 +160,17 @@ async function checkAliases(
aliasFound.push(resolve('/', target))
}
const entries = await handler.list(targetDataRepository, {
const vhds = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
logWarn('no alias references VHD', { entry })
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
if (remove) {
logInfo('deleting unaliased VHD')
await VhdAbstract.unlink(handler, entry)
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
}
}
})
@@ -190,17 +182,26 @@ const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, logInfo = noop, logWarn = console.warn }
{
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
) {
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
// remove broken VHDs
await asyncMap(vhds, async path => {
@@ -218,12 +219,31 @@ exports.cleanVm = async function cleanVm(
}
vhdChildren[parent] = path
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
})
} catch (error) {
vhds.delete(path)
logWarn('VHD check error', { path, error })
if (error?.code === 'ERR_ASSERTION' && remove) {
logInfo('deleting broken path', { path })
logInfo('deleting broken VHD', { path })
return VhdAbstract.unlink(handler, path)
}
}
@@ -232,7 +252,7 @@ exports.cleanVm = async function cleanVm(
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const statePath = interruptedVhds.get(interruptedVhd)
const { statePath } = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
logWarn('orphan merge state', {
@@ -271,9 +291,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
logWarn('parent VHD is missing', { parent, vhdPath })
logWarn('parent VHD is missing', { parent, child: vhdPath })
if (remove) {
logInfo('deleting orphan VHD', { vhdPath })
logInfo('deleting orphan VHD', { path: vhdPath })
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -291,6 +311,7 @@ exports.cleanVm = async function cleanVm(
}
const jsons = new Set()
let mustInvalidateCache = false
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -324,7 +345,7 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
logWarn('failed to read metadata file', { json, error })
logWarn('failed to read backup metadata', { path: json, error })
jsons.delete(json)
return
}
@@ -335,10 +356,11 @@ exports.cleanVm = async function cleanVm(
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
logWarn('metadata XVA is missing', { json })
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
if (remove) {
logInfo('deleting incomplete backup', { json })
logInfo('deleting incomplete backup', { path: json })
jsons.delete(json)
mustInvalidateCache = true
await handler.unlink(json)
}
}
@@ -358,9 +380,10 @@ exports.cleanVm = async function cleanVm(
vhdsToJSons[path] = json
})
} else {
logWarn('some metadata VHDs are missing', { json, missingVhds })
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
if (remove) {
logInfo('deleting incomplete backup', { json })
logInfo('deleting incomplete backup', { path: json })
mustInvalidateCache = true
jsons.delete(json)
await handler.unlink(json)
}
@@ -372,7 +395,7 @@ exports.cleanVm = async function cleanVm(
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -396,14 +419,14 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.push(vhd)
chain.unshift(vhd)
return chain
}
}
logWarn('unused VHD', { vhd })
logWarn('unused VHD', { path: vhd })
if (remove) {
logInfo('deleting unused VHD', { vhd })
logInfo('deleting unused VHD', { path: vhd })
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -414,7 +437,13 @@ exports.cleanVm = async function cleanVm(
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
}
Object.values(vhdChainsToMerge).forEach(chain => {
@@ -427,9 +456,15 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(chain, { handler, logInfo, logWarn, remove, merge })
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[0]] // the whole chain should have the same metadata file
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // the whole chain should have the same metadata file
metadataWithMergedVhd[metadataPath] = true
}
})
@@ -472,7 +507,11 @@ exports.cleanVm = async function cleanVm(
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
fileSystemSize = await handler.getSize(linkedXva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -484,11 +523,15 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
logWarn('incorrect size in metadata', { size: size ?? 'none', fileSystemSize })
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
} catch (error) {
logWarn('failed to get metadata size', { metadataPath, error })
logWarn('failed to get backup size', { backup: metadataPath, error })
return
}
@@ -498,11 +541,16 @@ exports.cleanVm = async function cleanVm(
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
logWarn('metadata size update failed', { metadataPath, error })
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
}
}
})
// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
await handler.unlink(vmDir + '/cache.json.gz')
}
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

View File

@@ -1,12 +1,12 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

View File

@@ -3,6 +3,8 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
@@ -11,18 +13,23 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: stream.forks })
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
proxy.destroy(error)
}
})
eos(proxy, _ => {
stream.forks--
eos(proxy, error => {
debug('end of stream, unpiping', { error, forks: --stream.forks })
stream.unpipe(proxy)
if (stream.forks === 0) {
debug('no more forks, destroying original stream')
stream.destroy(new Error('no more consumers for this stream'))
}
})

View File

@@ -49,6 +49,11 @@ const isValidTar = async (handler, size, fd) => {
// TODO: find a heuristic for compressed files
async function isValidXva(path) {
const handler = this._handler
// the size is larger when encrypted, and reading part of an encrypted file is not implemented
if (handler.isEncrypted) {
return true
}
try {
const fd = await handler.openFile(path, 'r')
try {
@@ -66,7 +71,6 @@ async function isValidXva(path) {
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}

View File

@@ -14,12 +14,14 @@
## File structure on remote
### with vhd files
```
<remote>
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ index.json // TODO
├─ cache.json.gz
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
@@ -30,6 +32,31 @@
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
### with vhd directories
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
```
<vdis>/<job UUID>/<VDI UUID>
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
└─ data
├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
└─ <uuid>.vhd
```
## Cache for a VM
In a VM directory, if the file `cache.json.gz` exists, it contains the metadata of all the backups of this VM.
The full path of this file is `xo-vm-backups/<VM UUID>/cache.json.gz`.
This cache is Gzip-compressed and contains a JSON object with the metadata of all the backups of this VM, indexed by their absolute path (i.e. `/xo-vm-backups/<VM UUID>/<timestamp>.json`).
This file is generated on demand when listing the backups, and updated directly on backup creation/deletion.
If any inconsistency is detected, the file is deleted so that it will be fully regenerated when required.
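A minimal sketch of how a consumer could read this cache, assuming a generic `handler.readFile()` that returns a buffer and using Node's built-in `zlib` (illustrative only, not the actual xo-server implementation):
```
const { gunzipSync } = require('zlib')

// hypothetical helper: returns the parsed cache, or undefined if it does not exist yet
async function readVmBackupsCache(handler, vmUuid) {
  try {
    const gzipped = await handler.readFile(`xo-vm-backups/${vmUuid}/cache.json.gz`)
    // keys are absolute metadata paths: /xo-vm-backups/<VM UUID>/<timestamp>.json
    return JSON.parse(gunzipSync(gzipped))
  } catch (error) {
    if (error.code === 'ENOENT') {
      return undefined // no cache yet, the caller rebuilds it from the individual metadata files
    }
    throw error
  }
}
```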
## Attributes
### Of created snapshots

View File

@@ -64,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {

View File

@@ -8,7 +8,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.25.0",
"version": "0.27.4",
"engines": {
"node": ">=14.6"
},
@@ -16,16 +16,18 @@
"postversion": "npm publish --access public"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@vates/fuse-vhd": "^0.0.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
"compare-versions": "^5.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
@@ -37,8 +39,8 @@
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^8.3.2",
"vhd-lib": "^3.2.0",
"uuid": "^9.0.0",
"vhd-lib": "^4.0.1",
"yazl": "^2.5.1"
},
"devDependencies": {
@@ -46,7 +48,7 @@
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.2.0"
"@xen-orchestra/xapi": "^1.4.2"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -19,8 +19,6 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
@@ -38,6 +36,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
@@ -71,35 +70,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
return this._cleanVm({ merge: true })
}
healthCheck(sr) {
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -189,7 +159,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataContent = {
jobId,
mode: job.mode,
@@ -235,6 +204,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
})
if (isDelta) {
@@ -254,9 +224,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
})
metadataContent.size = size
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
// TODO: run cleanup?
}

View File

@@ -34,7 +34,6 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -74,9 +73,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)
if (!deleteFirst) {
await deleteOldBackups()

View File

@@ -3,10 +3,13 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const assert = require('assert')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { Task } = require('../Task.js')
const MergeWorker = require('../merge-worker/index.js')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
@@ -36,6 +39,7 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
Task.warning(message, data)
},
lock: false,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
})
})
} catch (error) {
@@ -71,6 +75,39 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}
healthCheck(sr) {
assert.notStrictEqual(
this._metadataFileName,
undefined,
'Metadata file name should be defined before making a healthcheck'
)
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
}

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.2.1"
"xen-api": "^1.2.2"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -0,0 +1,19 @@
## metadata files
- Older remotes don't have any metadata file
- Remotes used since 5.75 have two files: `encryption.json` and `metadata.json`
The metadata files are checked by the `sync()` method. If the check fails, it MUST throw an error and unmount the remote.
If the remote is empty, the `sync()` method creates them.
### encryption.json
A non-encrypted file containing the algorithm and parameters used for this remote.
It MUST NOT contain the key.
### metadata.json
An encrypted JSON file containing the settings of the remote. Today it is an almost empty JSON file (`{random: <random UUID>}`); it serves to check whether the encryption key set on the remote is valid, but in the future it will be able to store remote settings to ease disaster recovery.
If this file can't be read (decrypted, decompressed, ...), it means the remote's encryption settings have changed. If the remote is empty, the `encryption.json` and `metadata.json` files are updated; otherwise an error is raised.
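For reference, with the implementation introduced in this changeset, these two files look roughly as follows (values are illustrative; `metadata.json` is shown as its decrypted content):
```
# encryption.json (stored in clear text, never contains the key)
{"algorithm": "aes-256-cbc"}

# metadata.json (stored encrypted; once decrypted it only holds a random marker)
{"random": "1c9ea35b-5b27-4e67-a4a2-5c4d63f9b1de"}
```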

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "1.0.3",
"version": "3.1.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -17,18 +17,18 @@
"xo-fs": "./cli.js"
},
"engines": {
"node": ">=14"
"node": ">=14.13"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.54.0",
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@marsaud/smb2": "^0.18.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^0.1.0",
"@vates/async-each": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.0.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"bind-property-descriptor": "^2.0.0",
@@ -40,9 +40,10 @@
"lodash": "^4.17.4",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"readable-stream": "^3.0.6",
"pumpify": "^2.0.1",
"readable-stream": "^4.1.0",
"through2": "^4.0.2",
"xo-remote-parser": "^0.8.0"
"xo-remote-parser": "^0.9.1"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -50,7 +51,6 @@
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
@@ -68,5 +68,9 @@
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"exports": {
".": "./dist/index.js",
"./path": "./dist/path.js"
}
}

View File

@@ -0,0 +1,71 @@
const { readChunk } = require('@vates/read-chunk')
const crypto = require('crypto')
const pumpify = require('pumpify')
function getEncryptor(key) {
if (key === undefined) {
return {
id: 'NULL_ENCRYPTOR',
algorithm: 'none',
key: 'none',
ivLength: 0,
encryptData: buffer => buffer,
encryptStream: stream => stream,
decryptData: buffer => buffer,
decryptStream: stream => stream,
}
}
const algorithm = 'aes-256-cbc'
const ivLength = 16
function encryptStream(input) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = pumpify(input, cipher)
encrypted.unshift(iv)
return encrypted
}
async function decryptStream(encryptedStream) {
const iv = await readChunk(encryptedStream, ivLength)
const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
/**
* WARNING
*
* the encrypted data has an initialization vector + padding at the end
* we can't predict the decrypted size from the encrypted size
* thus, we can't set decrypted.length reliably
*
*/
return pumpify(encryptedStream, cipher)
}
function encryptData(buffer) {
const iv = crypto.randomBytes(ivLength)
const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
const encrypted = cipher.update(buffer)
return Buffer.concat([iv, encrypted, cipher.final()])
}
function decryptData(buffer) {
const iv = buffer.slice(0, ivLength)
const encrypted = buffer.slice(ivLength)
const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
const decrypted = decipher.update(encrypted)
return Buffer.concat([decrypted, decipher.final()])
}
return {
id: algorithm,
algorithm,
key,
ivLength,
encryptData,
encryptStream,
decryptData,
decryptStream,
}
}
exports._getEncryptor = getEncryptor
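Below is a minimal usage sketch of this encryptor; the 32-byte key is generated on the fly for illustration only, while in practice it comes from the remote's `encryptionKey` setting:
```
const crypto = require('crypto')
const { _getEncryptor } = require('./_encryptor') // assuming the module above

const key = crypto.randomBytes(32) // aes-256-cbc requires a 256-bit key
const encryptor = _getEncryptor(key)

const clear = Buffer.from('backup metadata')
const encrypted = encryptor.encryptData(clear)
// layout: 16-byte IV followed by the ciphertext, padded to the 16-byte block size
const decrypted = encryptor.decryptData(encrypted)
console.log(decrypted.equals(clear)) // true
```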

View File

@@ -1,4 +1,5 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import assert from 'assert'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
@@ -6,13 +7,14 @@ import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
import { pipeline } from 'stream'
import { randomBytes } from 'crypto'
import { randomBytes, randomUUID } from 'crypto'
import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './_path'
import { basename, dirname, normalize as normalizePath } from './path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
import { _getEncryptor } from './_encryptor'
const { warn } = createLogger('@xen-orchestra:fs')
const { info, warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
@@ -23,6 +25,9 @@ const computeRate = (hrtime, size) => {
const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
const ENCRYPTION_DESC_FILENAME = 'encryption.json'
const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
throw error
@@ -63,6 +68,7 @@ class PrefixWrapper {
}
export default class RemoteHandlerAbstract {
_encryptor
constructor(remote, options = {}) {
if (remote.url === 'test://') {
this._remote = remote
@@ -73,6 +79,7 @@ export default class RemoteHandlerAbstract {
}
}
;({ highWaterMark: this._highWaterMark, timeout: this._timeout = DEFAULT_TIMEOUT } = options)
this._encryptor = _getEncryptor(this._remote.encryptionKey)
const sharedLimit = limitConcurrency(options.maxParallelOperations ?? DEFAULT_MAX_PARALLEL_OPERATIONS)
this.closeFile = sharedLimit(this.closeFile)
@@ -111,90 +118,51 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
// TODO: remove method
async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (options.end !== undefined || options.start !== undefined) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
}
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
dirMode,
flags: 'wx',
...options,
}),
let stream = await timeout.call(
this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
this._timeout
)
if (!checksum) {
return streamP
}
// detect early errors
await fromEvent(stream, 'readable')
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
if (checksum) {
try {
const path = typeof file === 'string' ? file : file.path
const checksum = await this._readFile(checksumFile(path), { flags: 'r' })
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout
.call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
.then(stream => {
// detect early errors
let promise = fromEvent(stream, 'readable')
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
promise = Promise.all([
promise,
ignoreErrors.call(
this._getSize(file).then(size => {
stream.length = size
})
),
])
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
} catch (error) {
if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
throw error
}
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
// avoid a unhandled rejection warning
ignoreErrors.call(streamP)
return this._readFile(checksumFile(path), { flags: 'r' }).then(
checksum =>
streamP.then(stream => {
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
)
}
if (this.isEncrypted) {
stream = this._encryptor.decryptStream(stream)
} else {
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
try {
stream.length = await this._getSize(file)
} catch (error) {
// ignore errors
}
}
}
return stream
}
/**
@@ -210,6 +178,8 @@ export default class RemoteHandlerAbstract {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
input = this._encryptor.encryptStream(input)
if (checksum) {
checksumStream = createChecksumStream()
pipeline(input, checksumStream, noop)
@@ -220,6 +190,8 @@ export default class RemoteHandlerAbstract {
validator,
})
if (checksum) {
// using _outputFile means the checksum will NOT be encrypted
// it is by design to allow checking of encrypted files without the key
await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
}
}
@@ -239,8 +211,13 @@ export default class RemoteHandlerAbstract {
return timeout.call(this._getInfo(), this._timeout)
}
// when using encryption, the file size is aligned with the encryption block size (16 bytes)
// that means the size on disk will be 1 to 16 bytes more than the content size, plus the initialization vector length (16 bytes)
async getSize(file) {
return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
return size - this._encryptor.ivLength
}
async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
@@ -286,15 +263,18 @@ export default class RemoteHandlerAbstract {
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
await this._outputFile(normalizePath(file), data, { dirMode, flags })
const encryptedData = this._encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
}
async read(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async readFile(file, { flags = 'r' } = {}) {
return this._readFile(normalizePath(file), { flags })
const data = await this._readFile(normalizePath(file), { flags })
return this._encryptor.decryptData(data)
}
async rename(oldPath, newPath, { checksum = false } = {}) {
@@ -334,6 +314,61 @@ export default class RemoteHandlerAbstract {
@synchronized()
async sync() {
await this._sync()
try {
await this._checkMetadata()
} catch (error) {
await this._forget()
throw error
}
}
async _canWriteMetadata() {
const list = await this.list('/', {
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
})
return list.length === 0
}
async _createMetadata() {
await Promise.all([
this._writeFile(
normalizePath(ENCRYPTION_DESC_FILENAME),
JSON.stringify({ algorithm: this._encryptor.algorithm }),
{
flags: 'w',
}
), // not encrypted
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
])
}
async _checkMetadata() {
try {
// this file is not encrypted
const data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME))
JSON.parse(data)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
try {
// this file is encrypted
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME)
JSON.parse(data)
} catch (error) {
if (error.code === 'ENOENT' || (await this._canWriteMetadata())) {
info('will update metadata of this remote')
return this._createMetadata()
}
warn(
`The encryptionKey setting of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
{ error }
)
// will probably throw an ERR_OSSL_EVP_BAD_DECRYPT error if the key is incorrect
throw error
}
}
async test() {
@@ -387,11 +422,13 @@ export default class RemoteHandlerAbstract {
}
async write(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async writeFile(file, data, { flags = 'wx' } = {}) {
await this._writeFile(normalizePath(file), data, { flags })
const encryptedData = this._encryptor.encryptData(data)
await this._writeFile(normalizePath(file), encryptedData, { flags })
}
// Methods that can be called by private methods to avoid parallel limit on public methods
@@ -424,6 +461,10 @@ export default class RemoteHandlerAbstract {
// Methods that can be implemented by inheriting classes
useVhdDirectory() {
return this._remote.useVhdDirectory ?? false
}
async _closeFile(fd) {
throw new Error('Not implemented')
}
@@ -506,9 +547,13 @@ export default class RemoteHandlerAbstract {
async _outputStream(path, input, { dirMode, validator }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await this.createOutputStream(tmpPath, {
dirMode,
})
const output = await timeout.call(
this._createOutputStream(tmpPath, {
dirMode,
flags: 'wx',
}),
this._timeout
)
try {
await fromCallback(pipeline, input, output)
if (validator !== undefined) {
@@ -591,6 +636,10 @@ export default class RemoteHandlerAbstract {
async _writeFile(file, data, options) {
throw new Error('Not implemented')
}
get isEncrypted() {
return this._encryptor.id !== 'NULL_ENCRYPTOR'
}
}
function createPrefixWrapperMethods() {

View File

@@ -30,18 +30,6 @@ describe('closeFile()', () => {
})
})
describe('createOutputStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createOutputStream: () => new Promise(() => {}),
})
const promise = testHandler.createOutputStream('File')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({

View File

@@ -1,10 +1,7 @@
/* eslint-env jest */
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -27,9 +24,6 @@ const unsecureRandomBytes = n => {
const TEST_DATA_LEN = 1024
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
const createTestDataStream = asyncIteratorToStream(function* () {
yield TEST_DATA
})
const rejectionOf = p =>
p.then(
@@ -82,14 +76,6 @@ handlers.forEach(url => {
})
})
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {

View File

@@ -5,7 +5,6 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'
const HANDLERS = {
file: RemoteHandlerLocal,
@@ -15,10 +14,8 @@ const HANDLERS = {
try {
execa.sync('mount.cifs', ['-V'])
HANDLERS.smb = RemoteHandlerSmbMount
} catch (_) {
HANDLERS.smb = RemoteHandlerSmb
}
} catch (_) {}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]

View File

@@ -1,13 +1,38 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
const { info, warn } = createLogger('xo:fs:local')
// save current stack trace and add it to any rejected error
//
// This is especially useful when the resolution is separate from the initial
// call, which is often the case with RPC libs.
//
// There is a perf impact and it should be avoided in production.
async function addSyncStackTrace(fn, ...args) {
const stackContainer = new Error()
try {
return await fn.apply(this, args)
} catch (error) {
error.syncStack = stackContainer.stack
throw error
}
}
function dontAddSyncStackTrace(fn, ...args) {
return fn.apply(this, args)
}
export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote, opts = {}) {
super(remote)
this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
this._retriesOnEagain = {
delay: 1e3,
retries: 9,
@@ -30,17 +55,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {
return fs.close(fd)
return this._addSyncStackTrace(fs.close, fd)
}
async _copy(oldPath, newPath) {
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
return this._addSyncStackTrace(fs.copy, this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)
await fromEvent(stream, 'open')
await this._addSyncStackTrace(fromEvent, stream, 'open')
return stream
}
return fs.createReadStream('', {
@@ -53,7 +78,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
async _createWriteStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createWriteStream(this._getFilePath(file), options)
await fromEvent(stream, 'open')
await this._addSyncStackTrace(fromEvent, stream, 'open')
return stream
}
return fs.createWriteStream('', {
@@ -79,71 +104,98 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _getSize(file) {
const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
const stats = await this._addSyncStackTrace(fs.stat, this._getFilePath(typeof file === 'string' ? file : file.path))
return stats.size
}
async _list(dir) {
return fs.readdir(this._getFilePath(dir))
return this._addSyncStackTrace(fs.readdir, this._getFilePath(dir))
}
_lock(path) {
return lockfile.lock(this._getFilePath(path))
async _lock(path) {
const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
async onCompromised(error) {
warn('lock compromised', { error })
try {
release = await acquire()
info('compromised lock was reacquired')
} catch (error) {
warn('compromised lock could not be reacquired', { error })
}
},
})
let release = await this._addSyncStackTrace(acquire)
return async () => {
try {
await this._addSyncStackTrace(release)
} catch (error) {
warn('lock could not be released', { error })
}
}
}
_mkdir(dir, { mode }) {
return fs.mkdir(this._getFilePath(dir), { mode })
return this._addSyncStackTrace(fs.mkdir, this._getFilePath(dir), { mode })
}
async _openFile(path, flags) {
return fs.open(this._getFilePath(path), flags)
return this._addSyncStackTrace(fs.open, this._getFilePath(path), flags)
}
async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
file = needsClose ? await this._addSyncStackTrace(fs.open, this._getFilePath(file), 'r') : file.fd
try {
return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
return await this._addSyncStackTrace(
fs.read,
file,
buffer,
0,
buffer.length,
position === undefined ? null : position
)
} finally {
if (needsClose) {
await fs.close(file)
await this._addSyncStackTrace(fs.close, file)
}
}
}
async _readFile(file, options) {
const filePath = this._getFilePath(file)
return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
return await this._addSyncStackTrace(retry, () => fs.readFile(filePath, options), this._retriesOnEagain)
}
async _rename(oldPath, newPath) {
return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
return this._addSyncStackTrace(fs.rename, this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _rmdir(dir) {
return fs.rmdir(this._getFilePath(dir))
return this._addSyncStackTrace(fs.rmdir, this._getFilePath(dir))
}
async _sync() {
const path = this._getRealPath('/')
await fs.ensureDir(path)
await fs.access(path, fs.R_OK | fs.W_OK)
await this._addSyncStackTrace(fs.ensureDir, path)
await this._addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return fs.truncate(this._getFilePath(file), len)
return this._addSyncStackTrace(fs.truncate, this._getFilePath(file), len)
}
async _unlink(file) {
const filePath = this._getFilePath(file)
return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
return await this._addSyncStackTrace(retry, () => fs.unlink(filePath), this._retriesOnEagain)
}
_writeFd(file, buffer, position) {
return fs.write(file.fd, buffer, 0, buffer.length, position)
return this._addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
return this._addSyncStackTrace(fs.writeFile, this._getFilePath(file), data, { flag: flags })
}
}

View File

@@ -1,6 +1,6 @@
import path from 'path'
const { basename, dirname, join, resolve, sep } = path.posix
const { basename, dirname, join, resolve, relative, sep } = path.posix
export { basename, dirname, join }
@@ -19,3 +19,6 @@ export function split(path) {
return parts
}
export const relativeFromFile = (file, path) => relative(dirname(file), path)
export const resolveFromFile = (file, path) => resolve('/', dirname(file), path).slice(1)
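As a small example with hypothetical paths, here is how these two helpers complement each other; they mirror how the VHD chain of a merge state file is stored relative to that file and resolved back by `cleanVm` through `resolveFromFile`:
```
const { relativeFromFile, resolveFromFile } = require('@xen-orchestra/fs/path')

const stateFile = 'xo-vm-backups/vm-uuid/vdis/job-uuid/vdi-uuid/.20220910T120000Z.vhd.merge.json'
const vhd = 'xo-vm-backups/vm-uuid/vdis/job-uuid/vdi-uuid/data/1.vhd'

const stored = relativeFromFile(stateFile, vhd) // 'data/1.vhd'
resolveFromFile(stateFile, stored) // 'xo-vm-backups/vm-uuid/vdis/job-uuid/vdi-uuid/data/1.vhd'
```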

View File

@@ -27,7 +27,7 @@ import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './_path'
import { basename, join, split } from './path'
import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -155,6 +155,14 @@ export default class S3Handler extends RemoteHandlerAbstract {
if (e.name === 'EntityTooLarge') {
return this._multipartCopy(oldPath, newPath)
}
// normalize this error code
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file or directory '${oldPath}'`)
error.cause = e
error.code = 'ENOENT'
error.path = oldPath
throw error
}
throw e
}
}
@@ -525,4 +533,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {}
useVhdDirectory() {
return true
}
}

View File

@@ -1,23 +0,0 @@
import { parse } from 'xo-remote-parser'
import MountHandler from './_mount'
import { normalize } from './_path'
export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}
get type() {
return 'smb'
}
}

View File

@@ -1,163 +1,23 @@
import Smb2 from '@marsaud/smb2'
import { parse } from 'xo-remote-parser'
import RemoteHandlerAbstract from './abstract'
import MountHandler from './_mount'
import { normalize } from './path'
// Normalize the error code for file not found.
const wrapError = (error, code) => ({
__proto__: error,
cause: error,
code,
})
const normalizeError = (error, shouldBeDirectory) => {
const { code } = error
throw code === 'STATUS_DIRECTORY_NOT_EMPTY'
? wrapError(error, 'ENOTEMPTY')
: code === 'STATUS_FILE_IS_A_DIRECTORY'
? wrapError(error, 'EISDIR')
: code === 'STATUS_NOT_A_DIRECTORY'
? wrapError(error, 'ENOTDIR')
: code === 'STATUS_OBJECT_NAME_NOT_FOUND' || code === 'STATUS_OBJECT_PATH_NOT_FOUND'
? wrapError(error, 'ENOENT')
: code === 'STATUS_OBJECT_NAME_COLLISION'
? wrapError(error, 'EEXIST')
: code === 'STATUS_NOT_SUPPORTED' || code === 'STATUS_INVALID_PARAMETER'
? wrapError(error, shouldBeDirectory ? 'ENOTDIR' : 'EISDIR')
: error
}
const normalizeDirError = error => normalizeError(error, true)
export default class SmbHandler extends RemoteHandlerAbstract {
export default class SmbHandler extends MountHandler {
constructor(remote, opts) {
super(remote, opts)
// defined in _sync()
this._client = undefined
const prefix = this._remote.path
this._prefix = prefix !== '' ? prefix + '\\' : prefix
const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
super(remote, opts, {
type: 'cifs',
device: '//' + host + normalize(path),
options: `domain=${domain}`,
env: {
USER: username,
PASSWD: password,
},
})
}
get type() {
return 'smb'
}
_getFilePath(file) {
return this._prefix + (typeof file === 'string' ? file : file.path).slice(1).replace(/\//g, '\\')
}
_dirname(file) {
const parts = file.split('\\')
parts.pop()
return parts.join('\\')
}
_closeFile(file) {
return this._client.close(file).catch(normalizeError)
}
_createReadStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createReadStream(file, options).catch(normalizeError)
}
_createWriteStream(file, options) {
if (typeof file === 'string') {
file = this._getFilePath(file)
} else {
options = { autoClose: false, ...options, fd: file.fd }
file = ''
}
return this._client.createWriteStream(file, options).catch(normalizeError)
}
_forget() {
const client = this._client
this._client = undefined
return client.disconnect()
}
_getSize(file) {
return this._client.getSize(this._getFilePath(file)).catch(normalizeError)
}
_list(dir) {
return this._client.readdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_mkdir(dir, { mode }) {
return this._client.mkdir(this._getFilePath(dir), mode).catch(normalizeDirError)
}
// TODO: add flags
_openFile(path, flags) {
return this._client.open(this._getFilePath(path), flags).catch(normalizeError)
}
async _read(file, buffer, position) {
const client = this._client
const needsClose = typeof file === 'string'
file = needsClose ? await client.open(this._getFilePath(file)) : file.fd
try {
return await client.read(file, buffer, 0, buffer.length, position)
} catch (error) {
normalizeError(error)
} finally {
if (needsClose) {
await client.close(file)
}
}
}
_readFile(file, options) {
return this._client.readFile(this._getFilePath(file), options).catch(normalizeError)
}
_rename(oldPath, newPath) {
return this._client
.rename(this._getFilePath(oldPath), this._getFilePath(newPath), {
replace: true,
})
.catch(normalizeError)
}
_rmdir(dir) {
return this._client.rmdir(this._getFilePath(dir)).catch(normalizeDirError)
}
_sync() {
const remote = this._remote
this._client = new Smb2({
share: `\\\\${remote.host}`,
domain: remote.domain,
username: remote.username,
password: remote.password,
autoCloseTimeout: 0,
})
// Check access (smb2 does not expose connect in public so far...)
return this.list('.')
}
_truncate(file, len) {
return this._client.truncate(this._getFilePath(file), len).catch(normalizeError)
}
_unlink(file) {
return this._client.unlink(this._getFilePath(file)).catch(normalizeError)
}
_writeFd(file, buffer, position) {
return this._client.write(file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, options) {
return this._client.writeFile(this._getFilePath(file), data, options).catch(normalizeError)
}
}

View File

@@ -30,20 +30,41 @@ export default class Hooks extends EventEmitter {
// Run *start* async listeners.
//
// They initialize the application.
//
// *startCore* is automatically called if necessary.
async start() {
assert.strictEqual(this._status, 'stopped')
if (this._status === 'stopped') {
await this.startCore()
} else {
assert.strictEqual(this._status, 'core started')
}
this._status = 'starting'
await runHook(this, 'start')
this.emit((this._status = 'started'))
}
// Run *stop* async listeners.
// Run *start core* async listeners.
//
// They initialize core features of the application (connect to databases,
// etc.) and should be fast and side-effects free.
async startCore() {
assert.strictEqual(this._status, 'stopped')
this._status = 'starting core'
await runHook(this, 'start core')
this.emit((this._status = 'core started'))
}
// Run *stop* async listeners if necessary and *stop core* listeners.
//
// They close connections, unmount file systems, save states, etc.
async stop() {
assert.strictEqual(this._status, 'started')
this._status = 'stopping'
await runHook(this, 'stop')
if (this._status !== 'core started') {
assert.strictEqual(this._status, 'started')
this._status = 'stopping'
await runHook(this, 'stop')
this._status = 'core started'
}
await runHook(this, 'stop core')
this.emit((this._status = 'stopped'))
}
}

View File

@@ -40,7 +40,7 @@ export default class HttpProxy {
this.#app = app
const events = new EventListenersManager(httpServer)
app.config.watch('http.proxy.enabled', (enabled = false) => {
app.config.watch('http.proxy.enabled', (enabled = true) => {
events.removeAll()
if (enabled) {
events.add('connect', this.#handleConnect.bind(this)).add('request', this.#handleRequest.bind(this))

View File

@@ -0,0 +1,214 @@
import { createLogger } from '@xen-orchestra/log'
import { createSecureContext } from 'tls'
import { dirname } from 'node:path'
import { X509Certificate } from 'node:crypto'
import acme from 'acme-client'
import fs from 'node:fs/promises'
import get from 'lodash/get.js'
const { debug, info, warn } = createLogger('xo:mixins:sslCertificate')
acme.setLogger(message => {
debug(message)
})
// - create any missing parent directories
// - replace existing files
// - secure permissions (read-only for the owner)
async function outputFile(path, content) {
await fs.mkdir(dirname(path), { recursive: true })
try {
await fs.unlink(path)
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
}
await fs.writeFile(path, content, { flag: 'wx', mode: 0o400 })
}
// from https://github.com/publishlab/node-acme-client/blob/master/examples/auto.js
class SslCertificate {
#cert
#challengeCreateFn
#challengeRemoveFn
#delayBeforeRenewal = 30 * 24 * 60 * 60 * 1000 // 30 days
#secureContext
#updateSslCertificatePromise
constructor({ challengeCreateFn, challengeRemoveFn }, cert, key) {
this.#challengeCreateFn = challengeCreateFn
this.#challengeRemoveFn = challengeRemoveFn
this.#set(cert, key)
}
get #isValid() {
const cert = this.#cert
return cert !== undefined && Date.parse(cert.validTo) > Date.now() && cert.issuer !== cert.subject
}
get #shouldBeRenewed() {
return !(this.#isValid && Date.parse(this.#cert.validTo) > Date.now() + this.#delayBeforeRenewal)
}
#set(cert, key) {
this.#cert = new X509Certificate(cert)
this.#secureContext = createSecureContext({ cert, key })
}
async getSecureContext(config) {
if (!this.#shouldBeRenewed) {
return this.#secureContext
}
if (this.#updateSslCertificatePromise === undefined) {
// not currently updating certificate
//
// ensure we only refresh certificate once at a time
//
// promise is cleaned by #updateSslCertificate itself
this.#updateSslCertificatePromise = this.#updateSslCertificate(config)
}
// old certificate is still here, return it while updating
if (this.#isValid) {
return this.#secureContext
}
return this.#updateSslCertificatePromise
}
async #save(certPath, cert, keyPath, key) {
try {
await Promise.all([outputFile(keyPath, key), outputFile(certPath, cert)])
info('new certificate generated', { cert: certPath, key: keyPath })
} catch (error) {
warn(`couldn't write let's encrypt certificates to disk `, { error })
}
}
async #updateSslCertificate(config) {
const { cert: certPath, key: keyPath, acmeEmail, acmeDomain } = config
try {
let { acmeCa = 'letsencrypt/production' } = config
if (!(acmeCa.startsWith('http:') || acmeCa.startsWith('https:'))) {
acmeCa = get(acme.directory, acmeCa.split('/'))
}
/* Init client */
const client = new acme.Client({
directoryUrl: acmeCa,
accountKey: await acme.crypto.createPrivateKey(),
})
/* Create CSR */
let [key, csr] = await acme.crypto.createCsr({
commonName: acmeDomain,
})
csr = csr.toString()
key = key.toString()
debug('Successfully generated key and csr')
/* Certificate */
const cert = await client.auto({
challengeCreateFn: this.#challengeCreateFn,
challengePriority: ['http-01'],
challengeRemoveFn: this.#challengeRemoveFn,
csr,
email: acmeEmail,
skipChallengeVerification: true,
termsOfServiceAgreed: true,
})
debug('Successfully generated certificate')
this.#set(cert, key)
// don't wait for this
this.#save(certPath, cert, keyPath, key)
return this.#secureContext
} catch (error) {
warn(`couldn't renew ssl certificate`, { acmeDomain, error })
} finally {
this.#updateSslCertificatePromise = undefined
}
}
}
export default class SslCertificates {
#app
#challenges = new Map()
#challengeHandlers = {
challengeCreateFn: (authz, challenge, keyAuthorization) => {
this.#challenges.set(challenge.token, keyAuthorization)
},
challengeRemoveFn: (authz, challenge, keyAuthorization) => {
this.#challenges.delete(challenge.token)
},
}
#handlers = new Map()
constructor(app, { httpServer }) {
// don't set up the certificate handling if httpServer is not present
//
// that can happen when the app is instantiated in another context like xo-server-recover-account
if (httpServer === undefined) {
return
}
const prefix = '/.well-known/acme-challenge/'
httpServer.on('request', (req, res) => {
const { url } = req
if (url.startsWith(prefix)) {
const token = url.slice(prefix.length)
this.#acmeChallendMiddleware(req, res, token)
}
})
this.#app = app
httpServer.getSecureContext = this.getSecureContext.bind(this)
}
async getSecureContext(httpsDomainName, configKey, initialCert, initialKey) {
const config = this.#app.config.get(['http', 'listen', configKey])
const handlers = this.#handlers
const { acmeDomain } = config
// not a Let's Encrypt protected endpoint, something changed in the configuration
if (acmeDomain === undefined) {
handlers.delete(configKey)
return
}
// the server has been accessed with another domain, don't use the certificate
if (acmeDomain !== httpsDomainName) {
return
}
let handler = handlers.get(configKey)
if (handler === undefined) {
// register the handler for this domain
handler = new SslCertificate(this.#challengeHandlers, initialCert, initialKey)
handlers.set(configKey, handler)
}
return handler.getSecureContext(config)
}
// middleware that will serve the HTTP challenge to Let's Encrypt servers
#acmeChallendMiddleware(req, res, token) {
debug('fetching challenge for token ', token)
const challenge = this.#challenges.get(token)
debug('challenge content is ', challenge)
if (challenge === undefined) {
res.statusCode = 404
res.end()
return
}
res.write(challenge)
res.end()
debug('successfully answered challenge ')
}
}

View File

@@ -10,11 +10,11 @@
## Set up
The proxy is disabled by default; to enable it, add the following lines to your config:
The proxy is enabled by default; to disable it, add the following lines to your config:
```toml
[http.proxy]
enabled = true
enabled = false
```
## Usage

View File

@@ -0,0 +1,49 @@
> This module provides [Let's Encrypt](https://letsencrypt.org/) integration to `xo-proxy` and `xo-server`.
First of all, make sure your server is listening on HTTP on port 80 and on HTTPS on port 443.
In `xo-server`, to avoid HTTP access, enable the redirection to HTTPS:
```toml
[http]
redirectToHttps = true
```
Your server must be reachable at the configured domain by the certificate provider (e.g. Let's Encrypt), which usually means it must be publicly reachable.
Finally, add the following entries to your HTTPS configuration.
```toml
# Must be set to true for this feature
autoCert = true
# These entries are required and indicate where the certificate and the
# private key will be saved.
cert = 'path/to/cert.pem'
key = 'path/to/key.pem'
# ACME (e.g. Let's Encrypt, ZeroSSL) CA directory
#
# Specifies the URL to the ACME CA's directory.
#
# An identifier `provider/directory` can be passed instead of a URL, see the
# list of supported directories here: https://www.npmjs.com/package/acme-client#directory-urls
#
# Note that the application cannot detect that this value has changed.
#
# If you change it, delete the certificate and the key files, and restart the
# application to generate new ones.
#
# Default is 'letsencrypt/production'
acmeCa = 'zerossl/production'
# Domain for which the certificate should be created.
#
# This entry is required.
acmeDomain = 'my.domain.net'
# Optional email address which will be used for the certificate creation.
#
# It will be notified of any issues.
acmeEmail = 'admin@my.domain.net'
```
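
For reference, the `provider/directory` shorthand accepted by `acmeCa` is resolved against `acme-client`'s built-in directory map (see the link above); a small standalone sketch of that resolution, assuming the same lodash `get` helper the mixin relies on:

```js
import acme from 'acme-client'
import get from 'lodash/get.js'

// anything that is not an http(s) URL is split on '/' and looked up in acme.directory
function resolveAcmeCa(acmeCa = 'letsencrypt/production') {
  return acmeCa.startsWith('http:') || acmeCa.startsWith('https:')
    ? acmeCa
    : get(acme.directory, acmeCa.split('/'))
}

console.log(resolveAcmeCa('zerossl/production')) // ZeroSSL production directory URL
console.log(resolveAcmeCa('https://acme.example.org/directory')) // passed through unchanged
```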

View File

@@ -14,16 +14,17 @@
"url": "https://vates.fr"
},
"license": "AGPL-3.0-or-later",
"version": "0.5.0",
"version": "0.8.0",
"engines": {
"node": ">=12"
"node": ">=15.6"
},
"dependencies": {
"@vates/event-listeners-manager": "^1.0.0",
"@vates/event-listeners-manager": "^1.0.1",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/emit-async": "^1.0.0",
"@xen-orchestra/log": "^0.3.0",
"app-conf": "^2.1.0",
"acme-client": "^5.0.0",
"app-conf": "^2.3.0",
"lodash": "^4.17.21",
"promise-toolbox": "^0.21.0"
},

View File

@@ -9,7 +9,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.1",
"version": "0.1.2",
"engines": {
"node": ">=8.10"
},
@@ -30,7 +30,7 @@
"rimraf": "^3.0.0"
},
"dependencies": {
"@vates/read-chunk": "^0.1.2"
"@vates/read-chunk": "^1.0.0"
},
"author": {
"name": "Vates SAS",

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/proxy-cli",
"version": "0.3.0",
"version": "0.3.1",
"license": "AGPL-3.0-or-later",
"description": "CLI for @xen-orchestra/proxy",
"keywords": [
@@ -26,9 +26,9 @@
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@vates/read-chunk": "^0.1.2",
"@vates/read-chunk": "^1.0.0",
"ansi-colors": "^4.1.1",
"app-conf": "^2.1.0",
"app-conf": "^2.3.0",
"content-type": "^1.0.4",
"cson-parser": "^4.0.7",
"getopts": "^2.2.3",

View File

@@ -1,6 +1,7 @@
import Config from '@xen-orchestra/mixins/Config.mjs'
import Hooks from '@xen-orchestra/mixins/Hooks.mjs'
import HttpProxy from '@xen-orchestra/mixins/HttpProxy.mjs'
import SslCertificate from '@xen-orchestra/mixins/SslCertificate.mjs'
import mixin from '@xen-orchestra/mixin'
import { createDebounceResource } from '@vates/disposable/debounceResource.js'
@@ -14,9 +15,23 @@ import ReverseProxy from './mixins/reverseProxy.mjs'
export default class App {
constructor(opts) {
mixin(this, { Api, Appliance, Authentication, Backups, Config, Hooks, HttpProxy, Logs, Remotes, ReverseProxy }, [
opts,
])
mixin(
this,
{
Api,
Appliance,
Authentication,
Backups,
Config,
Hooks,
HttpProxy,
Logs,
Remotes,
ReverseProxy,
SslCertificate,
},
[opts]
)
const debounceResource = createDebounceResource()
this.config.watchDuration('resourceCacheDelay', delay => {

View File

@@ -1,4 +1,4 @@
import { format, parse, MethodNotFound } from 'json-rpc-protocol'
import { format, parse, MethodNotFound, JsonRpcError } from 'json-rpc-protocol'
import * as errors from 'xo-common/api-errors.js'
import Ajv from 'ajv'
import asyncIteratorToStream from 'async-iterator-to-stream'
@@ -9,11 +9,26 @@ import helmet from 'koa-helmet'
import Koa from 'koa'
import once from 'lodash/once.js'
import Router from '@koa/router'
import stubTrue from 'lodash/stubTrue.js'
import Zone from 'node-zone'
import { createLogger } from '@xen-orchestra/log'
const { debug, warn } = createLogger('xo:proxy:api')
// format an error to JSON-RPC but do not hide non JSON-RPC errors
function formatError(responseId, error) {
if (error != null && typeof error.toJsonRpcError !== 'function') {
const { message, ...data } = error
// force these entries even if they are not enumerable
data.code = error.code
data.stack = error.stack
error = new JsonRpcError(error.message, undefined, data)
}
return format.error(responseId, error)
}
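
The effect of this wrapper can be illustrated in isolation: `json-rpc-protocol` hides errors that are not `JsonRpcError` instances behind a generic error object, so wrapping them first keeps the original message and carries `code`/`stack` as data. A standalone sketch with a hypothetical error:

```js
import { format, JsonRpcError } from 'json-rpc-protocol'

const error = Object.assign(new Error('ENOENT: no such file or directory'), { code: 'ENOENT' })

// without wrapping: the message and code are lost in the serialized response
console.log(format.error(null, error))

// with wrapping (what formatError does): message kept, code/stack travel as `data`
const data = { ...error, code: error.code, stack: error.stack }
console.log(format.error(null, new JsonRpcError(error.message, undefined, data)))
```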
const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable) {
try {
let cursor, iterator
@@ -24,7 +39,7 @@ const ndJsonStream = asyncIteratorToStream(async function* (responseId, iterable
cursor = await iterator.next()
yield format.response(responseId, { $responseType: 'ndjson' }) + '\n'
} catch (error) {
yield format.error(responseId, error)
yield formatError(responseId, error)
throw error
}
@@ -63,7 +78,7 @@ export default class Api {
try {
body = parse(body)
} catch (error) {
ctx.body = format.error(null, error)
ctx.body = formatError(null, error)
return
}
@@ -77,7 +92,7 @@ export default class Api {
const { method, params } = body
warn('call error', { method, params, error })
ctx.set('Content-Type', 'application/json')
ctx.body = format.error(body.id, error)
ctx.body = formatError(body.id, error)
return
}
@@ -166,14 +181,20 @@ export default class Api {
throw errors.noSuchObject('method', name)
}
const { description, params = {} } = method
return { description, name, params }
const { description, params = {}, result = {} } = method
return { description, name, params, result }
},
{
description: 'returns the signature of an API method',
params: {
method: { type: 'string' },
},
result: {
description: { type: 'string' },
name: { type: 'string' },
params: { type: 'object' },
result: { type: 'object' },
},
},
],
},
@@ -205,40 +226,29 @@ export default class Api {
})
}
addMethod(name, method, { description, params = {} } = {}) {
addMethod(name, method, { description, params = {}, result: resultSchema } = {}) {
const methods = this._methods
if (name in methods) {
throw new Error(`API method ${name} already exists`)
}
const ajv = this._ajv
const validate = ajv.compile({
// we want additional properties to be disabled by default
additionalProperties: params['*'] || false,
const validateParams = this.#compileSchema(params)
const validateResult = this.#compileSchema(resultSchema)
properties: params,
// we want params to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
required: Object.keys(params).filter(name => {
const param = params[name]
const required = !param.optional
delete param.optional
return required
}),
type: 'object',
})
const m = params => {
if (!validate(params)) {
throw errors.invalidParameters(validate.errors)
const m = async params => {
if (!validateParams(params)) {
throw errors.invalidParameters(validateParams.errors)
}
return method(params)
const result = await method(params)
if (!validateResult(result)) {
warn('invalid API method result', { errors: validateResult.errors, result })
}
return result
}
m.description = description
m.params = params
m.result = resultSchema
methods[name] = m
@@ -289,4 +299,43 @@ export default class Api {
}
return fn(params)
}
#compileSchema(schema) {
if (schema === undefined) {
return stubTrue
}
if (schema.type === undefined) {
schema = { type: 'object', properties: schema }
}
const { type } = schema
if (Array.isArray(type) ? type.includes('object') : type === 'object') {
const { properties = {} } = schema
if (schema.additionalProperties === undefined) {
const wildCard = properties['*']
if (wildCard === undefined) {
// we want additional properties to be disabled by default
schema.additionalProperties = false
} else {
delete properties['*']
schema.additionalProperties = wildCard
}
}
// we want properties to be required by default unless explicitly marked so
// we use property `optional` instead of object `required`
if (schema.required === undefined) {
schema.required = Object.keys(properties).filter(name => {
const param = properties[name]
const required = !param.optional
delete param.optional
return required
})
}
}
return this._ajv.compile(schema)
}
}
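
To make the schema conventions above concrete, here is a standalone sketch (using Ajv directly rather than the `Api` class, with a hypothetical method declaration) of how a `params` map using `optional` is turned into the JSON schema that gets compiled:

```js
import Ajv from 'ajv'

const params = {
  name: { type: 'string' },
  verbose: { type: 'boolean', optional: true }, // `optional` is used instead of JSON schema `required`
}

// same transformation as #compileSchema for a bare property map without a '*' wildcard
const schema = { type: 'object', properties: params }
schema.additionalProperties = false
schema.required = Object.keys(params).filter(key => {
  const required = !params[key].optional
  delete params[key].optional
  return required
})

const validate = new Ajv().compile(schema)
console.log(validate({ name: 'getServerVersion' })) // true
console.log(validate({ extra: true })) // false: `name` is missing and `extra` is not allowed
```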

View File

@@ -407,6 +407,7 @@ export default class Backups {
debounceResource: app.debounceResource.bind(app),
dirMode: app.config.get('backups.dirMode'),
vhdDirectoryCompression: app.config.get('backups.vhdDirectoryCompression'),
useGetDiskLegacy: app.config.getOptional('backups.useGetDiskLegacy'),
})
}

View File

@@ -56,11 +56,32 @@ ${APP_NAME} v${APP_VERSION}
createSecureServer: opts => createSecureServer({ ...opts, allowHTTP1: true }),
})
forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }) => {
forOwn(config.http.listen, async ({ autoCert, cert, key, ...opts }, configKey) => {
const useAcme = autoCert && opts.acmeDomain !== undefined
// don't pass these entries to httpServer.listen(opts)
for (const key of Object.keys(opts).filter(_ => _.startsWith('acme'))) {
delete opts[key]
}
try {
const niceAddress = await pRetry(
async () => {
if (cert !== undefined && key !== undefined) {
let niceAddress
if (cert !== undefined && key !== undefined) {
if (useAcme) {
opts.SNICallback = async (serverName, callback) => {
try {
// injected by mixins/SslCertificate
const secureContext = await httpServer.getSecureContext(serverName, configKey, opts.cert, opts.key)
callback(null, secureContext)
} catch (error) {
warn(error)
callback(error, null)
}
}
}
niceAddress = await pRetry(
async () => {
try {
opts.cert = fse.readFileSync(cert)
opts.key = fse.readFileSync(key)
@@ -76,20 +97,22 @@ ${APP_NAME} v${APP_VERSION}
opts.cert = pems.cert
opts.key = pems.key
}
}
return httpServer.listen(opts)
},
{
tries: 2,
when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
onRetry: () => {
warn('deleting invalid certificate')
fse.unlinkSync(cert)
fse.unlinkSync(key)
return httpServer.listen(opts)
},
}
)
{
tries: 2,
when: e => autoCert && e.code === 'ERR_SSL_EE_KEY_TOO_SMALL',
onRetry: () => {
warn('deleting invalid certificate')
fse.unlinkSync(cert)
fse.unlinkSync(key)
},
}
)
} else {
niceAddress = await httpServer.listen(opts)
}
info(`Web server listening on ${niceAddress}`)
} catch (error) {
@@ -146,6 +169,7 @@ ${APP_NAME} v${APP_VERSION}
process.on(signal, () => {
if (alreadyCalled) {
warn('forced exit')
// eslint-disable-next-line n/no-process-exit
process.exit(1)
}
alreadyCalled = true
@@ -164,6 +188,7 @@ main(process.argv.slice(2)).then(
error => {
fatal(error)
// eslint-disable-next-line n/no-process-exit
process.exit(1)
}
)
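
For context, the `SNICallback` wired above follows Node's standard TLS contract: it receives the SNI server name sent by the client and must hand back a `SecureContext`. A minimal standalone sketch using plain `node:tls` (not the actual `http-server-plus` wiring, certificate paths are placeholders):

```js
import { readFileSync } from 'node:fs'
import tls from 'node:tls'

const server = tls.createServer({
  // called once per TLS handshake; in xo-proxy this is where
  // httpServer.getSecureContext(serverName, ...) is awaited
  SNICallback(serverName, callback) {
    try {
      const secureContext = tls.createSecureContext({
        cert: readFileSync('path/to/cert.pem'),
        key: readFileSync('path/to/key.pem'),
      })
      callback(null, secureContext)
    } catch (error) {
      callback(error, null)
    }
  },
})

server.listen(443)
```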

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "@xen-orchestra/proxy",
"version": "0.23.2",
"version": "0.26.1",
"license": "AGPL-3.0-or-later",
"description": "XO Proxy used to remotely execute backup jobs",
"keywords": [
@@ -26,33 +26,33 @@
},
"dependencies": {
"@iarna/toml": "^2.2.0",
"@koa/router": "^10.0.0",
"@koa/router": "^12.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.25.0",
"@xen-orchestra/fs": "^1.0.3",
"@xen-orchestra/backups": "^0.27.4",
"@xen-orchestra/fs": "^3.1.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/mixin": "^0.1.0",
"@xen-orchestra/mixins": "^0.5.0",
"@xen-orchestra/mixins": "^0.8.0",
"@xen-orchestra/self-signed": "^0.1.3",
"@xen-orchestra/xapi": "^1.2.0",
"@xen-orchestra/xapi": "^1.4.2",
"ajv": "^8.0.3",
"app-conf": "^2.1.0",
"app-conf": "^2.3.0",
"async-iterator-to-stream": "^1.1.0",
"fs-extra": "^10.0.0",
"get-stream": "^6.0.0",
"getopts": "^2.2.3",
"golike-defer": "^0.5.1",
"http-server-plus": "^0.11.1",
"http-server-plus": "^0.12.0",
"http2-proxy": "^5.0.53",
"json-rpc-protocol": "^0.13.1",
"jsonrpc-websocket-client": "^0.7.2",
"koa": "^2.5.1",
"koa-compress": "^5.0.1",
"koa-helmet": "^5.1.0",
"koa-helmet": "^6.1.0",
"lodash": "^4.17.10",
"node-zone": "^0.4.0",
"parse-pairs": "^1.0.0",
@@ -60,7 +60,7 @@
"source-map-support": "^0.5.16",
"stoppable": "^1.0.6",
"xdg-basedir": "^5.1.0",
"xen-api": "^1.2.1",
"xen-api": "^1.2.2",
"xo-common": "^0.8.0"
},
"devDependencies": {

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -1,8 +1,10 @@
import escapeRegExp from 'lodash/escapeRegExp'
'use strict'
const escapeRegExp = require('lodash/escapeRegExp')
const compareLengthDesc = (a, b) => b.length - a.length
export function compileTemplate(pattern, rules) {
exports.compileTemplate = function compileTemplate(pattern, rules) {
const matches = Object.keys(rules).sort(compareLengthDesc).map(escapeRegExp).join('|')
const regExp = new RegExp(`\\\\(?:\\\\|${matches})|${matches}`, 'g')
return (...params) =>

View File

@@ -1,5 +1,8 @@
/* eslint-env jest */
import { compileTemplate } from '.'
'use strict'
const { compileTemplate } = require('.')
it("correctly replaces the template's variables", () => {
const replacer = compileTemplate('{property}_\\{property}_\\\\{property}_{constant}_%_FOO', {

View File

@@ -14,31 +14,13 @@
"name": "Vates SAS",
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish --access public"
},
"dependencies": {
"lodash": "^4.17.15"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -43,7 +43,7 @@
"pw": "^0.0.4",
"xdg-basedir": "^4.0.0",
"xo-lib": "^0.11.1",
"xo-vmdk-to-vhd": "^2.4.1"
"xo-vmdk-to-vhd": "^2.4.3"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -0,0 +1,9 @@
'use strict'
// TODO: remove when Node >=15.0
module.exports = class AggregateError extends Error {
constructor(errors, message) {
super(message)
this.errors = errors
}
}
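
A short usage sketch of this polyfill (hypothetical errors), matching how `disableMaintenanceMode` further down collects failures before throwing:

```js
'use strict'

const AggregateError = require('./_AggregateError.js') // relative path as used by sr.js in this diff

try {
  throw new AggregateError([new Error('PBD.plug failed'), new Error('VM.start failed')], 'disableMaintenanceMode')
} catch (error) {
  console.error(error.message) // 'disableMaintenanceMode'
  for (const subError of error.errors) {
    console.error('-', subError.message)
  }
}
```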

View File

@@ -1,6 +1,6 @@
{
"name": "@xen-orchestra/xapi",
"version": "1.2.0",
"version": "1.4.2",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/xapi",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
@@ -15,7 +15,7 @@
"node": ">=14"
},
"peerDependencies": {
"xen-api": "^1.2.1"
"xen-api": "^1.2.2"
},
"scripts": {
"postversion": "npm publish --access public"
@@ -26,9 +26,10 @@
"@xen-orchestra/log": "^0.3.0",
"d3-time-format": "^3.0.0",
"golike-defer": "^0.5.1",
"json-rpc-protocol": "^0.13.2",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^3.2.0",
"vhd-lib": "^4.0.1",
"xo-common": "^0.8.0"
},
"private": false,

View File

@@ -1,12 +1,19 @@
'use strict'
const { asyncMap, asyncMapSettled } = require('@xen-orchestra/async-map')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { VDI_FORMAT_RAW } = require('./index.js')
const { incorrectState } = require('xo-common/api-errors')
const { VDI_FORMAT_VHD } = require('./index.js')
const assert = require('node:assert').strict
const peekFooterFromStream = require('vhd-lib/peekFooterFromVhdStream')
const AggregateError = require('./_AggregateError.js')
const { warn } = require('@xen-orchestra/log').createLogger('xo:xapi:sr')
const OC_MAINTENANCE = 'xo:maintenanceState'
class Sr {
async create({
content_type = 'user', // recommended by Citrix
@@ -38,19 +45,135 @@ class Sr {
return ref
}
// Switch the SR to maintenance mode:
// - shutdown all running VMs with a VDI on this SR
// - their UUID is saved into SR.other_config[OC_MAINTENANCE].shutdownVms
// - clean shutdown is attempted, and falls back to a hard shutdown
// - unplug all connected hosts from this SR
async enableMaintenanceMode($defer, ref, { vmsToShutdown = [] } = {}) {
const state = { timestamp: Date.now() }
// will throw if already in maintenance mode
await this.call('SR.add_to_other_config', ref, OC_MAINTENANCE, JSON.stringify(state))
await $defer.onFailure.call(this, 'call', 'SR.remove_from_other_config', ref, OC_MAINTENANCE)
const runningVms = new Map()
const handleVbd = async ref => {
const vmRef = await this.getField('VBD', ref, 'VM')
if (!runningVms.has(vmRef)) {
const power_state = await this.getField('VM', vmRef, 'power_state')
const isPaused = power_state === 'Paused'
if (isPaused || power_state === 'Running') {
runningVms.set(vmRef, isPaused)
}
}
}
await asyncMap(await this.getField('SR', ref, 'VDIs'), async ref => {
await asyncMap(await this.getField('VDI', ref, 'VBDs'), handleVbd)
})
{
const runningVmUuids = await asyncMap(runningVms.keys(), ref => this.getField('VM', ref, 'uuid'))
const set = new Set(vmsToShutdown)
for (const vmUuid of runningVmUuids) {
if (!set.has(vmUuid)) {
throw incorrectState({
actual: vmsToShutdown,
expected: runningVmUuids,
property: 'vmsToShutdown',
})
}
}
}
state.shutdownVms = {}
await asyncMapSettled(runningVms, async ([ref, isPaused]) => {
state.shutdownVms[await this.getField('VM', ref, 'uuid')] = isPaused
try {
await this.callAsync('VM.clean_shutdown', ref)
} catch (error) {
warn('SR_enableMaintenanceMode, VM clean shutdown', { error })
await this.callAsync('VM.hard_shutdown', ref)
}
$defer.onFailure.call(this, 'callAsync', 'VM.start', ref, isPaused, true)
})
state.unpluggedPbds = []
await asyncMapSettled(await this.getField('SR', ref, 'PBDs'), async ref => {
if (await this.getField('PBD', ref, 'currently_attached')) {
state.unpluggedPbds.push(await this.getField('PBD', ref, 'uuid'))
await this.callAsync('PBD.unplug', ref)
$defer.onFailure.call(this, 'callAsync', 'PBD.plug', ref)
}
})
await this.setFieldEntry('SR', ref, 'other_config', OC_MAINTENANCE, JSON.stringify(state))
}
// this method is best effort and will not stop on first error
async disableMaintenanceMode(ref) {
const state = JSON.parse((await this.getField('SR', ref, 'other_config'))[OC_MAINTENANCE])
// will throw if not in maintenance mode
await this.call('SR.remove_from_other_config', ref, OC_MAINTENANCE)
const errors = []
await asyncMap(state.unpluggedPbds, async uuid => {
try {
await this.callAsync('PBD.plug', await this.call('PBD.get_by_uuid', uuid))
} catch (error) {
errors.push(error)
}
})
await asyncMap(Object.entries(state.shutdownVms), async ([uuid, isPaused]) => {
try {
await this.callAsync('VM.start', await this.call('VM.get_by_uuid', uuid), isPaused, true)
} catch (error) {
errors.push(error)
}
})
if (errors.length !== 0) {
throw new AggregateError(errors)
}
}
async importVdi(
$defer,
ref,
stream,
{ name_label = '[XO] Imported disk - ' + new Date().toISOString(), ...vdiCreateOpts } = {}
{
format = VDI_FORMAT_VHD,
name_label = '[XO] Imported disk - ' + new Date().toISOString(),
virtual_size,
...vdiCreateOpts
} = {}
) {
const footer = await peekFooterFromStream(stream)
const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size: footer.currentSize })
if (virtual_size === undefined) {
if (format === VDI_FORMAT_VHD) {
const footer = await peekFooterFromStream(stream)
virtual_size = footer.currentSize
} else {
virtual_size = stream.length
assert.notEqual(virtual_size, undefined)
}
}
const vdiRef = await this.VDI_create({ ...vdiCreateOpts, name_label, SR: ref, virtual_size })
$defer.onFailure.call(this, 'callAsync', 'VDI.destroy', vdiRef)
await this.VDI_importContent(vdiRef, stream, { format: VDI_FORMAT_RAW })
await this.VDI_importContent(vdiRef, stream, { format })
return vdiRef
}
}
module.exports = Sr
decorateClass(Sr, { importVdi: defer })
decorateClass(Sr, { enableMaintenanceMode: defer, importVdi: defer })
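
A hedged usage sketch of the new maintenance mode, assuming an already-connected `xapi` instance from `@xen-orchestra/xapi` (connection setup omitted); the `SR_enableMaintenanceMode` name comes from the log message above, and `SR_disableMaintenanceMode` is assumed to follow the same prefixing convention as the `VDI_*` calls:

```js
// shuts down the listed VMs and unplugs the SR's PBDs; throws `incorrectState`
// if a running VM using this SR is not listed in vmsToShutdown
await xapi.SR_enableMaintenanceMode(srRef, { vmsToShutdown: ['<uuid of a running VM on this SR>'] })

// the state needed to leave maintenance mode is persisted in SR.other_config['xo:maintenanceState'],
// roughly: { timestamp, shutdownVms: { [vmUuid]: isPaused }, unpluggedPbds: [pbdUuid, ...] }

// best effort: replugs the PBDs and restarts the VMs, then throws an AggregateError if anything failed
await xapi.SR_disableMaintenanceMode(srRef)
```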

View File

@@ -1,5 +1,6 @@
'use strict'
const assert = require('node:assert').strict
const CancelToken = require('promise-toolbox/CancelToken')
const pCatch = require('promise-toolbox/catch')
const pRetry = require('promise-toolbox/retry')
@@ -86,6 +87,8 @@ class Vdi {
}
async importContent(ref, stream, { cancelToken = CancelToken.none, format }) {
assert.notEqual(format, undefined)
if (stream.length === undefined) {
throw new Error('Trying to import a VDI without a length field. Please report this error to Xen Orchestra.')
}

View File

@@ -12,6 +12,7 @@ const { createLogger } = require('@xen-orchestra/log')
const { decorateClass } = require('@vates/decorate-with')
const { defer } = require('golike-defer')
const { incorrectState, forbiddenOperation } = require('xo-common/api-errors.js')
const { JsonRpcError } = require('json-rpc-protocol')
const { Ref } = require('xen-api')
const extractOpaqueRef = require('./_extractOpaqueRef.js')
@@ -509,6 +510,22 @@ class Vm {
}
return ref
} catch (error) {
if (
// xxhash is the new form of consistency hashing in CH 8.1, which uses a faster,
// more efficient hashing algorithm to generate the consistency checks
// in order to support larger files without the consistency checking process taking an incredibly long time
error.code === 'IMPORT_ERROR' &&
error.params?.some(
param =>
param.includes('INTERNAL_ERROR') &&
param.includes('Expected to find an inline checksum') &&
param.includes('.xxhash')
)
) {
warn('import', { error })
throw new JsonRpcError('Importing this VM requires XCP-ng or Citrix Hypervisor >=8.1')
}
// augment the error with as much relevant info as possible
const [poolMaster, sr] = await Promise.all([
safeGetRecord(this, 'host', this.pool.master),

File diff suppressed because it is too large

CHANGELOG.pre-2022.md (new file, 3913 lines)

File diff suppressed because it is too large

View File

@@ -7,12 +7,20 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [Backup/Restore file] Implement file level restore for S3 and encrypted backups (PR [#6409](https://github.com/vatesfr/xen-orchestra/pull/6409))
- [Backup] Improve listing speed by updating caches instead of regenerating them on backup creation/deletion (PR [#6411](https://github.com/vatesfr/xen-orchestra/pull/6411))
- [Backup] Add `mergeBlockConcurrency` and `writeBlockConcurrency` to allow tuning of backup resource consumption (PR [#6416](https://github.com/vatesfr/xen-orchestra/pull/6416))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
- [VDI Import] Fix `this._getOrWaitObject is not a function`
- [VM] Attempting to delete a protected VM should display a modal with the error and the ability to bypass it (PR [#6290](https://github.com/vatesfr/xen-orchestra/pull/6290))
- [Plugin/auth-saml] Certificate input supports multiline (PR [#6403](https://github.com/vatesfr/xen-orchestra/pull/6403))
- [Backup] Launch Health Check after a full backup (PR [#6401](https://github.com/vatesfr/xen-orchestra/pull/6401))
- [Backup] Fix `Lock file is already being held` error when deleting a VM backup while the VM is currently being backed up
- [Tasks] Fix the pool filter that did not display tasks even if they existed (PR [#6424](https://github.com/vatesfr/xen-orchestra/pull/6424))
- [Tasks] Fix tasks being displayed for all users (PR [#6422](https://github.com/vatesfr/xen-orchestra/pull/6422))
- [Storage/advanced] Fix the display of VDI to coalesce [#6334](https://xcp-ng.org/forum/topic/6334/coalesce-not-showing-anymore) (PR [#6429](https://github.com/vatesfr/xen-orchestra/pull/6429))
### Packages to release
@@ -30,9 +38,11 @@
<!--packages-start-->
- @vates/event-listeners-manager patch
- @vates/read-chunk major
- @xen-orchestra/xapi minor
- xo-server patch
- @vates/fuse-vhd major
- @xen-orchestra/backups minor
- vhd-lib minor
- xo-server-auth-saml patch
- xo-server minor
- xo-web minor
<!--packages-end-->

View File

@@ -4,11 +4,11 @@
We apply patches and fix security issues for the following versions:
| Version | Supported |
| ------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| Version | Supported |
| --------------- | ------------------ |
| XOA `latest` | :white_check_mark: |
| XOA `stable` | :white_check_mark: |
| `master` branch | :white_check_mark: |
| anything else | :x: |
## Reporting a Vulnerability

View File

@@ -109,10 +109,13 @@ As a temporary workaround you can increase the timeout higher than the default v
:::
Create the following file:
```
/etc/xo-server/config.httpInactivityTimeout.toml
```
Add the following lines:
```
# XOA Support - Work-around HTTP timeout issue during backups
[xapiOptions]

View File

@@ -28,7 +28,7 @@ hostname = '0.0.0.0'
port = 80
```
## HTTPS
## HTTPS and certificates
XO-server can also run in HTTPS (you can run HTTP and HTTPS at the same time) - just modify what's needed in the `# Basic HTTPS` section, this time with the certificates/keys you need and their path:
@@ -68,9 +68,10 @@ You shouldn't have to change this. It's the path where `xo-web` files are served
If you use certificates signed by an in-house CA for your XCP-ng or XenServer hosts, and want to have Xen Orchestra connect to them without rejection, you can use the [`NODE_EXTRA_CA_CERTS`](https://nodejs.org/api/cli.html#cli_node_extra_ca_certs_file) environment variable.
To enable this option in your XOA, edit the `/etc/systemd/system/xo-server.service` file and add this:
To enable this option in your XOA, create `/etc/systemd/system/xo-server.service.d/ca.conf` with the following content:
```
[Service]
Environment=NODE_EXTRA_CA_CERTS=/usr/local/share/ca-certificates/my-cert.crt
```
@@ -81,6 +82,8 @@ Don't forget to reload `systemd` conf and restart `xo-server`:
# systemctl restart xo-server.service
```
> For XO Proxy, the process is almost the same except the file to create is `/etc/systemd/system/xo-proxy.service.d/ca.conf` and the service to restart is `xo-proxy.service`.
## Redis server
By default, XO-server will try to contact Redis server on `localhost`, with the port `6379`. But you can define whatever you want:

View File

@@ -24,16 +24,15 @@ Please, do explain:
The best way to propose a change to the documentation or code is
to create a [GitHub pull request](https://help.github.com/articles/using-pull-requests/).
:::tip
Your pull request should always be against the `master` branch and not against `stable` which is the stable branch!
:::
1. Create a branch for your work
2. Add a summary of your changes to `CHANGELOG.md` under the `next` section, if your changes do not relate to an existing changelog item
3. Create a pull request for this branch against the `master` branch
4. Push into the branch until the pull request is ready to merge
5. Avoid unnecessary merges: keep your branch up to date by regularly rebasing (`git rebase origin/master`)
6. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
1. Fork the [Xen Orchestra repository](https://github.com/vatesfr/xen-orchestra) using the Fork button
2. Follow [the documentation](installation.md#from-the-sources) to install and run Xen Orchestra from the sources
3. Create a branch for your work
4. Edit the source files
5. Add a summary of your changes to `CHANGELOG.unreleased.md` if your changes do not relate to an existing changelog item, and update the list of packages that must be released to take your changes into account
6. [Create a pull request](https://github.com/vatesfr/xen-orchestra/compare) for this branch against the `master` branch
7. Push into the branch until the pull request is ready to merge
8. Avoid unnecessary merges: keep your branch up to date by regularly rebasing (`git rebase origin/master`)
9. When ready to merge, clean up the history (reorder commits, squash some of them together, rephrase messages): `git rebase -i origin/master`
### Issue triage

View File

@@ -143,14 +143,14 @@ curl \
## VDI Import
A VHD can be imported on an SR to create a VDI at `/rest/v0/srs/<sr uuid>/vdis`.
A VHD or a raw export can be imported on an SR to create a new VDI at `/rest/v0/srs/<sr uuid>/vdis`.
```bash
curl \
-X POST \
-b authenticationToken=KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs \
-T myDisk.vhd \
'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?name_label=my_imported_VDI' \
-T myDisk.raw \
'https://xo.example.org/rest/v0/srs/357bd56c-71f9-4b2a-83b8-3451dec04b8f/vdis?raw&name_label=my_imported_VDI' \
| cat
```
@@ -162,6 +162,7 @@ The following query parameters are supported to customize the created VDI:
- `name_label`
- `name_description`
- `raw`: this parameter must be used if importing a raw export instead of a VHD
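
For completeness, the same import can be done from Node.js; a minimal sketch equivalent to the `curl` call above (host, SR UUID, token and file name are the same placeholders):

```js
import { createReadStream, statSync } from 'node:fs'
import https from 'node:https'

const host = 'xo.example.org'
const srUuid = '357bd56c-71f9-4b2a-83b8-3451dec04b8f'
const token = 'KQxQdm2vMiv7jBIK0hgkmgxKzemd8wSJ7ugFGKFkTbs'
const file = 'myDisk.raw'

const req = https.request({
  host,
  method: 'POST',
  path: `/rest/v0/srs/${srUuid}/vdis?raw&name_label=my_imported_VDI`,
  headers: {
    cookie: `authenticationToken=${token}`,
    'content-length': statSync(file).size,
  },
})
req.on('response', res => {
  res.setEncoding('utf8')
  res.on('data', data => console.log(data)) // server response
})
createReadStream(file).pipe(req)
```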
## The future

View File

@@ -15,7 +15,7 @@ Please read the [dedicated devblog on the SDN Controller](https://xen-orchestra.
:::warning
As VxLAN and GRE are protocols using extra encapsulation, they require extra bits on a network packet. If you create a Global Private Network with a default MTU at `1500`, you won't be able to use it "as is" in your VMs, unless you configure a smaller MTU for each virtual interface, in your VM operating system (eg: `1400`).
If you want something entirely transparent for your VMs, then you'll need to create a network with a MTU of `1546` for GRE or `1550` for VxLAN. However, larger MTU will require capable network equipements.
If you want something entirely transparent for your VMs, then you'll need to create a network with a MTU of `1546` for GRE or `1550` for VxLAN. However, larger MTU will require capable network equipments.
:::
### Network creation

View File

@@ -109,10 +109,6 @@ In the "Settings" then "Plugins" view, expand the SAML plugin configuration. The
Save the configuration and then activate the plugin (button on top).
:::warning
When registering your instance to your identity provider, you must configure its callback URL to `http://xo.example.net/signin/saml/callback`!
:::
### GitHub
This plugin allows GitHub users to authenticate to Xen-Orchestra.

View File

@@ -206,6 +206,23 @@ In any case, if you lose your password, you can reset the database and get the d
You can verify that your time is correctly set with the `date` command. To set XOA to your current timezone, use `sudo dpkg-reconfigure tzdata`.
## Setting a custom NTP server
By default, XOA is configured to use the standard Debian NTP servers:
```
pool 0.debian.pool.ntp.org iburst
pool 1.debian.pool.ntp.org iburst
pool 2.debian.pool.ntp.org iburst
pool 3.debian.pool.ntp.org iburst
```
If you'd like to use your own NTP server or another pool, you can make the changes directly in `/etc/ntp.conf`.
You will need to be root to edit this file (or use `sudo`). We recommend adding your custom server to the top of the list, leaving the Debian server entries if possible.
For changes to take effect, you will need to restart NTP: `sudo systemctl restart ntp.service`.
## Restart the service
You can restart Xen Orchestra by accessing XOA via SSH (or console) and running `systemctl restart xo-server.service`.

View File

@@ -3,7 +3,7 @@
"@babel/core": "^7.0.0",
"@babel/eslint-parser": "^7.13.8",
"@babel/register": "^7.0.0",
"babel-jest": "^27.3.1",
"babel-jest": "^29.0.3",
"benchmark": "^2.1.4",
"deptree": "^1.0.0",
"eslint": "^8.7.0",
@@ -19,8 +19,8 @@
"globby": "^13.1.1",
"handlebars": "^4.7.6",
"husky": "^4.2.5",
"jest": "^27.3.1",
"lint-staged": "^12.0.3",
"jest": "^29.0.3",
"lint-staged": "^13.0.3",
"lodash": "^4.17.4",
"prettier": "^2.0.5",
"promise-toolbox": "^0.21.0",
@@ -77,7 +77,7 @@
"private": true,
"scripts": {
"build": "scripts/run-script.js --parallel build",
"ci": "yarn && yarn test-integration",
"ci": "yarn && yarn build && yarn test-integration",
"clean": "scripts/run-script.js --parallel clean",
"dev": "scripts/run-script.js --parallel dev",
"dev-test": "jest --bail --watch \"^(?!.*\\.integ\\.spec\\.js$)\"",

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -0,0 +1,14 @@
'use strict'
const { parse } = require('./')
const { ast, pattern } = require('./index.fixtures')
module.exports = ({ benchmark }) => {
benchmark('parse', () => {
parse(pattern)
})
benchmark('toString', () => {
ast.toString()
})
}

View File

@@ -1,8 +1,10 @@
import * as CM from './'
'use strict'
export const pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'
const CM = require('./')
export const ast = new CM.And([
exports.pattern = 'foo !"\\\\ \\"" name:|(wonderwoman batman) hasCape? age:32 chi*go /^foo\\/bar\\./i'
exports.ast = new CM.And([
new CM.String('foo'),
new CM.Not(new CM.String('\\ "')),
new CM.Property('name', new CM.Or([new CM.String('wonderwoman'), new CM.String('batman')])),

View File

@@ -1,4 +1,6 @@
import { escapeRegExp, isPlainObject, some } from 'lodash'
'use strict'
const { escapeRegExp, isPlainObject, some } = require('lodash')
// ===================================================================
@@ -23,7 +25,7 @@ class Node {
}
}
export class Null extends Node {
class Null extends Node {
match() {
return true
}
@@ -32,10 +34,11 @@ export class Null extends Node {
return ''
}
}
exports.Null = Null
const formatTerms = terms => terms.map(term => term.toString(true)).join(' ')
export class And extends Node {
class And extends Node {
constructor(children) {
super()
@@ -54,8 +57,9 @@ export class And extends Node {
return isNested ? `(${terms})` : terms
}
}
exports.And = And
export class Comparison extends Node {
class Comparison extends Node {
constructor(operator, value) {
super()
this._comparator = Comparison.comparators[operator]
@@ -71,6 +75,7 @@ export class Comparison extends Node {
return this._operator + String(this._value)
}
}
exports.Comparison = Comparison
Comparison.comparators = {
'>': (a, b) => a > b,
'>=': (a, b) => a >= b,
@@ -78,7 +83,7 @@ Comparison.comparators = {
'<=': (a, b) => a <= b,
}
export class Or extends Node {
class Or extends Node {
constructor(children) {
super()
@@ -96,8 +101,9 @@ export class Or extends Node {
return `|(${formatTerms(this.children)})`
}
}
exports.Or = Or
export class Not extends Node {
class Not extends Node {
constructor(child) {
super()
@@ -112,8 +118,9 @@ export class Not extends Node {
return '!' + this.child.toString(true)
}
}
exports.Not = Not
export class NumberNode extends Node {
exports.Number = exports.NumberNode = class NumberNode extends Node {
constructor(value) {
super()
@@ -133,9 +140,8 @@ export class NumberNode extends Node {
return String(this.value)
}
}
export { NumberNode as Number }
export class NumberOrStringNode extends Node {
class NumberOrStringNode extends Node {
constructor(value) {
super()
@@ -160,9 +166,9 @@ export class NumberOrStringNode extends Node {
return this.value
}
}
export { NumberOrStringNode as NumberOrString }
exports.NumberOrString = exports.NumberOrStringNode = NumberOrStringNode
export class Property extends Node {
class Property extends Node {
constructor(name, child) {
super()
@@ -178,12 +184,13 @@ export class Property extends Node {
return `${formatString(this.name)}:${this.child.toString(true)}`
}
}
exports.Property = Property
const escapeChar = char => '\\' + char
const formatString = value =>
Number.isNaN(+value) ? (isRawString(value) ? value : `"${value.replace(/\\|"/g, escapeChar)}"`) : `"${value}"`
export class GlobPattern extends Node {
class GlobPattern extends Node {
constructor(value) {
// fallback to string node if no wildcard
if (value.indexOf('*') === -1) {
@@ -216,8 +223,9 @@ export class GlobPattern extends Node {
return this.value
}
}
exports.GlobPattern = GlobPattern
export class RegExpNode extends Node {
class RegExpNode extends Node {
constructor(pattern, flags) {
super()
@@ -245,9 +253,9 @@ export class RegExpNode extends Node {
return this.re.toString()
}
}
export { RegExpNode as RegExp }
exports.RegExp = exports.RegExpNode = RegExpNode
export class StringNode extends Node {
class StringNode extends Node {
constructor(value) {
super()
@@ -275,9 +283,9 @@ export class StringNode extends Node {
return formatString(this.value)
}
}
export { StringNode as String }
exports.String = exports.StringNode = StringNode
export class TruthyProperty extends Node {
class TruthyProperty extends Node {
constructor(name) {
super()
@@ -292,6 +300,7 @@ export class TruthyProperty extends Node {
return formatString(this.name) + '?'
}
}
exports.TruthyProperty = TruthyProperty
// -------------------------------------------------------------------
@@ -531,7 +540,7 @@ const parser = P.grammar({
),
ws: P.regex(/\s*/),
}).default
export const parse = parser.parse.bind(parser)
exports.parse = parser.parse.bind(parser)
// -------------------------------------------------------------------
@@ -573,7 +582,7 @@ const _getPropertyClauseStrings = ({ child }) => {
}
// Find possible values for property clauses in a and clause.
export const getPropertyClausesStrings = node => {
exports.getPropertyClausesStrings = function getPropertyClausesStrings(node) {
if (!node) {
return {}
}
@@ -605,7 +614,7 @@ export const getPropertyClausesStrings = node => {
// -------------------------------------------------------------------
export const setPropertyClause = (node, name, child) => {
exports.setPropertyClause = function setPropertyClause(node, name, child) {
const property = child && new Property(name, typeof child === 'string' ? new StringNode(child) : child)
if (node === undefined) {

View File

@@ -1,7 +1,9 @@
/* eslint-env jest */
import { ast, pattern } from './index.fixtures'
import {
'use strict'
const { ast, pattern } = require('./index.fixtures')
const {
getPropertyClausesStrings,
GlobPattern,
Null,
@@ -11,7 +13,7 @@ import {
Property,
setPropertyClause,
StringNode,
} from './'
} = require('./')
it('getPropertyClausesStrings', () => {
const tmp = getPropertyClausesStrings(parse('foo bar:baz baz:|(foo bar /^boo$/ /^far$/) foo:/^bar$/'))

View File

@@ -16,7 +16,6 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
@@ -26,21 +25,7 @@
"dependencies": {
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,12 +0,0 @@
import { parse } from './'
import { ast, pattern } from './index.fixtures'
export default ({ benchmark }) => {
benchmark('parse', () => {
parse(pattern)
})
benchmark('toString', () => {
ast.toString()
})
}

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -1,3 +1,5 @@
'use strict'
const match = (pattern, value) => {
if (Array.isArray(pattern)) {
return (
@@ -43,4 +45,6 @@ const match = (pattern, value) => {
return pattern === value
}
export const createPredicate = pattern => value => match(pattern, value)
exports.createPredicate = function createPredicate(pattern) {
return value => match(pattern, value)
}

View File

@@ -16,27 +16,13 @@
"url": "https://vates.fr"
},
"preferGlobal": false,
"main": "dist/",
"browserslist": [
">2%"
],
"engines": {
"node": ">=6"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"cross-env": "^7.0.2",
"rimraf": "^3.0.0"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build",
"postversion": "npm publish"
}
}

View File

@@ -1,3 +0,0 @@
'use strict'
module.exports = require('../../@xen-orchestra/babel-config')(require('./package.json'))

View File

@@ -1 +0,0 @@
../../scripts/babel-eslintrc.js

View File

@@ -1,3 +1,5 @@
'use strict'
const { createWriteStream } = require('fs')
const { PassThrough } = require('stream')
@@ -12,7 +14,7 @@ const createOutputStream = path => {
return stream
}
export const writeStream = (input, path) => {
exports.writeStream = function writeStream(input, path) {
const output = createOutputStream(path)
return new Promise((resolve, reject) =>

View File

@@ -1,11 +1,13 @@
import { VhdFile, checkVhdChain } from 'vhd-lib'
import getopts from 'getopts'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
'use strict'
const { VhdFile, checkVhdChain } = require('vhd-lib')
const getopts = require('getopts')
const { getHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')
const checkVhd = (handler, path) => new VhdFile(handler, path).readHeaderAndFooter()
export default async rawArgs => {
module.exports = async function check(rawArgs) {
const { chain, _: args } = getopts(rawArgs, {
boolean: ['chain'],
default: {

View File

@@ -1,9 +1,11 @@
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, Constants } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import omit from 'lodash/omit'
'use strict'
const deepCompareObjects = function (src, dest, path) {
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { openVhd, Constants } = require('vhd-lib')
const Disposable = require('promise-toolbox/Disposable')
const omit = require('lodash/omit')
function deepCompareObjects(src, dest, path) {
for (const key of Object.keys(src)) {
const srcValue = src[key]
const destValue = dest[key]
@@ -29,7 +31,7 @@ const deepCompareObjects = function (src, dest, path) {
}
}
export default async args => {
module.exports = async function compare(args) {
if (args.length < 4 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: compare <sourceRemoteUrl> <source VHD> <destinationRemoteUrl> <destination>`
}

View File

@@ -1,9 +1,11 @@
import { getSyncedHandler } from '@xen-orchestra/fs'
import { openVhd, VhdFile, VhdDirectory } from 'vhd-lib'
import Disposable from 'promise-toolbox/Disposable'
import getopts from 'getopts'
'use strict'
export default async rawArgs => {
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { openVhd, VhdFile, VhdDirectory } = require('vhd-lib')
const Disposable = require('promise-toolbox/Disposable')
const getopts = require('getopts')
module.exports = async function copy(rawArgs) {
const {
directory,
help,

View File

@@ -0,0 +1,43 @@
//
// This file has been generated by [index-modules](https://npmjs.com/index-modules)
//
var d = Object.defineProperty
function de(o, n, v) {
d(o, n, { enumerable: true, value: v })
return v
}
function dl(o, n, g, a) {
d(o, n, {
configurable: true,
enumerable: true,
get: function () {
return de(o, n, g(a))
},
})
}
function r(p) {
var v = require(p)
return v && v.__esModule
? v
: typeof v === 'object' || typeof v === 'function'
? Object.create(v, { default: { enumerable: true, value: v } })
: { default: v }
}
function e(p, i) {
dl(defaults, i, function () {
return exports[i].default
})
dl(exports, i, r, p)
}
d(exports, '__esModule', { value: true })
var defaults = de(exports, 'default', {})
e('./check.js', 'check')
e('./compare.js', 'compare')
e('./copy.js', 'copy')
e('./info.js', 'info')
e('./merge.js', 'merge')
e('./raw.js', 'raw')
e('./repl.js', 'repl')
e('./synthetize.js', 'synthetize')

View File

@@ -1,9 +1,13 @@
import { Constants, VhdFile } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
import * as UUID from 'uuid'
import humanFormat from 'human-format'
import invert from 'lodash/invert.js'
'use strict'
const { Constants, VhdFile } = require('vhd-lib')
const { getHandler } = require('@xen-orchestra/fs')
const { openVhd } = require('vhd-lib/openVhd')
const { resolve } = require('path')
const Disposable = require('promise-toolbox/Disposable')
const humanFormat = require('human-format')
const invert = require('lodash/invert.js')
const UUID = require('uuid')
const { PLATFORMS } = Constants
@@ -32,8 +36,8 @@ function mapProperties(object, mapping) {
return result
}
export default async args => {
const vhd = new VhdFile(getHandler({ url: 'file:///' }), resolve(args[0]))
async function showDetails(handler, path) {
const vhd = new VhdFile(handler, resolve(path))
try {
await vhd.readHeaderAndFooter()
@@ -43,6 +47,7 @@ export default async args => {
}
console.log(
'footer:',
mapProperties(vhd.footer, {
currentSize: 'bytes',
diskType: 'diskType',
@@ -53,6 +58,7 @@ export default async args => {
)
console.log(
'header:',
mapProperties(vhd.header, {
blockSize: 'bytes',
parentTimestamp: 'date',
@@ -67,3 +73,29 @@ export default async args => {
})
)
}
async function showList(handler, paths) {
let previousUuid
for (const path of paths) {
await Disposable.use(openVhd(handler, resolve(path)), async vhd => {
const uuid = MAPPERS.uuid(vhd.footer.uuid)
const fields = [path, MAPPERS.bytes(vhd.footer.currentSize), uuid, MAPPERS.diskType(vhd.footer.diskType)]
if (vhd.footer.diskType === Constants.DISK_TYPES.DIFFERENCING) {
const parentUuid = MAPPERS.uuid(vhd.header.parentUuid)
fields.push(parentUuid === previousUuid ? '<above VHD>' : parentUuid)
}
previousUuid = uuid
console.log(fields.join(' | '))
})
}
}
module.exports = async function info(args) {
const handler = getHandler({ url: 'file:///' })
if (args.length === 1) {
return showDetails(handler, args[0])
}
return showList(handler, args)
}

View File

@@ -1,16 +1,18 @@
import { Bar } from 'cli-progress'
import { mergeVhd } from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
'use strict'
export default async function main(args) {
const { Bar } = require('cli-progress')
const { getHandler } = require('@xen-orchestra/fs')
const { mergeVhdChain } = require('vhd-lib/merge')
const { resolve } = require('path')
module.exports = async function merge(args) {
if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: ${this.command} <child VHD> <parent VHD>`
}
const handler = getHandler({ url: 'file:///' })
let bar
await mergeVhd(handler, resolve(args[1]), handler, resolve(args[0]), {
await mergeVhdChain(handler, [resolve(args[1]), resolve(args[0])], {
onProgress({ done, total }) {
if (bar === undefined) {
bar = new Bar({

View File

@@ -1,11 +1,13 @@
import { openVhd } from 'vhd-lib'
import { getSyncedHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
'use strict'
import { writeStream } from '../_utils'
import { Disposable } from 'promise-toolbox'
const { openVhd } = require('vhd-lib')
const { getSyncedHandler } = require('@xen-orchestra/fs')
const { resolve } = require('path')
export default async args => {
const { writeStream } = require('../_utils')
const { Disposable } = require('promise-toolbox')
module.exports = async function raw(args) {
if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: ${this.command} <input VHD> [<output raw>]`
}

Some files were not shown because too many files have changed in this diff