Compare commits

..

553 Commits

Author SHA1 Message Date
Florent Beauchamp
4722a62a6a fix(xo-server): increase timeout for ova import 2022-12-08 08:47:49 +01:00
Julien Fontanet
1f32557743 fix(scripts/gen-deps-list): fix packages order (#6564)
The release order computation is now decoupled from the packages-to-release computation, and is now done for all packages so that transitive dependencies are still correctly ordered.
2022-11-30 14:52:46 +01:00
Julien Fontanet
e95aae2129 feat: release 5.77.0 2022-11-30 14:05:38 +01:00
Pierre Donias
9176171f20 feat: technical release (#6566) 2022-11-30 11:18:33 +01:00
Florent BEAUCHAMP
d4f2249a4d fix(xo-server/vm.warmMigration): use same job id in subsequent run (#6565)
Introduced by 72c69d7
2022-11-30 11:00:42 +01:00
Julien Fontanet
e0b4069c17 fix(scripts/bump-pkg): don't call git add --patch twice 2022-11-29 18:56:03 +01:00
Julien Fontanet
6b25a21151 feat(scripts/bump-pkg): ignore yarn.lock changes 2022-11-29 18:56:03 +01:00
Julien Fontanet
716dc45d85 chore(CHANGELOG): integrate released changes 2022-11-29 18:56:03 +01:00
Julien Fontanet
57850230c8 feat(xo-web): 5.108.0 2022-11-29 18:47:33 +01:00
Julien Fontanet
362d597031 feat(xo-server-web-hooks): 0.3.2 2022-11-29 18:47:14 +01:00
Julien Fontanet
e89b84b37b feat(xo-server-usage-report): 0.10.2 2022-11-29 18:46:54 +01:00
Julien Fontanet
ae6f6bf536 feat(xo-server-transport-nagios): 1.0.0 2022-11-29 18:46:27 +01:00
Julien Fontanet
6f765bdd6f feat(xo-server-sdn-controller): 1.0.7 2022-11-29 18:45:50 +01:00
Julien Fontanet
1982c6e6e6 feat(xo-server-netbox): 0.3.5 2022-11-29 18:45:30 +01:00
Julien Fontanet
527dceb43f feat(xo-server-load-balancer): 0.7.2 2022-11-29 18:44:12 +01:00
Julien Fontanet
f5a3d68d07 feat(xo-server-backup-reports): 0.17.2 2022-11-29 18:43:50 +01:00
Julien Fontanet
6c904fbc96 feat(xo-server-auth-ldap): 0.10.6 2022-11-29 18:43:22 +01:00
Julien Fontanet
295036a1e3 feat(xo-server-audit): 0.10.2 2022-11-29 18:42:30 +01:00
Julien Fontanet
5601d61b49 feat(xo-server): 5.107.0 2022-11-29 18:32:04 +01:00
Julien Fontanet
1c35c1a61a feat(xo-cli): 0.14.2 2022-11-29 18:31:24 +01:00
Julien Fontanet
4143014466 feat(xo-vmdk-to-vhd): 2.5.0 2022-11-29 18:29:33 +01:00
Julien Fontanet
90fea69b7e feat(@xen-orchestra/proxy): 0.26.5 2022-11-29 18:21:01 +01:00
Julien Fontanet
625663d619 feat(@xen-orchestra/xapi): 1.5.3 2022-11-29 18:18:09 +01:00
Julien Fontanet
403afc7aaf feat(@xen-orchestra/mixins): 0.8.2 2022-11-29 17:50:43 +01:00
Julien Fontanet
d295524c3c feat(@xen-orchestra/backups-cli): 1.0.0 2022-11-29 17:48:21 +01:00
Julien Fontanet
5eb4294e70 feat(@xen-orchestra/backups): 0.29.1 2022-11-29 17:48:21 +01:00
Julien Fontanet
90598522a6 feat(@xen-orchestra/audit-core): 0.2.2 2022-11-29 17:48:21 +01:00
Julien Fontanet
519fa1bcf8 feat(vhd-lib): 4.2.0 2022-11-29 17:48:21 +01:00
Julien Fontanet
7b0e5afe37 feat(@xen-orchestra/fs): 3.3.0 2022-11-29 17:48:21 +01:00
Julien Fontanet
0b6b3a47a2 feat(@vates/disposable): 0.1.3 2022-11-29 17:48:21 +01:00
Julien Fontanet
75db810508 feat(@xen-orchestra/log): 0.5.0 2022-11-29 17:48:21 +01:00
Julien Fontanet
2f52c564f5 chore(backups-cli): format package.json 2022-11-29 17:48:21 +01:00
Florent Beauchamp
011d582b80 fix(vhd-lib/merge): delete old data AFTER the alias has been overwritten 2022-11-29 16:42:57 +01:00
Julien Fontanet
32d21b2308 chore: use caret range for @vates/async-each
Introduced by 08298d328
2022-11-29 16:31:41 +01:00
Pierre Donias
45971ca622 fix(xo-web): remove duplicated imports (#6562) 2022-11-29 16:17:40 +01:00
Mathieu
f3a09f2dad feat(xo-web/VM/advanced): add button for warm migration (#6533)
See #6549
2022-11-29 15:14:41 +01:00
Mathieu
552a9c7b9f feat(xo-web/proxy): register an existing proxy (#6556) 2022-11-29 14:44:51 +01:00
Gabriel Gunullu
ed34d9cbc0 feat(xo-server-transport-nagios): make host and service configurable (#6560) 2022-11-29 14:34:41 +01:00
Julien Fontanet
187ee99931 fix(xo-server/plugin.configure): don't save injected defaults
Default values injected by Ajv from the configuration schema should not be saved.
2022-11-29 12:43:17 +01:00
Cécile Morange
ff78dd8f7c feat(xo-web/i18n): "XenServer" → "XCP-ng" (#6462)
See #6439
2022-11-29 11:47:16 +01:00
Julien Fontanet
b0eadb8ea4 fix: remove concurrency limit for dev script
Introduced by 9d5bc8af6

Limited concurrency (which is the default) is not compatible with never-ending commands.
2022-11-29 11:35:01 +01:00
Julien Fontanet
a95754715a fix: use --verbose for dev script
Introduced by 9d5bc8af6

Silent mode is not compatible (i.e. does not show a meaningful output) with never-ending commands.
2022-11-29 11:14:44 +01:00
Julien Fontanet
18ece4b90c fix(xo-server/MigrateVm): fix uuid import
Introduced by 72c69d791

Fixes #6561
2022-11-29 10:30:09 +01:00
Florent Beauchamp
3862fb2664 fix(fs/rename): throw ENOENT when source file is missing 2022-11-28 17:33:57 +01:00
Florent BEAUCHAMP
72c69d791a feat(xo-server): implement warm migration backend (#6549) 2022-11-28 17:28:19 +01:00
Julien Fontanet
d6192a4a7a chore: remove unused travis-tests.js 2022-11-28 15:51:47 +01:00
Julien Fontanet
0f824ffa70 lint(vhd-lib): remove unused var and fix formatting
Introduced by f6c227e7f
2022-11-26 10:10:08 +01:00
Florent BEAUCHAMP
f6c227e7f5 feat(vhd-lib): merge resume can resume when rename fails (#6530) 2022-11-25 20:51:33 +01:00
Julien Fontanet
9d5bc8af6e feat: run-script.js now only shows output on error by default 2022-11-25 15:45:52 +01:00
Julien Fontanet
9480079770 feat: script test-unit now bails on first error 2022-11-25 15:45:08 +01:00
Julien Fontanet
54fe9147ac chore: only enable Babel debug on prod builds
The output was making test results hard to see.
2022-11-25 14:43:36 +01:00
Gabriel Gunullu
b6a0477232 feat(xo-server-transport-nagios): report backed up VM individually (#6534) 2022-11-25 14:36:41 +01:00
Julien Fontanet
c60644c578 chore(lite): merge lint with the root config 2022-11-25 11:23:04 +01:00
Thierry Goettelmann
abdce94c5f feat(lite): type check on test (#6547) 2022-11-25 11:19:58 +01:00
Mathieu
d7dee04013 feat(xo-web/settings/users): remove OTP of users in admin panel (#6541)
See https://xcp-ng.org/forum/topic/6521
2022-11-25 11:15:07 +01:00
Julien Fontanet
dfc62132b7 fix(xo-web/remote): prevent browser from autocompleting encryption key 2022-11-24 18:48:45 +01:00
Julien Fontanet
36f7f193aa feat: run linter in CI 2022-11-24 17:00:59 +01:00
Julien Fontanet
ca4a82ec38 fix: make test-lint script ignore xo-web
Too many errors in this legacy package.
2022-11-24 16:26:40 +01:00
Julien Fontanet
37aea1888d chore: fix lint issues 2022-11-24 16:26:40 +01:00
Julien Fontanet
92f3b4ddd7 chore(backups/RemoteAdapter): remove unused invalidateVmBackupListCache 2022-11-24 16:26:40 +01:00
Mathieu
647995428c feat(lite/pool/dashboard): top 5 RAM usage (#6419) 2022-11-24 15:57:11 +01:00
Mathieu
407e9c25f3 feat(xo-web/licenses): text to explicit where to bind xcp-ng licenses (#6551)
See zammad#11037
2022-11-24 15:42:16 +01:00
Julien Fontanet
1612ab7335 fix(backups-cli/clean-vms): remove incorrect console.log
Introduced by 94c755b10
2022-11-23 23:03:46 +01:00
Julien Fontanet
b952c36210 fix(vhd-lib/merge): VhdAbstract.rename → handler.rename
Missing change from c5b3acfce
2022-11-23 15:02:56 +01:00
Florent BEAUCHAMP
96b5cb2c61 feat(xo-vmdk-to-vhd): overprovision vmdk size to generate ova in one pass (#6487) 2022-11-23 14:48:18 +01:00
Florent Beauchamp
c5b3acfce2 fix(vhd-lib): remove unsafe VhdAbstract.rename implementation
actual implementation was deleting the target vhd even if the source did not exist, leading to potential data loss
2022-11-23 14:31:37 +01:00
Julien Fontanet
20a01bf266 feat(lint-staged): format all files with Prettier 2022-11-22 18:20:01 +01:00
Julien Fontanet
a33b88cf1c chore: format with Prettier 2022-11-22 17:30:14 +01:00
Julien Fontanet
09a2f45ada feat: run test script for all pkgs with changed files 2022-11-22 17:30:14 +01:00
Julien Fontanet
83a7dd7ea1 chore: remove custom scripts/lint-staged 2022-11-22 17:30:14 +01:00
Julien Fontanet
afc1b6a5c0 Revert "feat: run pre-commit script for all packages"
This reverts commit f5b91cd45d.
2022-11-22 17:30:14 +01:00
Thierry Goettelmann
7f4f860735 feat(lite/color mode): "auto" mode + "D" shortcut to toggle (#6536)
The shortcut is only enabled in dev environment
2022-11-22 15:35:31 +01:00
Julien Fontanet
d789e3aa0d chore: update to husky@8 2022-11-22 15:33:43 +01:00
Julien Fontanet
f5b91cd45d feat: run pre-commit script for all packages 2022-11-22 11:37:40 +01:00
Julien Fontanet
92ab4b3309 chore(lite): format with Prettier (#6545) 2022-11-22 11:33:03 +01:00
Florent Beauchamp
2c456e4c89 fix(vhd-lib): create directory for merged blocks 2022-11-22 11:05:51 +01:00
Florent Beauchamp
1460e63449 fix(vhd-lib): write state at the begining 2022-11-22 11:05:51 +01:00
Julien Fontanet
8291124c1f feat(xo-server/remote.{create,set}): prevent xo-vm-backups suffix
Fixes zammad#10930
2022-11-21 16:58:24 +01:00
Julien Fontanet
fc4d9accfd feat(mixin): add usage 2022-11-21 11:04:51 +01:00
Julien Fontanet
80969b785f feat(xo-server/proxy.register): authenticationToken is now optional
It's automatically generated if missing, which can be useful when manually registering a proxy.
2022-11-20 23:51:48 +01:00
Julien Fontanet
3dfd7f1835 fix(xo-server/proxy.register): requires either address or vmUuid 2022-11-20 23:50:51 +01:00
Julien Fontanet
65daa39ebe fix(xo-cli): fix invalid parameters error message
Introduced by d7f29e736

The error format has changed due to the switch of xo-server to Ajv.
2022-11-20 23:44:50 +01:00
Julien Fontanet
5ad94504e3 feat(xo-web/downloadLog): use .json extension for JSON values 2022-11-20 23:20:01 +01:00
Julien Fontanet
4101bf3ba5 fix(xo-web): injected task.parent should not be enumerable
Shared task objects are directly altered and adding an enumerable cyclic property might break JSON.stringify in other components.
2022-11-20 23:19:35 +01:00
Thierry Goettelmann
e9d52864ef fix(lite): remove @trivago/prettier-plugin-sort-imports package breaking monorepo (#6531) 2022-11-18 11:32:27 +01:00
Julien Fontanet
aef2696426 feat(log): respect env.{DEBUG,NODE_DEBUG} by default
Previously, env.{DEBUG,NODE_DEBUG} were only handled if `log/configure` has been imported, now it's the case by default.
2022-11-18 10:42:13 +01:00
Julien Fontanet
94c755b102 fix(backups-cli/clean-vms): use getSyncedHandler 2022-11-18 10:42:13 +01:00
Gabriel Gunullu
279b457348 test(xo-remote-parser): from Jest to test (#6537) 2022-11-17 14:35:01 +01:00
Julien Fontanet
b5988bb8b7 chore(backups-cli): convert to ESM 2022-11-17 10:44:48 +01:00
Mathieu
f73b1d8b40 feat(lite): add loader in pool dashboard (#6468) 2022-11-17 10:15:03 +01:00
Gabriel Gunullu
b2ccb07a95 test(complex-matcher): from Jest to test (#6535) 2022-11-16 23:24:32 +01:00
Thierry Goettelmann
9560cc4e33 chore(lite): upgrade packages (#6532) 2022-11-16 11:18:04 +01:00
Julien Fontanet
e87c380556 chore: update dev deps 2022-11-15 15:16:29 +01:00
Julien Fontanet
b0846876f7 feat: release 5.76.2 2022-11-14 15:55:02 +01:00
Julien Fontanet
477ed67957 feat(xo-server): 5.106.1 2022-11-14 14:52:01 +01:00
Thierry Goettelmann
5acacd7e1e feat(lite): add merge prop to UiButtonGroup (#6494) 2022-11-14 11:08:26 +01:00
Thierry Goettelmann
8d542fe9c0 fix(lite): UiButton should follow UiButtonGroup transparent prop (#6493) 2022-11-14 11:06:54 +01:00
Thierry Goettelmann
b0cb249ae9 docs(lite): update README about UiIcon (#6520) 2022-11-14 10:22:07 +01:00
Julien Fontanet
185509a0cf fix(xo-server/proxy.upgradeAppliance): use getObject method on correct object
Introduced by 572359892
2022-11-10 18:12:57 +01:00
Julien Fontanet
08298d3284 feat: limit concurrency of root build script
Should fix https://xcp-ng.org/forum/post/54567
2022-11-10 18:09:05 +01:00
Mathieu
7a4cec5093 fix(dashboard/health): filter correctly unhealthyVdis (#6519)
See zammad#10720
2022-11-10 15:35:05 +01:00
rajaa-b
f44f5199c6 feat(lite): uncollapse hosts by default (#6428) 2022-11-10 15:12:37 +01:00
kursantkvi
81abc091de feat(xo-web/intl): Russian localization (#6526) 2022-11-10 10:02:16 +01:00
Pierre Donias
7e4f4c445d feat: release 5.76.1 (#6523) 2022-11-08 16:24:01 +01:00
Pierre Donias
5a673c1833 feat: technical release (#6521) 2022-11-08 11:12:48 +01:00
Mathieu
266231ae0f fix(xo-web): "Pro Support" instead of "pool support" in XCP-ng support tooltips (#6517)
See https://xcp-ng.org/forum/topic/6535
2022-11-08 10:11:16 +01:00
Florent BEAUCHAMP
9e87a887cb fix(xo-web/backup): cleanup settings correctly when deselecting health check (#6515)
Fix #6501
2022-11-08 10:06:04 +01:00
Mathieu
12e98bfd31 fix(xo-web/health): fix "an error has occurred" (#6508) 2022-11-08 09:52:54 +01:00
Mathieu
249f124ba6 fix(xo-web/license): display the license product ID in SelectLicense (#6512)
See zammad#10750
2022-11-08 09:50:46 +01:00
Julien Fontanet
131643a91b feat(xo-server/rest-api): expose VDI snapshots 2022-11-07 17:14:08 +01:00
Julien Fontanet
df3df18690 feat(xo-server/rest-api): expose VM snapshots and templates 2022-11-07 17:00:22 +01:00
Julien Fontanet
5401d17610 fix(xo-server/backup): respect httpProxy when connecting to XAPIs (#6513) 2022-11-07 15:07:35 +01:00
Florent BEAUCHAMP
90ea2284c6 fix(xo-vmdk-to-vhd): failing tests (#6518)
Sometimes `buffer.allocUnsafe` was generating a buffer containing only zeroes; that buffer was filtered in packages/xo-vmdk-to-vhd/src/vmdk-generate.js line 140, thus the generated vmdk was variable
2022-11-07 13:28:43 +01:00
Mathieu
a4c5792f9e fix(lite): fix .value is undefined (#6469) 2022-11-07 10:18:12 +01:00
Julien Fontanet
5723598923 feat(xo-server/proxy.upgradeAppliance): support proxies with unknown VM 2022-11-07 00:19:13 +01:00
Julien Fontanet
aa0b2ff93a feat(xo-server/proxy.register): vmUuid parameter 2022-11-06 01:10:09 +01:00
Gabriel Gunullu
be6233f12b test(backups): from Jest to test (#6500) 2022-11-04 17:00:02 +01:00
Olivier Lambert
17df749790 chore(ISSUE_TEMPLATE/bug_report): make it mandatory and ask commit (#6509) 2022-11-04 11:25:58 +01:00
Gabriel Gunullu
97f852f8e8 test(log): from Jest to test (#6498) 2022-11-04 10:54:11 +01:00
Gabriel Gunullu
dc3446d61a test(template): from Jest to test (#6499) 2022-11-04 10:53:38 +01:00
Mathieu
c830a0b208 fix(pool): added tooltip for no support icon (#6505)
See zammad#10716
2022-11-04 10:46:17 +01:00
Thierry Goettelmann
ff0307b68f fix(lite): Vite constants declaration (#6511) 2022-11-04 10:27:41 +01:00
Mathieu
1c3cad9235 feat(lite): alert when unreachable hosts (#6378) 2022-11-04 10:02:02 +01:00
Julien Fontanet
ccafc15b66 fix(xo-server): split-log → split-host
Introduced by ed7ff1fad

Fixes https://xcp-ng.org/forum/post/54503
2022-11-03 14:54:23 +01:00
Julien Fontanet
a40d6b32e3 fix(xo-server/sample.config.toml): typo log → logs
Introduced by 2dda1aecc

Fixes https://xcp-ng.org/forum/post/54351
2022-11-03 13:57:34 +01:00
Pierre Donias
de1ee92fe7 chore(lite): normalize package 2022-11-03 10:30:39 +01:00
Pierre Donias
c7227d2f50 feat(lite): settings page (#6418) 2022-11-03 10:30:39 +01:00
Mathieu
b2cebbfaf4 fix(lite): invalidate sessionId token (#6480) 2022-11-03 10:30:39 +01:00
Pierre Donias
30fbbc92ca feat(lite/ProgressBar): use transition instead of animation (#6466) 2022-11-03 10:30:39 +01:00
Mathieu
d1b210cf16 fix(lite/dashboard): add missing 'id' field for storage usage (#6467) 2022-11-03 10:30:39 +01:00
Mathieu
9963568368 feat(lite/pool/dashboard): top 5 CPU usage (#6370) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
ffc3249b33 feat(lite/component): UiSpinner (#6427) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
29826db81b fix(lite): fix build errors (#6448) 2022-11-03 10:30:39 +01:00
Pierre Donias
5367a76db5 chore(lite): create CHANGELOG.md (#6457) 2022-11-03 10:30:39 +01:00
Pierre Donias
2512a00205 fix(lite/UiBadge): do not instantiate FontAwesomeIcon if icon is undefined (#6446) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
72a3a9f04f feat(lite/component): Radio, Checkbox, Select, Input, Toggle (#6426) 2022-11-03 10:30:39 +01:00
Mathieu
b566e0fd46 feat(lite): persist language change (#6443) 2022-11-03 10:30:39 +01:00
Pierre Donias
4621fb4e9b feat(lite): set default language to English (#6442)
See https://xcp-ng.org/forum/topic/4731/xen-orchestra-lite/48?_=1664781482177
2022-11-03 10:30:39 +01:00
Mathieu
7f3d25964f feat(lite): display storage usage (#6421) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
4b3728e8d8 feat(lite/component): New multicolor modal (#6394) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
5218d6df1a feat(lite/component): Menu component (#6354)
* feat(lite/component): Menu component

* feat(lite/component): Add disabled prop to AppMenu

* feat(lite/component): Add custom placement to AppMenu + Fix trigger color

* feat(lite/component): Update VmsActionBar to use new AppMenu (#6357)

* fix(lite/menu): Doesn't teleport the root menu if no trigger

* Don't disable a menu item having a submenu

* i18n
2022-11-03 10:30:39 +01:00
Thierry Goettelmann
94b2b8ec70 feat(lite/buttons): Add multiple button colors, outlined and transparent (#6393) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
6d1086539e feat(lite/component): UiActionButton component (#6386) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
7f758bbb73 Revert "feat(lite/component): Radio and Checkbox"
This reverts commit abfb6c97a2.
2022-11-03 10:30:39 +01:00
Thierry Goettelmann
62b88200c3 feat(lite/component): Radio and Checkbox 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
ce42883268 feat(lite): tooltips (#6412) 2022-11-03 10:30:39 +01:00
Pierre Donias
6b60cfce4d feat(lite): deploy script (#6413) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
aebb47ad38 feat(lite/component): Linear Chart (#6376) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
41f5634b7a feat(lite): i18n (#6399) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
87ddb01122 feat(lite): use FontAwesome Free (#6405) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
6898eea45e feat(lite): Update missing colors (#6392) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
ba2679d3d7 feat(lite/component): Change style of active items in tree view (#6397) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
971cdaa44f feat(lite): CR feedback (#6341) 2022-11-03 10:30:39 +01:00
Pierre Donias
005d3b5976 feat(lite): placeholders for pool/host/VM name_label (#6391)
Some objects may have an empty `name_label`. This is to avoid confusion in the
tree view.
2022-11-03 10:30:39 +01:00
Thierry Goettelmann
663403cb14 chore(lite): change font-size (#6390) 2022-11-03 10:30:39 +01:00
Pierre Donias
b341e38623 feat(lite): add "coming soon" message on empty views (#6389) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
8246db30cb chore(lite): remove fake cards from Pool Dashboard (#6385) 2022-11-03 10:30:39 +01:00
Pierre Donias
9c9c656620 feat(lite/signin): smaller logo and few tweaks (#6381) 2022-11-03 10:30:39 +01:00
Pierre Donias
f36be0d5e0 feat(lite/nav): use logo without circle (#6382) 2022-11-03 10:30:39 +01:00
Pierre Donias
72090ea8ff feat(lite/dark mode): persistence + icon (#6383) 2022-11-03 10:30:39 +01:00
Pierre Donias
8d64a0a232 feat(lite): use new XO Lite logo (#6379) 2022-11-03 10:30:39 +01:00
Pierre Donias
35974a0a33 fix(lite): connect to window.origin's XAPI in prod (#6377) 2022-11-03 10:30:39 +01:00
Mathieu
3023439028 fix(lite): fix UiCard height (#6373) 2022-11-03 10:30:39 +01:00
Pierre Donias
77f4a09d74 chore(lite): switch from actual routes to hash routes (#6372)
XCP-ng web servers only serve the HTML on /xolite.html. This allows XO Lite to
still work when opened on a route that is different than /.
2022-11-03 10:30:39 +01:00
Pierre Donias
0fc797f7d0 fix(lite): change HTML main element ID from app to root (#6371)
XCP-ng web servers already serve an HTML file with a #root element. This allows
to use the new version of XO Lite without having to change that HTML file on
XCP-ng hosts.
2022-11-03 10:30:39 +01:00
Mathieu
0b02c84e33 feat(lite): xapiStat with fetchStats composable (#6361) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
e03ff0a9be feat(lite/component): update UiButton (#6355) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
d91f1841c0 feat(lite/component): UI Icon utility component (#6353) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
0effc9cfc1 fix(lite): disconnecting (#6346) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
f08cbb458d fix(lite): ESLint config (#6344) 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
b8c9770d43 chore(lite): merge old repo to XO 2022-11-03 10:30:39 +01:00
Julien Fontanet
44ff5d0e4d fix(lite): various fixes 2022-11-03 10:30:39 +01:00
Thierry Goettelmann
ecb580a629 feat(lite): initial Vue.js implementation 2022-11-03 10:30:39 +01:00
Julien Fontanet
0623d837c1 feat: release 5.76.0 2022-10-31 13:59:50 +01:00
Julien Fontanet
f92d1ce4ac chore(CHANGELOG): integrate released changes 2022-10-31 13:24:43 +01:00
Julien Fontanet
88f84069d6 feat(xo-web): 5.106.0 2022-10-31 13:23:49 +01:00
Julien Fontanet
b9b7081184 feat(xo-server): 5.105.0 2022-10-31 13:23:21 +01:00
Julien Fontanet
ce3e0817db feat(@xen-orchestra/proxy): 0.26.4 2022-10-31 13:22:52 +01:00
Julien Fontanet
55b65a8bf6 feat(@xen-orchestra/backups): 0.29.0 2022-10-31 13:22:18 +01:00
Julien Fontanet
6767141661 feat(@xen-orchestra/xapi): 1.5.2 2022-10-31 13:17:26 +01:00
rajaa-b
2eb3b15930 feat(xo-web/new-vm): possibility to destroy cloud config drive after first boot (#6486)
Fixes #6438
2022-10-31 12:25:55 +01:00
Julien Fontanet
b63c4a0d4f fix(xapi/waitObjectState): check if state is already correct 2022-10-28 16:15:53 +02:00
Mathieu
1269ddfeae feat(xo-web/pool): XCP-ng license binding (#6453) 2022-10-28 16:04:37 +02:00
rajaa-b
afd47f5522 fix(xo-web/proxies): remove "Bind license" for proxies without VM UUID (#6472) 2022-10-28 11:19:45 +02:00
Florent BEAUCHAMP
7ede6bdbce feat(backups): use NBD to export VDIs when possible (#6461) 2022-10-27 16:50:56 +02:00
Pierre Donias
03b505e40e feat: technical release (#6488) 2022-10-27 15:18:03 +02:00
Julien Fontanet
ed7ff1fad4 feat(xo-server): allow logging to external syslog
Follow-up of 756d2fe4e
2022-10-27 14:23:00 +02:00
Julien Fontanet
2dda1aecce feat(xo-server): use object to configure log transports
Follow-up of 756d2fe4e
2022-10-27 14:18:23 +02:00
Julien Fontanet
720e363577 test(fs/abstract): use getSyncedHandler 2022-10-27 09:26:49 +02:00
Florent Beauchamp
545a65521a fix(vhd-lib): improve openVhd error handling 2022-10-27 09:26:49 +02:00
Florent Beauchamp
0cf6f94677 test: rework tests following 05161bd4df
Tests of cleanVm are still failing, until we fix the error condition of cleanVm broken vhd removing

- don't use handler to / (need root to run)
- don't create file at the root of the remote (conflict with the metadata and encryption.json)
- test more unhappy paths
2022-10-27 09:26:49 +02:00
Florent Beauchamp
14e205ab69 fix(vhd-cli): sync handler 2022-10-27 09:26:49 +02:00
Florent Beauchamp
c3da87a40c fix(@xen-orchestra/fs): do not create metadata on non encrypted remote
this was creating files in the wrong place during tests or when running the cli
introduced by #05161bd4df5b42e5ecfa0ae11e60c466ab2eabdf
2022-10-27 09:26:49 +02:00
Gabriel Gunullu
5d93b05088 test(cron): from Jest to test (#6485) 2022-10-26 09:41:39 +02:00
Gabriel Gunullu
2cdd33cb7a test(async-map): from Jest to test (#6484) 2022-10-25 16:17:08 +02:00
Julien Fontanet
dc909fdfb0 test(async-each): fix iteratee calls
Introduced by myself in last minute change in a43199b75
2022-10-25 13:14:44 +02:00
Gabriel Gunullu
a43199b754 test(async-each): from Jest to test (#6481) 2022-10-25 12:23:59 +02:00
Gabriel Gunullu
876211879f test(decorate-with): from Tap to test (#6474) 2022-10-24 17:46:44 +02:00
Gabriel Gunullu
fe323b8fe5 test: remove unnecessary ESLint comments (#6479) 2022-10-24 17:46:27 +02:00
Florent BEAUCHAMP
b60f5d593b feat(xo-web/remote): show encryption in remote UI (#6465)
* if remote is disabled we don't know the used algorithm : only show the lock if there is an encryption key
* if remote is enabled : 
  *  if algorithm is undefined or none : show nothing, remote is not encrypted
  * if algorithm is defined to DEFAULT_ENCRYPTION_ALGORITHM : show the lock with the name of the algorithm as a tooltip
  * else show the lock and a warning advising to create a new remote with an up to date algorithm
2022-10-24 16:15:26 +02:00
Gabriel Gunullu
2d4317b681 test(read-chunk): from Jest to test (#6478) 2022-10-24 14:38:56 +02:00
Julien Fontanet
caf0eb3762 chore(eslint): accepts Node 16 features in tests 2022-10-24 11:21:19 +02:00
Gabriel Gunullu
c1aa7b9d8a test(multi-key-map): from Jest to test (#6477) 2022-10-24 10:05:51 +02:00
Gabriel Gunullu
6c6efd9cfb test(disposable): from Jest to test and SinonJS (#6476) 2022-10-24 10:04:48 +02:00
Julien Fontanet
551670a8b9 fix(eslint): disable n/no-unpublished-{import,require} in tests 2022-10-24 09:53:55 +02:00
Gabriel Gunullu
ac75225e7d test(compose): from Jest to test (#6473) 2022-10-21 16:25:25 +02:00
Julien Fontanet
20dbbeb38e feat(npmignore): handle *.test.*js files
This naming scheme is used by `node:test` and its userland implementation `test`.
2022-10-20 17:00:49 +02:00
Julien Fontanet
37dea9980e fix(npmignore): handle .cjs and .mjs files 2022-10-20 16:58:30 +02:00
Gabriel Gunullu
5cec2d4cb0 test(coalesce-calls): from Jest to test (#6470) 2022-10-20 16:46:56 +02:00
Julien Fontanet
ed76fa5141 feat(predicates): not operator 2022-10-20 12:47:02 +02:00
Julien Fontanet
389a765825 fix(mixins/_parseBasicAuth): consider empty password as missing
This makes `username:` recognized as token, just like `username` is.

This fixes token-based authentication in HttpProxy with cURL.
2022-10-20 10:21:09 +02:00
Julien Fontanet
3bad40095a fix(mixins/Config#watch): first run even when undefined
Fixes issue introduced by d157fd352
2022-10-19 18:43:48 +02:00
Gabriel Gunullu
1a51c66028 fix(ci): GitHub actions workflow (#6463) 2022-10-19 12:00:46 +02:00
Florent BEAUCHAMP
05161bd4df feat(fs): use aes256-gcm encryption algorithm (#6447)
Fixes zammad#9788
2022-10-17 11:33:55 +02:00
Florent BEAUCHAMP
db1102750f feat(xo-web): label of vhd directory backup (#6459) 2022-10-13 16:45:03 +02:00
Julien Fontanet
42a974476f feat(@vates/otp): minimal HOTP/TOTP implementation (#6456) 2022-10-12 15:44:43 +02:00
Florent BEAUCHAMP
0dd91c1efe feat(nbd-client): first implementation (#6444) 2022-10-12 14:46:16 +02:00
Julien Fontanet
756d2fe4e7 feat(xo-server): make log transport configurable
See zammad#9799
2022-10-12 14:37:41 +02:00
Julien Fontanet
61c64b49c7 feat(log/configure): can instantiate transport from JSON 2022-10-12 14:37:41 +02:00
Julien Fontanet
c2eb68a31a chore(fuse-vhd): remove unused var/lib
Introduced by 46fe3be32
2022-10-11 16:41:12 +02:00
Julien Fontanet
f1a1b922c7 chore: format with Prettier 2022-10-11 16:40:10 +02:00
Cécile Morange
a2dcceb470 docs(installation): Debian 10 → 11 (#6458)
Signed-off-by: Cécile MORANGE - AtaxyaNetwork <contact@ataxya.net>
2022-10-10 17:29:32 +02:00
Julien Fontanet
1d78fdd673 chore: update dev deps 2022-10-10 15:55:05 +02:00
Julien Fontanet
4a53749ca0 fix(xo-server): Redis via socket
Introduced by 9fab15537

Fixes #6455
2022-10-10 11:35:12 +02:00
ggunullu
7f73ec52d6 fix(docker): Node version incompatible with babel-jest
Introduced by 8fd10bace7
2022-10-07 16:45:54 +02:00
ggunullu
4abb172976 fix(docker): add missing dependency libfuse2
Introduced by 46fe3be322
2022-10-07 16:45:54 +02:00
Julien Fontanet
c52e0a5531 docs(xapi): improve VM Sync Hook example server 2022-10-07 11:52:07 +02:00
Julien Fontanet
0197758780 fix(CHANGELOG): restore unreleased comments
Introduced by 3d3b63a59
2022-10-07 11:23:32 +02:00
Julien Fontanet
e2521b6688 fix(read-chunk): handle already ended stream 2022-10-06 15:20:28 +02:00
Cécile Morange
13f19de1a0 feat(xo-web): replace XenServer by XCP-ng (#6439)
Signed-off-by: Cécile MORANGE <contact@ataxya.net>
2022-10-06 10:52:06 +02:00
Florent Beauchamp
5e589019d0 fix(xo-web): do not throw an error when editing a non encrypted remote
Introduced by dca3f39
2022-10-05 11:08:14 +02:00
Florent Beauchamp
feaad13ac3 fix(xo-web): save region on S3 remote creation
Introduced by f755365
2022-10-05 11:08:14 +02:00
Florent Beauchamp
ab9428a9c4 fix(xo-remote-parser): correctly handle empty S3 region
Introduced by c219ea0
2022-10-05 11:08:14 +02:00
Julien Fontanet
c964a1471a fix(xo-server-transport-nagios): don't fail on message with line break
Fixes zammad#9800
2022-10-03 17:18:07 +02:00
Julien Fontanet
424322f7b7 feat(xo-server): 5.103.1 2022-09-30 15:38:40 +02:00
Julien Fontanet
956a4f8b2a feat: release 5.75.0 2022-09-30 14:14:26 +02:00
Florent BEAUCHAMP
d87210e903 fix(xo-server): fix running replication job (#6437)
Introduced by 2d6b827fd
2022-09-28 15:38:54 +02:00
Florent BEAUCHAMP
3d3b63a596 feat: technical release (#6436) 2022-09-26 16:42:47 +02:00
Pierre Donias
4f9636b4c3 fix(xo-web/home/VMs): do not delete VMs when confirm modal has been cancelled (#6435)
See Zammad#9735
Introduced by 11e09e1f87
2022-09-26 15:27:01 +02:00
Mathieu
74c8d56046 feat(xo-web/storage/NFS): ability to specify subdirectory (#6425)
Fixes #3919
2022-09-26 15:17:27 +02:00
Florent BEAUCHAMP
2d6b827fd2 fix(xo-server): ignore disabled remotes when running VM backup (#6430) 2022-09-26 14:50:30 +02:00
Julien Fontanet
f82eb8aeb4 feat(xapi/VM_{checkpoint,snapshot}): HTTP sync hook (#6423) 2022-09-26 12:23:51 +02:00
Mathieu
f1ab62524c fix(xo-web/SR): fix "VDIs to coalesce" in SR advanced tab (#6429)
See https://xcp-ng.org/forum/topic/6334/coalesce-not-showing-anymore/3
Introduced by a9c1239149
2022-09-21 16:21:23 +02:00
rajaa-b
ce78d22bb8 fix(xo-web/tasks): fix tasks being displayed to all users (#6422)
See zammad#9509
Introduced by e246c8ee47
2022-09-21 11:25:14 +02:00
rajaa-b
99a1dbeae1 fix(xo-web/tasks): fix tasks filter (#6424)
See zammad#9423
2022-09-21 11:02:03 +02:00
Julien Fontanet
2a71e28253 docs(backups): add cache for a VM 2022-09-20 14:54:47 +01:00
Florent BEAUCHAMP
46fe3be322 feat: implement file restore on top of FUSE instead of vhdimount (#6409)
It brings file restore to VhdDirectory (and related features like encryption and compression).
2022-09-20 11:04:24 +02:00
Florent BEAUCHAMP
9da65b6c7c feat(backups): write and merge block concurrency are now configurable (#6416) 2022-09-16 14:54:33 +02:00
Julien Fontanet
ad02700b51 fix(backups/RemoteAdapter#_getPartition): mount with norecovery option 2022-09-15 11:06:59 +02:00
Julien Fontanet
8fd10bace7 chore: update deps 2022-09-15 00:06:07 +02:00
Julien Fontanet
9d09a3adf5 feat(backups/deleteVmBackups): run cleanVm in parallel 2022-09-12 11:23:46 +02:00
Julien Fontanet
4350f0cd1a fix(backups/deleteVmBackups): don't fail on cleanVm error
Seen when investigating zammad#8842
2022-09-12 11:23:44 +02:00
Julien Fontanet
5dc993255c fix(backups/DeltaBackupWriter#checkBaseVdis): don't warn on missing dir 2022-09-11 14:04:24 +02:00
Julien Fontanet
e9188a9864 feat(proxy/api): more raw errors
Follow up of ae373c3e7
2022-09-11 13:40:27 +02:00
Julien Fontanet
42dd70c2f7 chore(backups/RemoteAdapter): add more cache related debug 2022-09-10 14:16:54 +02:00
Julien Fontanet
191c124130 feat(backups): update VM backups cache (#6411) 2022-09-10 14:16:29 +02:00
Julien Fontanet
2742f948c2 feat(CHANGELOG): move releases before 2022 in other file
Because the main CHANGELOG is getting too big to be displayed on GitHub.
2022-09-10 13:21:15 +02:00
Julien Fontanet
455a3ba677 fix(CHANGELOG): fix version of 5.74.2 and 5.74.3 2022-09-10 13:14:57 +02:00
Julien Fontanet
1961da9aed feat(xo-server): 5.102.3 2022-09-09 18:29:48 +02:00
Julien Fontanet
e82d9d7a74 fix(xo-server/isValidAuthenticationToken): call _getAuthenticationToken
Introduced by d52dcd070
2022-09-09 17:37:44 +02:00
Florent BEAUCHAMP
dfb3166bed fix(backups): add healthcheck to full backup (#6401) 2022-09-09 16:35:38 +02:00
Florent BEAUCHAMP
5a54f7f302 feat(backups/cleanVm): invalidate cache on backup deletion (#6402) 2022-09-09 16:27:12 +02:00
Julien Fontanet
6002a497fe feat(xo-server): 5.102.2 2022-09-09 15:50:37 +02:00
Julien Fontanet
3fa8b6332e chore: update to app-conf@2.3.0 2022-09-09 12:59:59 +02:00
Manon Mercier
1b521b87c5 docs(configuration): add certificates in title to ease search (#6212) 2022-09-09 10:44:32 +02:00
rajaa-b
8b7d2aab6b feat(xo-server#_startVm): add a message for 'NO_HOSTS_AVAILABLE' error (#6408) 2022-09-09 10:43:22 +02:00
Julien Fontanet
b0006f91f4 fix(xo-server/vm.import): allow additional props for data param
Fixes zammad#9521

Introduced by d7f29e736
2022-09-08 11:10:51 +02:00
Julien Fontanet
31aaa968ec docs(users/SAML): remove confusing warning
See zammad#9420
2022-09-07 09:59:11 +02:00
Mathieu
facb4593f0 feat: release 7.74.2 (#6407) 2022-09-06 15:04:27 +02:00
Mathieu
d1a30363b4 feat: patch release (#6406) 2022-09-06 14:12:46 +02:00
Julien Fontanet
eac5347f32 fix(CHANGELOG): remove CHANGELOG.unreleased comments 2022-09-06 11:56:00 +02:00
Julien Fontanet
2006665fe8 feat(CHANGELOG): release 5.74.1 2022-09-06 11:55:06 +02:00
Julien Fontanet
26a3862d61 chore(CHANGELOG.unreleased): clearer error message
Introduced by 2371109b6
2022-09-06 11:53:53 +02:00
Mathieu
2371109b6f fix(xo-server): handle unfetched VDIs in pool.$ha_statefiles (#6404)
Introduced by 4dc7575d5

Fixes zammad#9498
2022-09-06 11:37:13 +02:00
Mathieu
243bffebbd feat(xo-server-auth-saml): support multiline cert (#6403)
Fixes https://xcp-ng.org/forum/topic/6174/saml-auth-with-azure-ad/10
2022-09-06 10:55:02 +02:00
Julien Fontanet
e69ae7b0db chore(CHANGELOG): integrate released changes 2022-09-03 11:37:43 +02:00
Julien Fontanet
5aff7b94d8 feat(xo-web): 5.103.0 2022-09-03 11:37:10 +02:00
Julien Fontanet
a65058ddd5 feat(xo-server): 5.102.0 2022-09-03 11:36:28 +02:00
Julien Fontanet
b1e81d84c6 feat(@xen-orchestra/proxy): 0.26.1 2022-09-03 11:35:27 +02:00
Julien Fontanet
96e60f7e4f feat(@xen-orchestra/mixins): 0.8.0 2022-09-03 11:34:58 +02:00
Julien Fontanet
5e59c617e8 feat(vhd-lib): 4.0.1 2022-09-03 11:33:54 +02:00
Julien Fontanet
69ad0ade6e feat(@xen-orchestra/fs): 3.1.0 2022-09-03 11:32:23 +02:00
Julien Fontanet
37cdbc19ef fix(xo-web): fix signout
Introduced by 281a1cc54
2022-09-02 18:45:10 +02:00
Julien Fontanet
6cbce81faa feat(xo-server): respect disabled setting for VM console
See #6319
2022-09-02 11:32:57 +02:00
Julien Fontanet
8c14906a60 fix(xo-server-recover-account): connect Redis client (#6398) 2022-09-02 11:01:42 +02:00
Florent BEAUCHAMP
62591e1f6f fix(vhd-lib/merge): reduce concurrency to protect slower backends (#6400) 2022-09-02 11:00:53 +02:00
Julien Fontanet
ea4a888c5e fix(xo-server/vm.create): allow additional props for VDIs and existingDisks
Fixes https://xcp-ng.org/forum/post/52561
2022-09-02 10:26:38 +02:00
Julien Fontanet
281a1cc549 feat(xo-server): validate auth token on HTTP request 2022-09-01 17:19:30 +02:00
Julien Fontanet
d52dcd0708 feat(xo-server): validate auth token on HTTP request 2022-09-01 17:15:39 +02:00
Florent BEAUCHAMP
d8e01b2867 fix(fs/s3#copy): normalize error: no such key → ENOENT (#6388) 2022-09-01 12:51:44 +02:00
Florent BEAUCHAMP
dca3f39156 feat(xo-web): remote level encryption (#6321)
Co-authored-by: mathieuRA <contact@mathieu-raisin.fr>
2022-09-01 11:34:48 +02:00
Julien Fontanet
31e964fe0f fix(xo-server/backupNg.{create,edit,run}Job): allow settings other than concurrency
Introduced by d7f29e736
2022-09-01 00:25:12 +02:00
Julien Fontanet
39d973c43f fix(xo-server/api): allow additional props on objects without prop definitions
Fixes #6395

Introduced by d7f29e736
2022-09-01 00:24:12 +02:00
Julien Fontanet
55f921959d fix(xo-server/api/adaptJsonSchema): fix additionalProperties test
Introduced by d7f29e736
2022-09-01 00:22:49 +02:00
Julien Fontanet
6598090662 fix(xo-server/api): keep previous params format
Introduced by d7f29e736

Avoid breaking `xo-cli --list-commands`.
2022-09-01 00:21:59 +02:00
Julien Fontanet
d7f29e7363 chore(xo-server/api): use Ajv instead of schema-inspector
- standard JSON schema
- faster
- maintained

New implementation also pre-compile schemas which means that params validation for each call is faster and incorrect schemas are detected at startup.
2022-08-31 16:46:17 +02:00
Julien Fontanet
82df6089c3 chore: refresh yarn.lock 2022-08-31 15:59:04 +02:00
rajaa-b
80cc66964e feat(xo-web/proxies): ability to bind licence to existing proxy (#6348)
See Zammad#7457
2022-08-31 15:40:34 +02:00
Florent BEAUCHAMP
7883d38622 fix(vhd-lib/VhdDirectory/mergeBlock): write BAT on block creation (#6300) 2022-08-31 15:35:10 +02:00
Julien Fontanet
2cb5169b6d feat(fs/Local): stack traces v2 (#6363)
- better support of lock/release
- handle sync exceptions as well
- save stacks in `syncStack` instead of replacing existing ones
2022-08-31 15:30:08 +02:00
Julien Fontanet
9ad2c07984 feat: release 5.74.0 2022-08-31 15:09:17 +02:00
Mathieu
a9c1239149 feat(xo-server/xo-web/health): detect invalid vhd-parent VDIs (#6356) 2022-08-31 11:35:35 +02:00
Mathieu
cb1223f72e feat: technical release (#6387) 2022-08-30 15:36:30 +02:00
Mathieu
4dc7575d5b feat(xo-web/storage): display SR used for the HA state files (#6384)
Fixes #6339
2022-08-29 17:02:50 +02:00
Julien Fontanet
276d1ce60a feat(backups/Task): add original log to *log after end* error 2022-08-29 10:05:25 +02:00
Julien Fontanet
58ab32a623 feat(backups/_forkStreamUnpipe): add more debug 2022-08-26 10:49:52 +02:00
Julien Fontanet
c1846e6ff3 fix(xen-api/{get,put}Resource): add sync stack traces support
Follows 857a9f3ef
2022-08-25 17:06:37 +02:00
Julien Fontanet
826de17111 feat(backups/VmBackup#_callWriters): add more debug 2022-08-25 16:43:45 +02:00
Julien Fontanet
8a09ea8bc1 feat(backups/VmBackup#_callWriters): unify single/multiple code
The behavior should be the same even if there is a single writer
2022-08-25 16:43:18 +02:00
Florent Beauchamp
1297c925ad feat: server side of backup encryption 2022-08-23 12:04:16 +02:00
Julien Fontanet
74d15e1a92 chore: format with Prettier 2022-08-23 11:56:26 +02:00
Julien Fontanet
ae373c3e77 feat(proxy/api): returns raw errors
Similar to dd5e11e83
2022-08-23 11:08:25 +02:00
Pierre Donias
e9b90caa3a fix(complex-matcher): properly alias RegExp export as RegExpNode (#6375)
Fixes #6365
Introduced by 9ef2c7da4c
2022-08-22 10:44:07 +02:00
Florent BEAUCHAMP
b89e77a6a4 fix: various VHD related test fixes (#6302) 2022-08-12 16:23:59 +02:00
Florent Beauchamp
61691ac46b fix(vhd-lib/VhdDirectory#mergeBlock): fix rename condition
Introduced by fd752fee8
2022-08-12 16:18:25 +02:00
Florent Beauchamp
512b96af24 fix(backups/cleanVm): fix path of merge state
Introduced by ad149740b1
2022-08-12 16:07:44 +02:00
Mathieu
d369593979 fix(xo-web): from ignoreBackup to bypassBackupCheck (#6362)
Introduced by 837b06ef2b
2022-08-12 15:01:22 +02:00
Julien Fontanet
2f38e0564b fix(fs/Local#lock): correctly assign release
Introduced by 4bed4195a
2022-08-11 17:13:40 +02:00
Julien Fontanet
5e8dd4e4bc fix(vhd-lib/mergeVhdChain): inverse condition to use VhdSynthetic
Introduced by 76813737e
2022-08-11 17:04:23 +02:00
Julien Fontanet
8f9f1f566d fix(proxy/api): typo in Array#includes
Introduced by 08cdcf411
2022-08-11 14:20:57 +02:00
olegur
d7870b8860 small docs typo fix 2022-08-11 14:16:12 +02:00
Julien Fontanet
97fa23f890 chore(xo-web): use vm.convertToTemplate instead of alias vm.convert 2022-08-11 11:22:54 +02:00
Julien Fontanet
f839887da8 chore(xo-server/api): remove unused alias vdi.delete_ 2022-08-11 11:22:54 +02:00
Julien Fontanet
15bfaa15ca chore(xo-server/api): remove unused alias network.delete_ 2022-08-11 11:22:54 +02:00
Julien Fontanet
4a3183ffa0 chore(xo-server/api): remove unused method 2022-08-11 11:22:54 +02:00
Pierre Donias
18d03a076b fix(xo-web/backup/restore): don't use UNSAFE_componentWillReceiveProps (#6364)
Introduced by 7d6e832226

`UNSAFE_componentWillReceiveProps` method was only introduced in React 16 but we
are using React 15
`.eslintrc`: ask eslint to check the React version so that it doesn't suggest to
change it to `UNSAFE_componentWillReceiveProps`
2022-08-11 11:19:07 +02:00
Julien Fontanet
4bed4195ac feat(fs/Local#lock): attempt to reacquire in case of compromission
Related to zammad#8826
2022-08-10 17:41:16 +02:00
Julien Fontanet
a963878af5 fix(fs/Local#lock): never fail on release
Related to zammad#8826

Also, log properly if the log is compromised.
2022-08-10 17:32:36 +02:00
Julien Fontanet
d6c3dc87e0 feat(xo-server): avoid warning if client WS has been closed
Fixes part of zammad#8826
2022-08-10 16:53:38 +02:00
Julien Fontanet
5391a9a5ad chore(CHANGELOG.unreleased): bump fs in major
Introduced by b50e95802
2022-08-10 16:52:39 +02:00
Julien Fontanet
b50e95802c feat(fs): remove JS based SMB handler
It's not well tested nor maintained.
2022-08-10 16:28:05 +02:00
Julien Fontanet
75a9799e96 feat(xo-server): make http.listen config an object
It remains compatible with previous configurations.

It now aligns with xo-proxy's config and is easier to overload with other config files.
2022-08-10 14:05:16 +02:00
Julien Fontanet
dbb9e4d60f feat(fs/Local): add stack traces to native fs methods 2022-08-09 11:25:49 +02:00
Julien Fontanet
d27b6bd49d fix(xo-server/collection/redis#{add,update}): cast to string before inserting in db
Fixes https://xcp-ng.org/forum/post/51933
Fixes #6359

Introduced by 36b94f745
2022-08-07 13:27:25 +02:00
Julien Fontanet
c5d2726faa chore(xo-server/collection/redis): remove unused constructor param
Introduced by 36b94f745
2022-08-07 13:01:51 +02:00
Julien Fontanet
a2a98c490f feat(xo-server/db-cli): improve help message 2022-08-07 12:48:34 +02:00
Julien Fontanet
e2dc1d98f1 feat(xo-server/db-cli repl): now has a collection ready for each namespace 2022-08-07 12:46:52 +02:00
Julien Fontanet
658c26d3c9 fix(xo-server/collection/redis#{add,update}): fix ignore id field 2022-08-06 13:23:46 +02:00
Julien Fontanet
612095789a feat(xo-server/db-cli): repl command 2022-08-06 12:31:56 +02:00
Julien Fontanet
7418d9f670 fix(xo-server/collection/redis#{add,update}): save all fields
Fixes https://xcp-ng.org/forum/post/51916

Introduced by 36b94f745

`Redis#hSet` accepts an object instead of a sequence of key/value.

The previous commit corrupted the database by deleting all but one fields per added/updated objects.
2022-08-06 12:30:08 +02:00
Julien Fontanet
f344c58a62 feat(xo-server/db-cli): ensure errors are printed 2022-08-06 11:30:00 +02:00
Julien Fontanet
36b94f745d fix(xo-server): redis@4 usage
Introduced by 9fab15537
2022-08-05 16:53:22 +02:00
Julien Fontanet
08cdcf4112 feat(proxy/api): method results can be documented/validated 2022-08-05 16:15:21 +02:00
Julien Fontanet
76813737ef feat(vhd-cli/merge): replace mergeVhd by mergeVhdChain 2022-08-05 15:04:44 +02:00
Julien Fontanet
53d15d6a77 chore(vhd-lib/merge): remove unnecessary concat 2022-08-05 15:04:44 +02:00
Julien Fontanet
dd01b62b87 feat(vhd-lib/mergeVhd): no longer exported from the index
BREAKING CHANGE
2022-08-05 15:04:44 +02:00
Julien Fontanet
9fab15537b chore: update deps 2022-08-05 14:25:09 +02:00
Florent BEAUCHAMP
d87db05b2b feat: release 5.73.1 (#6352) 2022-08-04 17:47:50 +02:00
Florent BEAUCHAMP
f1f32c962c feat: technical release (#6351) 2022-08-04 16:05:29 +02:00
Florent Beauchamp
ad149740b1 feat(backups/cleanVm,vhd-lib): support resuming merge of VHD chains
The whole chain is now stored in the merge state.
2022-08-04 15:25:31 +02:00
Florent Beauchamp
9a4e938b91 fix(backups/cleanVm): fix parent/child order when resuming merge 2022-08-04 15:25:31 +02:00
Julien Fontanet
a226760b07 fix(xo-web/css): fix double slash in Font Awesome import
Fixes #6350
2022-08-04 10:23:06 +02:00
Yannick Achy
a11450c3a7 docs(xoa): NTP configuration (#6342)
Co-authored-by: yannick Achy <yannick.achy@vates.fr>
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-08-04 09:24:00 +02:00
Julien Fontanet
e0cab4f937 feat(vhd-lib/merge): augment errors with VHD paths 2022-08-03 16:41:59 +02:00
Julien Fontanet
468250f291 fix(vhd-lib/merge): fix mergeState.currentBlock
Fixes zammad#8794 and zammad#8168

Introduced by 97d94b795

When the concurrency is one (or race condition), `Math.min(...merging)` could be called with `merging` being empty.

This lead to a `NaN` value which, was stored as `null` in the JSON merge state.
2022-08-03 14:56:49 +02:00
Julien Fontanet
d04b93c17e feat(vhd-cli info): explicit header/footer objects 2022-08-03 14:49:14 +02:00
Florent BEAUCHAMP
911556a1aa fix(backups/cleanVm): the child, not the parent is linked to a backup (#6331) 2022-08-03 14:06:35 +02:00
Per-Ole
c7d3230eef feat(xo-server-auth-saml): allow customizing callback URL (#6278)
This will allow you to enter the full path of the callback URL. As stated in issue #6108 the SAML plugin wont work with Azure AD as the callback URL needs to be HTTPS. This was solved by @jens-rabe
2022-08-03 10:12:32 +02:00
Julien Fontanet
b63086bf09 fix(xo-web): use complex-matcher classes to build filters
Using strings directly breaks with special characters.
2022-08-02 21:36:41 +02:00
Florent BEAUCHAMP
a4118a5676 docs(backups): describes file hierarchy with VHD directories (#6337) 2022-08-02 11:12:24 +02:00
Julien Fontanet
26e7e6467c fix(xo-server,xo-web): prevent backup concurrency from being <=0
Fixes #6338
2022-07-31 20:04:47 +02:00
Julien Fontanet
1c9552fa58 docs(mixins/SslCertificate): don't recommends using staging
This confuses users and XO is not currently able to detect that this value has changed and generate a new certificate.
2022-07-29 19:40:29 +02:00
Julien Fontanet
9875cb5575 docs(mixins/SslCertificate): explicits that cert/key are required 2022-07-29 19:26:33 +02:00
Julien Fontanet
d1c6bb8829 fix(mixins/SslCertificate): remove unnecessary warnings 2022-07-29 19:24:49 +02:00
Julien Fontanet
ef7005a291 fix(backups/cleanVms): remove useless log 2022-07-29 15:42:46 +02:00
Pierre Donias
8068b83ffe feat: release 5.73.0 (#6336) 2022-07-29 10:48:59 +02:00
Pierre Donias
f01a89710c feat: technical release (#6335) 2022-07-29 10:08:26 +02:00
Julien Fontanet
38ced81ada fix(backups,backups-cli): correctly pass loggers to cleanVm
Introduced by c0b0ba433
2022-07-29 08:54:31 +02:00
Julien Fontanet
9834632d59 fix(vhd-lib/merge): delete merge state after cleanup
Related to https://xcp-ng.org/forum/post/51529

In case the clean-up fails, the merge will be retried in the future.
2022-07-28 21:39:36 +02:00
Julien Fontanet
bb4504dd50 fix(vhd-lib/merge): don't delete children if renaming parent failed
Related to https://xcp-ng.org/forum/post/51529
2022-07-28 21:39:36 +02:00
Julien Fontanet
8864c2f2db fix(backups/cleanVm): wait for unreferenced VHDs to be deleted
Introduced by 249f63849
2022-07-28 21:26:22 +02:00
Julien Fontanet
19208472e6 feat(backups/cleanVm): improve log messages
Introduced by c0b0ba433
2022-07-28 21:26:22 +02:00
Florent BEAUCHAMP
10c77ba3cc feat(mixins/SslCertificate): Let's Encrypt support (#6320) 2022-07-28 18:13:12 +02:00
Pierre Donias
cd28fd4945 feat: technical release (#6332) 2022-07-28 15:57:40 +02:00
Mathieu
6778d6aa4a fix(xo-web/VM): display a confirmation modal to bypass blockedOperation (#6295) 2022-07-28 15:01:22 +02:00
Pierre Donias
433851d771 fix(xo-server/xapi-object-to-xo): handle guest metrics reporting empty IP field (#6328)
See https://xcp-ng.org/forum/topic/4810/netbox-plugin-error-ipaddr-the-address-has-neither-ipv6-nor-ipv4-format/27?_=1658735770330
2022-07-27 12:03:22 +02:00
Julien Fontanet
d157fd3528 feat(mixins/HttpProxy): enable by default 2022-07-26 10:59:37 +02:00
Julien Fontanet
9150823c37 fix(xo-server/unregisterProxy): don't try to unbind license if method unavailable 2022-07-25 18:05:08 +02:00
Julien Fontanet
07c3a44441 fix(xo-server/registerProxy): db.add does not return a model
Introduced by 8a71f8473
2022-07-25 18:01:30 +02:00
Julien Fontanet
051bbf9449 fix(xo-server/callProxyMethod): use stored address before XAPI ones 2022-07-25 17:58:40 +02:00
Julien Fontanet
22ea1c0e2a fix(xo-server/proxy.update): return proxy with its URL 2022-07-25 17:54:31 +02:00
Julien Fontanet
6432a44860 chore(fs/createOutputStream): remove deprecated method
Deprecated since 407586e2d
2022-07-25 16:00:42 +02:00
Julien Fontanet
493d861de3 chore(xo-server,proxy): fix linting errors 2022-07-25 13:26:26 +02:00
Julien Fontanet
82452e9616 feat(xo-server/RestApi): add raw VDI import 2022-07-21 16:28:22 +02:00
Julien Fontanet
2fbeaa618a fix(xapi/SR_importVdi): import as VHD, not raw
Fixes #6327
2022-07-21 15:18:48 +02:00
Julien Fontanet
6c08afaa0e fix(xapi/VDI_importContent): format is not optional 2022-07-21 15:18:48 +02:00
Julien Fontanet
af4cc1f574 fix(xo-cli): extract ws error message
Fixes #6022
2022-07-21 12:36:58 +02:00
Julien Fontanet
2fb27b26cd feat(xo-server): refresh HTTP proxy on config change 2022-07-21 10:42:30 +02:00
Pierre Donias
11e09e1f87 fix(xo-web/home/vm): show error toaster when deleting VMs failed (#6323) 2022-07-21 09:42:16 +02:00
Julien Fontanet
9ccb5f8aa9 feat(xo-server): inject proxy in env (#6322)
Fixes zammad#8073

Related to #6320

- brings `no_proxy` supports
- implicit supports for other libs
2022-07-20 15:27:57 +02:00
Pierre Donias
af87d6a0ea docs(contributing): update contribution steps (#6318)
See https://xcp-ng.org/forum/topic/6070/netbox-plugin-enhancements
2022-07-11 17:01:57 +02:00
Julien Fontanet
d847f45cb3 feat: release 5.72.1 2022-07-11 10:37:01 +02:00
Julien Fontanet
38c615609a feat(xo-web): 5.100.0 2022-07-11 10:36:03 +02:00
Julien Fontanet
144cc4b82f feat(xo-server-audit): 0.10.0 2022-07-11 10:36:03 +02:00
Julien Fontanet
d24ab141e9 feat(xo-server): 5.98.1 2022-07-11 10:36:03 +02:00
Julien Fontanet
8505374fcf feat(@xen-orchestra/proxy): 0.23.5 2022-07-11 10:36:03 +02:00
Julien Fontanet
e53d961fc3 feat(@xen-orchestra/backups-cli): 0.7.5 2022-07-11 10:36:03 +02:00
Julien Fontanet
dc8ca7a8ee feat(vhd-lib): 3.3.2 2022-07-11 10:36:03 +02:00
Julien Fontanet
3d1b87d9dc feat(@xen-orchestra/backups): 0.27.0 2022-07-11 10:35:58 +02:00
Julien Fontanet
01fa2af5cd chore: refresh yarn.lock 2022-07-11 10:11:00 +02:00
Julien Fontanet
20a89ca45a feat(xo-server-audit): ignore more methods 2022-07-09 10:41:36 +02:00
Julien Fontanet
16ca2f8da9 fix(xo-web/vm/console): SSH/RDP URLs
Introduced by 2b0f1b6aa and e9f82558e.
2022-07-07 17:06:49 +02:00
Florent BEAUCHAMP
30fe9764ad fix(backups,vhd-lib): merge with VhdSynthetic (#6317) 2022-07-07 16:57:15 +02:00
rajaa-b
e246c8ee47 fix(xo-web/tasks): tasks with no bound objects not displayed (#6315)
See https://xcp-ng.org/forum/topic/6038/not-seeing-tasks-any-more-as-admin
Introduced by dae37c6a50
2022-07-06 10:52:37 +02:00
Julien Fontanet
ba03a48498 chore(xo-server): update to hashy@0.11.1
Fixes https://xcp-ng.org/forum/post/50866

Introduced by 49890a09b7

Fixes argon2id support which is the new default algorithm since argon2@0.28.7
2022-07-06 09:44:48 +02:00
Julien Fontanet
b96dd0160a feat(async-each): change default concurrency to 10
BREAKING CHANGE
2022-07-05 12:00:07 +02:00
Julien Fontanet
49890a09b7 chore: update dev deps 2022-07-05 11:13:50 +02:00
Julien Fontanet
dfce56cee8 feat(async-each): add basic JsDoc typing 2022-07-04 17:37:52 +02:00
Julien Fontanet
a6fee2946a feat(async-each): concurrency 0 means no limit
It's identical to `Infinity` but has broader support (e.g. in JSON).
2022-07-04 17:22:47 +02:00
Julien Fontanet
34c849ee89 fix(vhd-lib/VhdAbstract#readBlock): return type 2022-07-04 10:57:44 +02:00
Mathieu
c7192ed3bf feat(xo-web): display maintenance mode badge next to the SR name (#6313) 2022-07-01 16:22:45 +02:00
Julien Fontanet
4d3dc0c5f7 feat: release 5.72.0 2022-06-30 16:47:32 +02:00
Julien Fontanet
9ba4afa073 chore(CHANGELOG): integrate released changes 2022-06-30 15:49:25 +02:00
Julien Fontanet
3ea4422d13 feat(xo-web): 5.99.0 2022-06-30 15:47:22 +02:00
Julien Fontanet
de2e314f7d feat(xo-server): 5.98.0 2022-06-30 15:46:58 +02:00
Julien Fontanet
2380fb42fe feat(@xen-orchestra/proxy): 0.23.4 2022-06-30 15:46:14 +02:00
Julien Fontanet
95b76076a3 feat(xo-remote-parser): 0.9.1 2022-06-30 15:45:29 +02:00
Julien Fontanet
b415d4c34c feat(vhd-lib): 3.3.1 2022-06-30 15:44:21 +02:00
Julien Fontanet
2d82b6dd6e feat(@xen-orchestra/xapi): 1.4.0 2022-06-30 15:38:10 +02:00
Mathieu
16b1935f12 feat(xo-server,xo-web/SR): display maintenance mode button (#6308)
Fixes #6215
2022-06-30 15:31:28 +02:00
Florent BEAUCHAMP
50ec614b2a feat(xo-web/remotes): ability to set useVhdDirectory in remote params (#6273) 2022-06-30 15:28:42 +02:00
rajaa-b
9e11a0af6e feat(xapi/VM_import): translate checksum error (#6304) 2022-06-30 12:08:36 +02:00
Florent BEAUCHAMP
0c3e42e0b9 fix(vhd-lib): fix VhdDirectory merge on non-S3 remote (#6310) 2022-06-30 11:40:21 +02:00
Julien Fontanet
36b31bb0b3 chore(vhd-lib/merge): minor comment improvement 2022-06-29 15:29:20 +02:00
Mathieu
c03c41450b feat: technical release (#6311) 2022-06-29 15:27:14 +02:00
Florent BEAUCHAMP
dfc2b5d88b feat(Backup): use vhd directory setting of remote (#6303) 2022-06-29 10:51:13 +02:00
Florent BEAUCHAMP
87e3e3ffe3 fix(xo-remote-parser): properly handle undefined options (#6309) 2022-06-29 10:26:50 +02:00
Rajaa.BARHTAOUI
dae37c6a50 feat(xo-web/tasks): show tasks for Self Service users (#6217)
See zammad#5436
2022-06-28 18:35:58 +02:00
Mathieu
c7df11cc6f feat(xo-web/user): user tokens management through XO interface (#6276) 2022-06-28 17:57:59 +02:00
Julien Fontanet
87f1f208c3 feat(vhd-cli): 0.8.0 2022-06-28 16:52:27 +02:00
Julien Fontanet
ba8c5d740e feat(vhd-cli info): list method with multiple VHDs 2022-06-27 16:24:43 +02:00
Julien Fontanet
c275d5d999 chore(vhd-cli): remove build step 2022-06-27 16:24:43 +02:00
Mathieu
cfc53c9c94 feat(xo-web/proxies): copy proxy URL (#6287) 2022-06-27 15:41:32 +02:00
Julien Fontanet
87df917157 feat(vhd-lib/merge): human readable UUID check
Introduced by a1bcd35e2
2022-06-27 14:10:15 +02:00
Julien Fontanet
395d87d290 chore(xo-common): remove build step 2022-06-23 17:24:54 +02:00
Julien Fontanet
aff8ec08ad chore(template): remove build step 2022-06-23 17:24:54 +02:00
Julien Fontanet
4d40b56d85 fix(xo-server/file restore): ignore non-regular files/dirs (#6305)
Fixes zammad#7648

This also ignore (broken and valid) symlinks.
2022-06-23 16:37:56 +02:00
Julien Fontanet
667d0724c3 docs(configuration/custom ca): fix systemd path
Introduced by 03a66e469
2022-06-22 11:32:24 +02:00
Julien Fontanet
a49395553a docs(configuration/custom ca): fix systemd path
Introduced by 03a66e469
2022-06-22 11:30:09 +02:00
Julien Fontanet
cce09bd9cc docs(configuration/custom ca): add note regarding XO Proxy 2022-06-22 10:44:25 +02:00
Julien Fontanet
03a66e4690 docs(configuration/custom ca): use separate systemd file
This is better as it avoids conflicts with existing config and is compatible with the way XO Proxy service is handled.
2022-06-22 10:44:25 +02:00
Florent BEAUCHAMP
fd752fee80 feat(backups,vhd-lib): implement copyless merge (#6271) 2022-06-22 10:36:57 +02:00
Julien Fontanet
8a71f84733 chore(xo-server): remove Model wrapping 2022-06-22 10:10:39 +02:00
Julien Fontanet
9ef2c7da4c chore(complex-matcher): remove build step 2022-06-22 09:55:59 +02:00
Julien Fontanet
8975073416 fix(xapi): add missing file
Introduced by b12c17947

Thanks @Danp2.
2022-06-22 00:07:32 +02:00
Julien Fontanet
d1c1378c9d feat(xo-server-db): minimal CLI to browser the DB 2022-06-21 18:11:44 +02:00
Julien Fontanet
7941284a1d feat(xo-server/collection/Redis): set of all indexes 2022-06-21 17:47:56 +02:00
Julien Fontanet
af2d17b7a5 feat(xo-server/collection/Redis): set of all namespaces 2022-06-21 17:29:19 +02:00
Julien Fontanet
3ca2b01d9a feat(xo-server/collection/Redis): assert namespace doesn't contain _ or : 2022-06-21 17:24:10 +02:00
Julien Fontanet
67193a2ab7 chore(xo-server/collection/Redis): replace prefix by namespace 2022-06-21 17:23:25 +02:00
Julien Fontanet
9757aa36de chore(xo-server/collection/Redis): _id field was never used 2022-06-21 17:23:18 +02:00
Julien Fontanet
29854a9f87 feat(xo-server): new sr.{enable,disable}MaintenanceMode methods 2022-06-21 15:07:09 +02:00
Julien Fontanet
b12c179470 feat(xapi): new SR_{enable,disable}MaintenanceMode methods 2022-06-21 15:07:09 +02:00
Julien Fontanet
bbef15e4e4 feat(xo-server/proxy.get{,All}); return associated URL(s) (#6291) 2022-06-21 11:33:25 +02:00
Florent BEAUCHAMP
c483929a0d fix(ova import): drain disk entry completely (#6284) 2022-06-20 16:09:20 +02:00
Julien Fontanet
1741f395dd chore(xo-server/deleteAuthenticationTokens): optimization
Don't use xo-server/deleteAuthenticationToken to avoid fetching the records twice.
2022-06-19 11:37:42 +02:00
Julien Fontanet
0f29262797 chore(value-matcher): remove build step 2022-06-19 11:28:11 +02:00
Julien Fontanet
31ed477b96 feat(xo-server/token.delete): available for non-admins 2022-06-17 11:59:29 +02:00
Julien Fontanet
9e5de5413d feat(xo-server/Collection#remove): accept a pattern 2022-06-17 11:59:29 +02:00
Florent BEAUCHAMP
0f297a81a4 feat(xo-remote-parser): additional parameters in URL (#6270) 2022-06-16 23:14:34 +02:00
Mathieu
89313def99 fix(xapi/vm): throw forbiddenOperation on blockedOperation (#6290) 2022-06-16 14:39:20 +02:00
Julien Fontanet
8e0be4edaf feat(xo-server/vm.set): blockedOperations now accepts string reasons and null
Related to #6290
2022-06-16 10:16:43 +02:00
Julien Fontanet
a8dfdfb922 fix(event-listeners-manager/add): _listeners is a Map 2022-06-15 14:37:38 +02:00
Julien Fontanet
f096024248 chore(event-listeners-manager): add tests 2022-06-15 14:37:31 +02:00
Julien Fontanet
4f50f90213 feat(xo-server/token.create): minimum duration is now one minute
This change also handles negative or zero invalid durations.
2022-06-15 11:26:32 +02:00
Julien Fontanet
4501902331 feat(xo-server): XO Proxy channel based on current channel (#6277) 2022-06-15 10:42:57 +02:00
Julien Fontanet
df19679dba fix(xo-cli): close connection when finished 2022-06-15 10:25:06 +02:00
Julien Fontanet
9f5a2f67f9 fix(xo-cli): xdg-basedir import
Introduced by 2d5c40632
2022-06-15 10:22:39 +02:00
Julien Fontanet
2d5c406325 chore: update dev deps 2022-06-13 19:33:09 +02:00
Julien Fontanet
151b8a8940 feat(read-chunk): add readChunkStrict 2022-06-13 12:01:02 +02:00
Julien Fontanet
cda027b94a docs(read-chunk): behavior when stream has ended 2022-06-13 11:22:42 +02:00
Julien Fontanet
ee2117abf6 chore(CHANGELOG.unreleased): pkgs list should be ordered
See https://team.vates.fr/vates/pl/1q6or14b9jffjfxk9qyebfg6sh
2022-06-13 11:22:08 +02:00
Thierry Goettelmann
6e7294d49f feat: release 5.71.1 (#6285) 2022-06-13 11:06:36 +02:00
Manon Mercier
062e45f697 docs(backup/troubleshooting): add no XAPI associated error (#6279)
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-06-13 10:07:20 +02:00
Julien Fontanet
d18b39990d feat(xo-server/api): introduce a global async API context (#6274)
This allows access to contextual data deep inside the call stack.

Example use cases:
- current user
- specific permission (e.g. read only token)
- current authentication token
2022-06-13 09:43:39 +02:00
Julien Fontanet
7387ac2411 fix(xo-server/disk.import): fix xapi._getOrWaitObject call
Maybe related to #6282

Introduced by 5063a6982
2022-06-10 17:34:33 +02:00
Thierry Goettelmann
4186592f9f feat: technical release (#6281) 2022-06-10 17:05:04 +02:00
Thierry Goettelmann
6c9d5a72a6 feat(xo-web/backup): show cleanVm logs only in case of warnings (#6280) 2022-06-09 22:07:29 +02:00
Julien Fontanet
83690a4dd4 fix(xo-server/_importOvaVm): fix VM creation
Fixes https://xcp-ng.org/forum/post/49920

Introduced by 2af5328a0f
2022-06-09 18:51:35 +02:00
Florent BEAUCHAMP
c11e03ab26 fix(xo-vmdk-to-vhd/generateVmdkData): don't use VM name as OVF filename
It might break the OVA depending on present characters.
2022-06-09 17:18:30 +02:00
Florent BEAUCHAMP
c7d8709267 fix(xo-vmdk-to-vhd/generateVmdkData): reduce compression level
The max value (9) is very slow and should be avoided.
2022-06-09 17:18:30 +02:00
Florent BEAUCHAMP
6579deffad fix(xo-server): don't create zombie task on OVA export
Introduced by 4b9db257f
2022-06-09 17:18:30 +02:00
Julien Fontanet
e2739e7a4b fix(xo-server): make auth tokens created_at/expiration numbers 2022-06-09 16:15:14 +02:00
Florent BEAUCHAMP
c0d587f541 fix(backups): task warning if beforeBackup or checkBaseVdis steps fail (#6266) 2022-06-09 14:39:25 +02:00
Florent BEAUCHAMP
05a96ffc14 fix(xo-web): handle missing result of broken merge tasks in backup logs (#6275) 2022-06-09 14:14:26 +02:00
Julien Fontanet
32a47444d7 feat(proxy-cli): new --url flag
Which can be used instead of `--host` and `--token`.
2022-06-09 13:38:06 +02:00
Julien Fontanet
9ff5de5f33 feat(xo-server): expose _xapiRef to the API
Fixes zammad#7439

This makes objects searchable by their opaque ref in the UI.
2022-06-09 09:52:17 +02:00
Julien Fontanet
09badf33d0 feat(docs/configuration): use NODE_EXTRA_CA_CERTS instead of --use-openssl-ca (#6226)
Fixes zammad#6310

Easier to use and compatible with more distributions.
2022-06-09 09:08:16 +02:00
Julien Fontanet
1643d3637f chore(xo-server/api): remove unused api from context 2022-06-08 22:52:24 +02:00
Julien Fontanet
b962e9ebe8 fix(xo-server/system.methodSignature): declare expected params 2022-06-08 22:52:03 +02:00
Julien Fontanet
66f3528e10 fix(xapi/VM_snapshot): handle undefined VM.VUSBs
Fixes zammad#7401
2022-06-08 16:29:27 +02:00
Julien Fontanet
a5e9f051a2 docs(REST API): content-type is no longer necessary with -T
Because it is no longer set by default to `application/x-www-form-urlencoded` like it was with `--data-binary`.
2022-06-07 23:46:14 +02:00
Julien Fontanet
63bfb76516 docs(REST API): use -T instead of --data-binary for cURL
Because `--data-binary` loads the whole data in memory which isn't compatible with big data like a VHD file, whereas `-T` streams the data to the server.
2022-06-07 23:38:05 +02:00
tkrafael
f88f7d41aa fix(xen-api/putResource): use agent for both requests (#6261)
Fixes #6260
2022-06-07 19:33:33 +02:00
Julien Fontanet
877383ac85 fix(xo-server/sr.createExt): fix SR_create call
Introduced by 052126613
2022-06-07 18:59:30 +02:00
Julien Fontanet
dd5e11e835 feat(xo-server/api): don't filter errors sent to admin users (#6262)
Previous behavior was hiding all errors not explicitly dedicated to be sent to API users and replacing them with an *unknown error from the peer*.

This was done to avoid leaking sensitive information, but it often hides important info.

Administrators can already see the raw errors in Settings/Logs, therefore it makes sense to not hide them for these users.
2022-06-07 13:34:34 +02:00
Julien Fontanet
3d43550ffe feat(xo-cli): provide authentication token description 2022-06-07 10:57:28 +02:00
Julien Fontanet
115bc8fa0a feat(xo-server): authentication tokens can have a description 2022-06-07 10:57:26 +02:00
Julien Fontanet
15c46e324c feat(xo-server/api): new user.getAuthenticationTokens 2022-06-07 10:04:45 +02:00
Julien Fontanet
df38366066 fix(xo-server/collection/redis#get): correctly filter on properties when id is provided 2022-06-07 10:04:14 +02:00
Julien Fontanet
28b13ccfff fix(xo-server/collection/redis#get): don't mutate properties param 2022-06-07 09:57:25 +02:00
Julien Fontanet
26a433ebbe feat(xo-server/createAuthenticationToken): add created_at field 2022-06-07 09:20:34 +02:00
Julien Fontanet
1902595190 feat(xo-server/getAuthenticationTokensForUser): filter and remove expired tokens 2022-06-07 09:15:30 +02:00
Julien Fontanet
80146cfb58 feat(xo-server/proxies): expose auth tokens
First step to expose them in the UI, to make XO Proxies easier to use as HTTP proxies.
2022-06-07 09:02:46 +02:00
Yannick Achy
03d2d6fc94 docs(backups): explain HTTP timeout error and auto power on behavior (#6263)
Co-authored-by: Jon Sands <fohdeesha@gmail.com>
2022-06-05 12:21:39 +02:00
Julien Fontanet
379e4d7596 chore(xo-server): use @xen-orchestra/xapi/VBD_unplug 2022-06-02 17:08:22 +02:00
Julien Fontanet
9860bd770b chore(xo-server): use @xen-orchestra/xapi/VBD_destroy 2022-06-02 17:07:18 +02:00
Julien Fontanet
2af5328a0f chore(xo-server): use @xen-orchestra/xapi/VM_create 2022-06-02 17:02:10 +02:00
Julien Fontanet
4084a44f83 chore(xo-server): use @xen-orchestra/xapi/VDI_exportContent 2022-06-02 16:57:21 +02:00
Julien Fontanet
ba7c7ddb23 chore(xo-server): use @xen-orchestra/xapi/VDI_importContent 2022-06-02 16:54:23 +02:00
Julien Fontanet
2351e7b98c chore(xo-server): use @xen-orchestra/xapi/VBD_create 2022-06-02 16:37:49 +02:00
Julien Fontanet
d353dc622c fix(xapi/VBD_create): don't fail if the VBD could not be plugged
Otherwise, the creation method would have failed but the VBD would still exist, violating the principle of least surprise.
2022-06-02 16:26:29 +02:00
Julien Fontanet
3ef6adfd02 feat(xapi/VBD_create): returns the new VBD's ref 2022-06-02 16:25:19 +02:00
Julien Fontanet
5063a6982a chore(xo-server): use @xen-orchestra/xapi/VDI_create 2022-06-02 16:10:16 +02:00
Julien Fontanet
0008f2845c feat(xapi/VDI_create): move sm_config in second param
Similarly to other creation methods, properties that must be explicited are passed in second param.
2022-06-02 14:45:57 +02:00
Julien Fontanet
a0994bc428 fix(scripts/gen-deps-list.js): add missing await
Introduced by a0836ebdd
2022-06-01 16:51:31 +02:00
Julien Fontanet
8fe0d97aec fix(scripts/gen-deps-list.js): fix packages order (#6259)
`deptree` nodes should be added only once with the full list of their dependencies.

For better display, packages are sorted by names before resolving the graph for nicer display.
2022-06-01 16:07:36 +02:00
Julien Fontanet
a8b3c02780 chore(CHANGELOG): integrate released changes 2022-06-01 15:56:01 +02:00
Julien Fontanet
f3489fb57c feat(xo-web): 5.97.1 2022-06-01 15:51:16 +02:00
Julien Fontanet
434b5b375d feat(xo-server): 5.95.0 2022-06-01 15:51:16 +02:00
Julien Fontanet
445120f9f5 feat(@xen-orchestra/proxy): 0.23.1 2022-06-01 15:51:16 +02:00
Julien Fontanet
71b11f0d9c feat(@xen-orchestra/xapi): 1.1.0 2022-06-01 15:51:16 +02:00
Julien Fontanet
8297a9e0e7 feat(@xen-orchestra/fs): 1.0.3 2022-06-01 15:51:16 +02:00
Florent BEAUCHAMP
4999672f2d fix(xo-web/backups): scheduled health check is available to enterprise (#6257)
Introduced by cae3555ca
2022-06-01 15:36:36 +02:00
Thierry Goettelmann
70608ed7e9 fix(scripts/gen-deps-lists.js): various fixes 2022-06-01 14:04:41 +02:00
Julien Fontanet
a0836ebdd7 feat(scripts/gen-deps-list.js): test mode (#6258) 2022-06-01 13:53:56 +02:00
Florent BEAUCHAMP
2b1edd1d4c feat: always log and display full remote errors (#6216)
Co-authored-by: Julien Fontanet <julien.fontanet@isonoe.net>
2022-05-31 17:30:27 +02:00
Thierry Goettelmann
42bb7cc973 feat: release 5.71.0 (#6256) 2022-05-31 16:20:41 +02:00
Julien Fontanet
8299c37bb7 fix(xo-server/pool.rollingUpdate): duplicate poolId declaration
Introduced by 7a2005c20
2022-05-31 14:32:13 +02:00
Mathieu
7a2005c20c feat(xo-server/pool): disable scheduled job when starting RPU (#6244)
See zammad#5377, zammad#5333
2022-05-31 11:59:52 +02:00
Pierre Donias
ae0eb9e66e fix(xo-web/health): make "Too many snapshots" table sortable by number of snaphots (#6255)
See zammad#6439
2022-05-31 11:45:11 +02:00
Julien Fontanet
052126613a feat(xapi,xo-server): create SRs with other_config.auto-scan=true (#6246)
Fixes https://team.vates.fr/vates/pl/nf18hnr51f8f3f3brcbra57uar
2022-05-31 11:24:15 +02:00
l-austenfeld
7959657bd6 fix(xo-server/xapi): missing shutdownHost default parameter (#6253)
Add a default empty object parameter to enable calls to shutdownHost with only one parameter.
This implicitly fixes the density load-balancer, since it calls shutdownHost with only one parameter.
2022-05-31 10:01:47 +02:00
Thierry Goettelmann
9f8bb376ea feat: technical release (#6254) 2022-05-30 17:45:59 +02:00
Julien Fontanet
ee8e2fa906 docs(REST API): use | cat trick in VDI import example 2022-05-30 16:51:35 +02:00
Julien Fontanet
33a380b173 docs(REST API): add name_label param in VDI import example 2022-05-30 16:50:36 +02:00
Julien Fontanet
6e5b6996fa docs(REST API): add required content-type in VM import 2022-05-30 16:48:01 +02:00
Julien Fontanet
6409dc276c docs(REST API): don't use --progress-bar in VDI import example
This is not necessary and more in line with other examples.
2022-05-30 16:46:54 +02:00
Julien Fontanet
98f7ce43e3 feat(xo-server/RestApi): VDI import now returns the new VDI's UUID 2022-05-30 16:45:41 +02:00
Julien Fontanet
aa076e1d2d chore(xo-server/rest-api): use xen-api shorthand syntax 2022-05-30 16:23:39 +02:00
Julien Fontanet
7a096d1b5c chore(xo-server/rest-api): remove unnecessary awaits 2022-05-30 16:00:43 +02:00
Julien Fontanet
93b17ccddd chore(xo-server/api/vm): format with Prettier
Introduced by d7d81431e
2022-05-30 16:00:43 +02:00
Julien Fontanet
68c118c3e5 fix(xo-server/api/vm): add missing quote
Introduced by d7d81431e
2022-05-30 16:00:43 +02:00
Thierry Goettelmann
c0b0ba433f feat(backups,xo-web): add cleanVm warnings to task (#6225) 2022-05-30 15:39:54 +02:00
Thierry Goettelmann
d7d81431ef feat(xo-server/vm.migrate): call VM.assert_can_migrate before (#6245)
Fixes #5301
2022-05-30 15:04:12 +02:00
Pierre Donias
7451f45885 fix(xo-web/home): don't make VM's resource set name clickable for non-admins (#6252)
See https://xcp-ng.org/forum/topic/5902/permissions-for-users-to-be-able-to-snapshot/5?_=1653902135402

Non-admin users aren't allowed to view the Self Service page so it doesn't make
sense to have a link to that page
2022-05-30 15:02:03 +02:00
Florent BEAUCHAMP
c9882001a9 fix(xo-web,xo-server): store health check settings in job instead of schedule (#6251)
Introduced by cae3555ca7
2022-05-30 14:56:28 +02:00
Mathieu
837b06ef2b feat(xo-server/xo-web/pool): avoid RPU/ host reboot, shutdown / host agent reboot during backup (#6232)
See zammad#5377
2022-05-30 11:13:13 +02:00
Julien Fontanet
0e49150b8e feat(xo-server/RestApi): add VDI import
Related to zammad#7036
2022-05-29 20:48:59 +02:00
Julien Fontanet
0ec5f4bf68 chore(proxy,xo-server): update to http-server-plus@0.11.1
This new version fixes, among others, the support of the `Expect: 100-Continue` HTTP header, which is notably used by cURL during `POST`.
2022-05-29 20:44:00 +02:00
Julien Fontanet
601730d737 feat(xapi): new SR_importVdi()
Creates a new VDI on an SR from a VHD.
2022-05-29 20:44:00 +02:00
Julien Fontanet
28eb4b21bd fix(xo-server/disk.import): VHD import
Introduced by 0706e6f4ff
2022-05-29 14:09:08 +02:00
Julien Fontanet
a5afe0bca1 feat(vhd-lib/peekFooterFromStream): check checksum and content 2022-05-29 14:07:48 +02:00
558 changed files with 32677 additions and 12345 deletions

View File

@@ -28,8 +28,10 @@ module.exports = {
},
},
{
files: ['*.spec.{,c,m}js'],
files: ['*.{spec,test}.{,c,m}js'],
rules: {
'n/no-unpublished-require': 'off',
'n/no-unpublished-import': 'off',
'n/no-unsupported-features/node-builtins': [
'error',
{

View File

@@ -6,7 +6,10 @@ labels: 'status: triaging :triangular_flag_on_post:, type: bug :bug:'
assignees: ''
---
**XOA or XO from the sources?**
1. ⚠️ **If you don't follow this template, the issue will be closed**.
2. ⚠️ **If your issue can't be easily reproduced, please report it [on the forum first](https://xcp-ng.org/forum/category/12/xen-orchestra)**.
Are you using XOA or XO from the sources?
If XOA:
@@ -15,6 +18,7 @@ If XOA:
If XO from the sources:
- Provide **your commit number**. If it's older than a week, we won't investigate
- Don't forget to [read this first](https://xen-orchestra.com/docs/community.html)
- As well as follow [this guide](https://xen-orchestra.com/docs/community.html#report-a-bug)
@@ -38,8 +42,6 @@ If applicable, add screenshots to help explain your problem.
**Environment (please provide the following information):**
- Node: [e.g. 16.12.1]
- xo-server: [e.g. 5.82.3]
- xo-web: [e.g. 5.87.0]
- hypervisor: [e.g. XCP-ng 8.2.0]
**Additional context**

View File

@@ -4,7 +4,6 @@ about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**

View File

@@ -1,13 +1,12 @@
name: CI
on: [push]
on: push
jobs:
build:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- run: docker-compose -f docker/docker-compose.dev.yml build
- run: docker-compose -f docker/docker-compose.dev.yml up
- name: Checkout
uses: actions/checkout@v3
- name: Build docker image
run: docker-compose -f docker/docker-compose.dev.yml build
- name: Create the container and start the tests
run: docker-compose -f docker/docker-compose.dev.yml up --exit-code-from xo

2
.gitignore vendored
View File

@@ -10,8 +10,6 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat

4
.husky/pre-commit Executable file
View File

@@ -0,0 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"
npx lint-staged

View File

@@ -14,7 +14,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contains the following options:
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `1`
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: wether to stop iteration of first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

View File

@@ -32,7 +32,7 @@ Returns a promise wich rejects as soon as a call to `iteratee` throws or a promi
`opts` is an object that can contains the following options:
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `1`
- `concurrency`: a number which indicates the maximum number of parallel call to `iteratee`, defaults to `10`. The value `0` means no concurrency limit.
- `signal`: an abort signal to stop the iteration
- `stopOnError`: wether to stop iteration of first error, or wait for all calls to finish and throw an `AggregateError`, defaults to `true`

View File

@@ -9,7 +9,16 @@ class AggregateError extends Error {
}
}
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 1, signal, stopOnError = true } = {}) {
/**
* @template Item
* @param {Iterable<Item>} iterable
* @param {(item: Item, index: number, iterable: Iterable<Item>) => Promise<void>} iteratee
* @returns {Promise<void>}
*/
exports.asyncEach = function asyncEach(iterable, iteratee, { concurrency = 10, signal, stopOnError = true } = {}) {
if (concurrency === 0) {
concurrency = Infinity
}
return new Promise((resolve, reject) => {
const it = (iterable[Symbol.iterator] || iterable[Symbol.asyncIterator]).call(iterable)
const errors = []

View File

@@ -1,6 +1,8 @@
'use strict'
/* eslint-env jest */
const { describe, it, beforeEach } = require('test')
const assert = require('assert').strict
const { spy } = require('sinon')
const { asyncEach } = require('./')
@@ -34,12 +36,18 @@ describe('asyncEach', () => {
})
it('works', async () => {
const iteratee = jest.fn(async () => {})
const iteratee = spy(async () => {})
await asyncEach.call(thisArg, iterable, iteratee)
await asyncEach.call(thisArg, iterable, iteratee, { concurrency: 1 })
expect(iteratee.mock.instances).toEqual(Array.from(values, () => thisArg))
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
assert.deepStrictEqual(
iteratee.thisValues,
Array.from(values, () => thisArg)
)
assert.deepStrictEqual(
iteratee.args,
Array.from(values, (value, index) => [value, index, iterable])
)
})
;[1, 2, 4].forEach(concurrency => {
it('respects a concurrency of ' + concurrency, async () => {
@@ -49,7 +57,7 @@ describe('asyncEach', () => {
values,
async () => {
++running
expect(running).toBeLessThanOrEqual(concurrency)
assert.deepStrictEqual(running <= concurrency, true)
await randomDelay()
--running
},
@@ -59,40 +67,52 @@ describe('asyncEach', () => {
})
it('stops on first error when stopOnError is true', async () => {
const tracker = new assert.CallTracker()
const error = new Error()
const iteratee = jest.fn((_, i) => {
const iteratee = tracker.calls((_, i) => {
if (i === 1) {
throw error
}
})
}, 2)
assert.deepStrictEqual(
await rejectionOf(asyncEach(iterable, iteratee, { concurrency: 1, stopOnError: true })),
error
)
expect(await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: true }))).toBe(error)
expect(iteratee).toHaveBeenCalledTimes(2)
tracker.verify()
})
it('rejects AggregateError when stopOnError is false', async () => {
const errors = []
const iteratee = jest.fn(() => {
const iteratee = spy(() => {
const error = new Error()
errors.push(error)
throw error
})
const error = await rejectionOf(asyncEach(iterable, iteratee, { stopOnError: false }))
expect(error.errors).toEqual(errors)
expect(iteratee.mock.calls).toEqual(Array.from(values, (value, index) => [value, index, iterable]))
assert.deepStrictEqual(error.errors, errors)
assert.deepStrictEqual(
iteratee.args,
Array.from(values, (value, index) => [value, index, iterable])
)
})
it('can be interrupted with an AbortSignal', async () => {
const tracker = new assert.CallTracker()
const ac = new AbortController()
const iteratee = jest.fn((_, i) => {
const iteratee = tracker.calls((_, i) => {
if (i === 1) {
ac.abort()
}
}, 2)
await assert.rejects(asyncEach(iterable, iteratee, { concurrency: 1, signal: ac.signal }), {
message: 'asyncEach aborted',
})
await expect(asyncEach(iterable, iteratee, { signal: ac.signal })).rejects.toThrow('asyncEach aborted')
expect(iteratee).toHaveBeenCalledTimes(2)
tracker.verify()
})
})
)

View File

@@ -24,11 +24,17 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.0",
"version": "1.0.0",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"sinon": "^14.0.1",
"tap": "^16.3.0",
"test": "^3.2.1"
}
}

View File

@@ -1,6 +1,7 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const assert = require('assert')
const { coalesceCalls } = require('./')
@@ -23,13 +24,13 @@ describe('coalesceCalls', () => {
const promise2 = fn(defer2.promise)
defer1.resolve('foo')
expect(await promise1).toBe('foo')
expect(await promise2).toBe('foo')
assert.strictEqual(await promise1, 'foo')
assert.strictEqual(await promise2, 'foo')
const defer3 = pDefer()
const promise3 = fn(defer3.promise)
defer3.resolve('bar')
expect(await promise3).toBe('bar')
assert.strictEqual(await promise3, 'bar')
})
})

View File

@@ -30,6 +30,10 @@
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
}
}

View File

@@ -1,6 +1,7 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const assert = require('node:assert').strict
const { compose } = require('./')
@@ -9,43 +10,42 @@ const mul3 = x => x * 3
describe('compose()', () => {
it('throws when no functions is passed', () => {
expect(() => compose()).toThrow(TypeError)
expect(() => compose([])).toThrow(TypeError)
assert.throws(() => compose(), TypeError)
assert.throws(() => compose([]), TypeError)
})
it('applies from left to right', () => {
expect(compose(add2, mul3)(5)).toBe(21)
assert.strictEqual(compose(add2, mul3)(5), 21)
})
it('accepts functions in an array', () => {
expect(compose([add2, mul3])(5)).toBe(21)
assert.strictEqual(compose([add2, mul3])(5), 21)
})
it('can apply from right to left', () => {
expect(compose({ right: true }, add2, mul3)(5)).toBe(17)
assert.strictEqual(compose({ right: true }, add2, mul3)(5), 17)
})
it('accepts options with functions in an array', () => {
expect(compose({ right: true }, [add2, mul3])(5)).toBe(17)
assert.strictEqual(compose({ right: true }, [add2, mul3])(5), 17)
})
it('can compose async functions', async () => {
expect(
assert.strictEqual(
await compose(
{ async: true },
async x => x + 2,
async x => x * 3
)(5)
).toBe(21)
)(5),
21
)
})
it('forwards all args to first function', () => {
expect.assertions(1)
const expectedArgs = [Math.random(), Math.random()]
compose(
(...args) => {
expect(args).toEqual(expectedArgs)
assert.deepEqual(args, expectedArgs)
},
// add a second function to avoid the one function special case
Function.prototype
@@ -53,15 +53,13 @@ describe('compose()', () => {
})
it('forwards context to all functions', () => {
expect.assertions(2)
const expectedThis = {}
compose(
function () {
expect(this).toBe(expectedThis)
assert.strictEqual(this, expectedThis)
},
function () {
expect(this).toBe(expectedThis)
assert.strictEqual(this, expectedThis)
}
).call(expectedThis)
})

View File

@@ -19,6 +19,10 @@
"node": ">=7.6"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
}
}

View File

@@ -1,7 +1,7 @@
'use strict'
const assert = require('assert')
const { describe, it } = require('tap').mocha
const { describe, it } = require('test')
const { decorateClass, decorateWith, decorateMethodsWith, perInstance } = require('./')

View File

@@ -26,9 +26,9 @@
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
"test": "node--test"
},
"devDependencies": {
"tap": "^16.0.1"
"test": "^3.2.1"
}
}

View File

@@ -1,16 +1,17 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const { useFakeTimers, spy, assert } = require('sinon')
const { createDebounceResource } = require('./debounceResource')
jest.useFakeTimers()
const clock = useFakeTimers()
describe('debounceResource()', () => {
it('calls the resource disposer after 10 seconds', async () => {
const debounceResource = createDebounceResource()
const delay = 10e3
const dispose = jest.fn()
const dispose = spy()
const resource = await debounceResource(
Promise.resolve({
@@ -22,10 +23,10 @@ describe('debounceResource()', () => {
resource.dispose()
expect(dispose).not.toBeCalled()
assert.notCalled(dispose)
jest.advanceTimersByTime(delay)
clock.tick(delay)
expect(dispose).toBeCalled()
assert.called(dispose)
})
})

View File

@@ -1,13 +1,14 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const { spy, assert } = require('sinon')
const { deduped } = require('./deduped')
describe('deduped()', () => {
it('calls the resource function only once', async () => {
const value = {}
const getResource = jest.fn(async () => ({
const getResource = spy(async () => ({
value,
dispose: Function.prototype,
}))
@@ -17,13 +18,13 @@ describe('deduped()', () => {
const { value: v1 } = await dedupedGetResource()
const { value: v2 } = await dedupedGetResource()
expect(getResource).toHaveBeenCalledTimes(1)
expect(v1).toBe(value)
expect(v2).toBe(value)
assert.calledOnce(getResource)
assert.match(v1, value)
assert.match(v2, value)
})
it('only disposes the source disposable when its all copies dispose', async () => {
const dispose = jest.fn()
const dispose = spy()
const getResource = async () => ({
value: '',
dispose,
@@ -36,35 +37,35 @@ describe('deduped()', () => {
d1()
expect(dispose).not.toHaveBeenCalled()
assert.notCalled(dispose)
d2()
expect(dispose).toHaveBeenCalledTimes(1)
assert.calledOnce(dispose)
})
it('works with sync factory', () => {
const value = {}
const dispose = jest.fn()
const dispose = spy()
const dedupedGetResource = deduped(() => ({ value, dispose }))
const d1 = dedupedGetResource()
expect(d1.value).toBe(value)
assert.match(d1.value, value)
const d2 = dedupedGetResource()
expect(d2.value).toBe(value)
assert.match(d2.value, value)
d1.dispose()
expect(dispose).not.toHaveBeenCalled()
assert.notCalled(dispose)
d2.dispose()
expect(dispose).toHaveBeenCalledTimes(1)
assert.calledOnce(dispose)
})
it('no race condition on dispose before async acquisition', async () => {
const dispose = jest.fn()
const dispose = spy()
const dedupedGetResource = deduped(async () => ({ value: 42, dispose }))
const d1 = await dedupedGetResource()
@@ -73,6 +74,6 @@ describe('deduped()', () => {
d1.dispose()
expect(dispose).not.toHaveBeenCalled()
assert.notCalled(dispose)
})
})

View File

@@ -14,17 +14,22 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "0.1.1",
"version": "0.1.3",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"dependencies": {
"@vates/multi-key-map": "^0.1.0",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/log": "^0.5.0",
"ensure-array": "^1.0.0"
},
"devDependencies": {
"sinon": "^14.0.1",
"test": "^3.2.1"
}
}

View File

@@ -9,7 +9,7 @@ exports.EventListenersManager = class EventListenersManager {
}
add(type, listener) {
let listeners = this._listeners[type]
let listeners = this._listeners.get(type)
if (listeners === undefined) {
listeners = new Set()
this._listeners.set(type, listeners)

View File

@@ -0,0 +1,67 @@
'use strict'
const t = require('tap')
const { EventEmitter } = require('events')
const { EventListenersManager } = require('./')
const noop = Function.prototype
// function spy (impl = Function.prototype) {
// function spy() {
// spy.calls.push([Array.from(arguments), this])
// }
// spy.calls = []
// return spy
// }
function assertListeners(t, event, listeners) {
t.strictSame(t.context.ee.listeners(event), listeners)
}
t.beforeEach(function (t) {
t.context.ee = new EventEmitter()
t.context.em = new EventListenersManager(t.context.ee)
})
t.test('.add adds a listener', function (t) {
t.context.em.add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.add does not add a duplicate listener', function (t) {
t.context.em.add('foo', noop).add('foo', noop)
assertListeners(t, 'foo', [noop])
t.end()
})
t.test('.remove removes a listener', function (t) {
t.context.em.add('foo', noop).remove('foo', noop)
assertListeners(t, 'foo', [])
t.end()
})
t.test('.removeAll removes all listeners of a given type', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll('foo')
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [noop])
t.end()
})
t.test('.removeAll removes all listeners', function (t) {
t.context.em.add('foo', noop).add('bar', noop).removeAll()
assertListeners(t, 'foo', [])
assertListeners(t, 'bar', [])
t.end()
})

View File

@@ -35,8 +35,12 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "1.0.1",
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "tap --branches=72"
},
"devDependencies": {
"tap": "^16.2.0"
}
}

1
@vates/fuse-vhd/.npmignore Symbolic link
View File

@@ -0,0 +1 @@
../../scripts/npmignore

66
@vates/fuse-vhd/index.js Normal file
View File

@@ -0,0 +1,66 @@
'use strict'
const LRU = require('lru-cache')
const Fuse = require('fuse-native')
const { VhdSynthetic } = require('vhd-lib')
const { Disposable, fromCallback } = require('promise-toolbox')
// build a s stat object from https://github.com/fuse-friends/fuse-native/blob/master/test/fixtures/stat.js
const stat = st => ({
mtime: st.mtime || new Date(),
atime: st.atime || new Date(),
ctime: st.ctime || new Date(),
size: st.size !== undefined ? st.size : 0,
mode: st.mode === 'dir' ? 16877 : st.mode === 'file' ? 33188 : st.mode === 'link' ? 41453 : st.mode,
uid: st.uid !== undefined ? st.uid : process.getuid(),
gid: st.gid !== undefined ? st.gid : process.getgid(),
})
exports.mount = Disposable.factory(async function* mount(handler, diskPath, mountDir) {
const vhd = yield VhdSynthetic.fromVhdChain(handler, diskPath)
const cache = new LRU({
max: 16, // each cached block is 2MB in size
})
await vhd.readBlockAllocationTable()
const fuse = new Fuse(mountDir, {
async readdir(path, cb) {
if (path === '/') {
return cb(null, ['vhd0'])
}
cb(Fuse.ENOENT)
},
async getattr(path, cb) {
if (path === '/') {
return cb(
null,
stat({
mode: 'dir',
size: 4096,
})
)
}
if (path === '/vhd0') {
return cb(
null,
stat({
mode: 'file',
size: vhd.footer.currentSize,
})
)
}
cb(Fuse.ENOENT)
},
read(path, fd, buf, len, pos, cb) {
if (path === '/vhd0') {
return vhd.readRawData(pos, len, cache, buf).then(cb)
}
throw new Error(`read file ${path} not exists`)
},
})
return new Disposable(
() => fromCallback(() => fuse.unmount()),
fromCallback(() => fuse.mount())
)
})

View File

@@ -0,0 +1,29 @@
{
"name": "@vates/fuse-vhd",
"version": "1.0.0",
"license": "ISC",
"private": false,
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/fuse-vhd",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/fuse-vhd",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"engines": {
"node": ">=10.0"
},
"dependencies": {
"fuse-native": "^2.2.6",
"lru-cache": "^7.14.0",
"promise-toolbox": "^0.21.0",
"vhd-lib": "^4.2.0"
},
"scripts": {
"postversion": "npm publish --access public"
}
}

View File

@@ -1,6 +1,7 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const assert = require('node:assert')
const { MultiKeyMap } = require('./')
@@ -28,9 +29,9 @@ describe('MultiKeyMap', () => {
keys.forEach((key, i) => {
// copy the key to make sure the array itself is not the key
expect(map.get(key.slice())).toBe(values[i])
assert.strictEqual(map.get(key.slice()), values[i])
map.delete(key.slice())
expect(map.get(key.slice())).toBe(undefined)
assert.strictEqual(map.get(key.slice()), undefined)
})
})
})

View File

@@ -23,6 +23,10 @@
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"devDependencies": {
"test": "^3.2.1"
}
}

View File

@@ -0,0 +1,16 @@
### `new NdbClient({address, exportname, secure = true, port = 10809})`
create a new nbd client
```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
address: 'MY_NBD_HOST',
exportname: 'MY_SECRET_EXPORT',
cert: 'Server certificate', // optional, will use encrypted link if provided
})
await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```

View File

@@ -0,0 +1 @@
../../scripts/npmignore

View File

@@ -0,0 +1,47 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/nbd-client
[![Package Version](https://badgen.net/npm/v/@vates/nbd-client)](https://npmjs.org/package/@vates/nbd-client) ![License](https://badgen.net/npm/license/@vates/nbd-client) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/nbd-client)](https://bundlephobia.com/result?p=@vates/nbd-client) [![Node compatibility](https://badgen.net/npm/node/@vates/nbd-client)](https://npmjs.org/package/@vates/nbd-client)
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/nbd-client):
```
> npm install --save @vates/nbd-client
```
## Usage
### `new NdbClient({address, exportname, secure = true, port = 10809})`
create a new nbd client
```js
import NbdClient from '@vates/nbd-client'
const client = new NbdClient({
address: 'MY_NBD_HOST',
exportname: 'MY_SECRET_EXPORT',
cert: 'Server certificate', // optional, will use encrypted link if provided
})
await client.connect()
const block = await client.readBlock(blockIndex, BlockSize)
await client.disconnect()
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

View File

@@ -0,0 +1,42 @@
'use strict'
exports.INIT_PASSWD = Buffer.from('NBDMAGIC') // "NBDMAGIC" ensure we're connected to a nbd server
exports.OPTS_MAGIC = Buffer.from('IHAVEOPT') // "IHAVEOPT" start an option block
exports.NBD_OPT_REPLY_MAGIC = 1100100111001001n // magic received during negociation
exports.NBD_OPT_EXPORT_NAME = 1
exports.NBD_OPT_ABORT = 2
exports.NBD_OPT_LIST = 3
exports.NBD_OPT_STARTTLS = 5
exports.NBD_OPT_INFO = 6
exports.NBD_OPT_GO = 7
exports.NBD_FLAG_HAS_FLAGS = 1 << 0
exports.NBD_FLAG_READ_ONLY = 1 << 1
exports.NBD_FLAG_SEND_FLUSH = 1 << 2
exports.NBD_FLAG_SEND_FUA = 1 << 3
exports.NBD_FLAG_ROTATIONAL = 1 << 4
exports.NBD_FLAG_SEND_TRIM = 1 << 5
exports.NBD_FLAG_FIXED_NEWSTYLE = 1 << 0
exports.NBD_CMD_FLAG_FUA = 1 << 0
exports.NBD_CMD_FLAG_NO_HOLE = 1 << 1
exports.NBD_CMD_FLAG_DF = 1 << 2
exports.NBD_CMD_FLAG_REQ_ONE = 1 << 3
exports.NBD_CMD_FLAG_FAST_ZERO = 1 << 4
exports.NBD_CMD_READ = 0
exports.NBD_CMD_WRITE = 1
exports.NBD_CMD_DISC = 2
exports.NBD_CMD_FLUSH = 3
exports.NBD_CMD_TRIM = 4
exports.NBD_CMD_CACHE = 5
exports.NBD_CMD_WRITE_ZEROES = 6
exports.NBD_CMD_BLOCK_STATUS = 7
exports.NBD_CMD_RESIZE = 8
exports.NBD_REQUEST_MAGIC = 0x25609513 // magic number to create a new NBD request to send to the server
exports.NBD_REPLY_MAGIC = 0x67446698 // magic number received from the server when reading response to a nbd request
exports.NBD_REPLY_ACK = 1
exports.NBD_DEFAULT_PORT = 10809
exports.NBD_DEFAULT_BLOCK_SIZE = 64 * 1024

243
@vates/nbd-client/index.js Normal file
View File

@@ -0,0 +1,243 @@
'use strict'
const assert = require('node:assert')
const { Socket } = require('node:net')
const { connect } = require('node:tls')
const {
INIT_PASSWD,
NBD_CMD_READ,
NBD_DEFAULT_BLOCK_SIZE,
NBD_DEFAULT_PORT,
NBD_FLAG_FIXED_NEWSTYLE,
NBD_FLAG_HAS_FLAGS,
NBD_OPT_EXPORT_NAME,
NBD_OPT_REPLY_MAGIC,
NBD_OPT_STARTTLS,
NBD_REPLY_ACK,
NBD_REPLY_MAGIC,
NBD_REQUEST_MAGIC,
OPTS_MAGIC,
} = require('./constants.js')
const { fromCallback } = require('promise-toolbox')
const { readChunkStrict } = require('@vates/read-chunk')
// documentation is here : https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md
module.exports = class NbdClient {
#serverAddress
#serverCert
#serverPort
#serverSocket
#exportName
#exportSize
// AFAIK, there is no guaranty the server answers in the same order as the queries
// so we handle a backlog of command waiting for response and handle concurrency manually
#waitingForResponse // there is already a listenner waiting for a response
#nextCommandQueryId = BigInt(0)
#commandQueryBacklog // map of command waiting for an response queryId => { size/*in byte*/, resolve, reject}
constructor({ address, port = NBD_DEFAULT_PORT, exportname, cert }) {
this.#serverAddress = address
this.#serverPort = port
this.#exportName = exportname
this.#serverCert = cert
}
get exportSize() {
return this.#exportSize
}
async #tlsConnect() {
return new Promise((resolve, reject) => {
this.#serverSocket = connect({
socket: this.#serverSocket,
rejectUnauthorized: false,
cert: this.#serverCert,
})
this.#serverSocket.once('error', reject)
this.#serverSocket.once('secureConnect', () => {
this.#serverSocket.removeListener('error', reject)
resolve()
})
})
}
// mandatory , at least to start the handshake
async #unsecureConnect() {
this.#serverSocket = new Socket()
return new Promise((resolve, reject) => {
this.#serverSocket.connect(this.#serverPort, this.#serverAddress)
this.#serverSocket.once('error', reject)
this.#serverSocket.once('connect', () => {
this.#serverSocket.removeListener('error', reject)
resolve()
})
})
}
async connect() {
// first we connect to the serve without tls, and then we upgrade the connection
// to tls during the handshake
await this.#unsecureConnect()
await this.#handshake()
// reset internal state if we reconnected a nbd client
this.#commandQueryBacklog = new Map()
this.#waitingForResponse = false
}
async disconnect() {
await this.#serverSocket.destroy()
}
// we can use individual read/write from the socket here since there is no concurrency
async #sendOption(option, buffer = Buffer.alloc(0)) {
await this.#write(OPTS_MAGIC)
await this.#writeInt32(option)
await this.#writeInt32(buffer.length)
await this.#write(buffer)
assert.strictEqual(await this.#readInt64(), NBD_OPT_REPLY_MAGIC) // magic number everywhere
assert.strictEqual(await this.#readInt32(), option) // the option passed
assert.strictEqual(await this.#readInt32(), NBD_REPLY_ACK) // ACK
const length = await this.#readInt32()
assert.strictEqual(length, 0) // length
}
// we can use individual read/write from the socket here since there is only one handshake at once, no concurrency
//
// Performs the fixed-newstyle NBD negotiation:
//   1. read the server greeting (INIT_PASSWD + option magic + 16-bit flags)
//   2. optionally upgrade the socket to TLS (NBD_OPT_STARTTLS)
//   3. select the export (NBD_OPT_EXPORT_NAME), which ends negotiation and
//      yields the export size and transmission flags
async #handshake() {
  assert((await this.#read(8)).equals(INIT_PASSWD))
  assert((await this.#read(8)).equals(OPTS_MAGIC))
  const flagsBuffer = await this.#read(2)
  const flags = flagsBuffer.readInt16BE(0)
  assert.strictEqual(flags & NBD_FLAG_FIXED_NEWSTYLE, NBD_FLAG_FIXED_NEWSTYLE) // only FIXED_NEWSTYLE one is supported from the server options
  await this.#writeInt32(NBD_FLAG_FIXED_NEWSTYLE) // client also support NBD_FLAG_C_FIXED_NEWSTYLE
  if (this.#serverCert !== undefined) {
    // upgrade socket to TLS if needed
    await this.#sendOption(NBD_OPT_STARTTLS)
    await this.#tlsConnect()
  }
  // send export name we want to access.
  // it's implicitly closing the negotiation phase.
  await this.#write(OPTS_MAGIC)
  await this.#writeInt32(NBD_OPT_EXPORT_NAME)
  const exportNameBuffer = Buffer.from(this.#exportName)
  await this.#writeInt32(exportNameBuffer.length)
  await this.#write(exportNameBuffer)
  // 8 (export size ) + 2 (flags) + 124 zero = 134
  // must read all to ensure nothing stays in the buffer
  const answer = await this.#read(134)
  this.#exportSize = answer.readBigUInt64BE(0)
  const transmissionFlags = answer.readInt16BE(8)
  assert.strictEqual(transmissionFlags & NBD_FLAG_HAS_FLAGS, NBD_FLAG_HAS_FLAGS, 'NBD_FLAG_HAS_FLAGS') // must always be 1 by the norm
  // note : xapi server always send NBD_FLAG_READ_ONLY (3) as a flag
}
// Reads exactly `length` bytes from the socket; readChunkStrict throws if the
// stream ends before the requested data could be read.
#read(length) {
  return readChunkStrict(this.#serverSocket, length)
}
// Writes `buffer` to the socket; promisified so callers can await the write
// callback instead of handling it directly.
#write(buffer) {
  return fromCallback.call(this.#serverSocket, 'write', buffer)
}
async #readInt32() {
const buffer = await this.#read(4)
return buffer.readInt32BE(0)
}
async #readInt64() {
const buffer = await this.#read(8)
return buffer.readBigUInt64BE(0)
}
#writeInt32(int) {
const buffer = Buffer.alloc(4)
buffer.writeInt32BE(int)
return this.#write(buffer)
}
// When one read fails, stop everything: fail every pending query with the
// given error, then drop the connection.
async #rejectAll(error) {
  for (const { reject } of this.#commandQueryBacklog.values()) {
    reject(error)
  }
  await this.disconnect()
}
// Reads one block response from the socket and resolves the matching pending
// query. The server may answer queries out of order, so each response carries
// the id of the query it answers and we look it up in the backlog. At most one
// reader runs at a time (#waitingForResponse guard); after a successful read
// it recurses to drain remaining pending queries.
async #readBlockResponse() {
  // ensure at most one read occur in parallel
  if (this.#waitingForResponse) {
    return
  }
  try {
    this.#waitingForResponse = true
    const magic = await this.#readInt32()
    if (magic !== NBD_REPLY_MAGIC) {
      throw new Error(`magic number for block answer is wrong : ${magic} ${NBD_REPLY_MAGIC}`)
    }
    // a non-zero value means the server failed to serve this request
    const error = await this.#readInt32()
    if (error !== 0) {
      // @todo use error code from constants.mjs
      throw new Error(`GOT ERROR CODE : ${error}`)
    }
    const blockQueryId = await this.#readInt64()
    const query = this.#commandQueryBacklog.get(blockQueryId)
    if (!query) {
      throw new Error(` no query associated with id ${blockQueryId}`)
    }
    this.#commandQueryBacklog.delete(blockQueryId)
    // the payload length is the size requested by the matching query
    const data = await this.#read(query.size)
    query.resolve(data)
    this.#waitingForResponse = false
    if (this.#commandQueryBacklog.size > 0) {
      await this.#readBlockResponse()
    }
  } catch (error) {
    // reject all the promises
    // we don't need to call readBlockResponse on failure
    // since we will empty the backlog
    // (note: #waitingForResponse stays true here; it is reset by connect())
    await this.#rejectAll(error)
  }
}
// Reads the `index`-th block of `size` bytes from the export and resolves with
// a Buffer of `size` bytes.
//
// NOTE(review): writeBigUInt64BE requires a BigInt, so #nextCommandQueryId is
// presumably initialized as a BigInt (e.g. 0n) in the constructor — confirm.
async readBlock(index, size = NBD_DEFAULT_BLOCK_SIZE) {
  const queryId = this.#nextCommandQueryId
  this.#nextCommandQueryId++
  // create and send command at once to ensure there is no concurrency issue
  const buffer = Buffer.alloc(28)
  buffer.writeInt32BE(NBD_REQUEST_MAGIC, 0) // it is a nbd request
  buffer.writeInt16BE(0, 4) // no command flags for a simple block read
  buffer.writeInt16BE(NBD_CMD_READ, 6) // we want to read a data block
  buffer.writeBigUInt64BE(queryId, 8)
  // byte offset in the raw disk
  buffer.writeBigUInt64BE(BigInt(index) * BigInt(size), 16)
  buffer.writeInt32BE(size, 24)
  return new Promise((resolve, reject) => {
    // this will handle one block response, but it can be another block
    // since server does not guaranty to handle query in order
    // (the query is registered *before* writing so a fast response cannot race us)
    this.#commandQueryBacklog.set(queryId, {
      size,
      resolve,
      reject,
    })
    // really send the command to the server
    this.#write(buffer).catch(reject)
    // #readBlockResponse never throws directly
    // but if it fails it will reject all the promises in the backlog
    this.#readBlockResponse()
  })
}
}

View File

@@ -0,0 +1,76 @@
'use strict'
const NbdClient = require('./index.js')
const { spawn } = require('node:child_process')
const fs = require('node:fs/promises')
const { test } = require('tap')
const tmp = require('tmp')
const { pFromCallback } = require('promise-toolbox')
const { asyncEach } = require('@vates/async-each')
const FILE_SIZE = 2 * 1024 * 1024
// Creates a temporary file of `size` bytes in which every 4-byte big-endian
// word at offset i contains i, so block contents can be verified independently.
async function createTempFile(size) {
  const filePath = await pFromCallback(cb => tmp.file(cb))
  const content = Buffer.alloc(size, 0)
  for (let offset = 0; offset < size; offset += 4) {
    content.writeUInt32BE(offset, offset)
  }
  await fs.writeFile(filePath, content)
  return filePath
}
// Integration test: spawns a local nbdkit server (must be installed on the
// host) exposing a generated file over plain (non-TLS) NBD, then checks that
// the client can read every block back, in parallel, with a non-default size.
test('it works with unsecured network', async tap => {
  const path = await createTempFile(FILE_SIZE)
  const nbdServer = spawn(
    'nbdkit',
    [
      'file',
      path,
      '--newstyle', //
      '--exit-with-parent',
      '--read-only',
      '--export-name=MY_SECRET_EXPORT',
    ],
    {
      stdio: ['inherit', 'inherit', 'inherit'],
    }
  )
  const client = new NbdClient({
    address: 'localhost',
    exportname: 'MY_SECRET_EXPORT',
    secure: false,
  })
  await client.connect()
  // export size is a BigInt, compare accordingly
  tap.equal(client.exportSize, BigInt(FILE_SIZE))
  const CHUNK_SIZE = 128 * 1024 // non default size
  const indexes = []
  for (let i = 0; i < FILE_SIZE / CHUNK_SIZE; i++) {
    indexes.push(i)
  }
  // read multiple blocks in parallel
  await asyncEach(
    indexes,
    async i => {
      const block = await client.readBlock(i, CHUNK_SIZE)
      // each 4-byte word must contain its absolute file offset (see createTempFile)
      let blockOk = true
      let firstFail
      for (let j = 0; j < CHUNK_SIZE; j += 4) {
        const wanted = i * CHUNK_SIZE + j
        const found = block.readUInt32BE(j)
        blockOk = blockOk && found === wanted
        if (!blockOk && firstFail === undefined) {
          firstFail = j
        }
      }
      tap.ok(blockOk, `check block ${i} content`)
    },
    { concurrency: 8 }
  )
  await client.disconnect()
  nbdServer.kill()
  await fs.unlink(path)
})

View File

@@ -0,0 +1,35 @@
{
"private": false,
"name": "@vates/nbd-client",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/nbd-client",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"directory": "@vates/nbd-client",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=14.0"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"promise-toolbox": "^0.21.0",
"xen-api": "^1.2.2"
},
"devDependencies": {
"tap": "^16.3.0",
"tmp": "^0.2.1"
},
"scripts": {
"postversion": "npm publish --access public",
"test-integration": "tap *.spec.js"
}
}

130
@vates/otp/.USAGE.md Normal file
View File

@@ -0,0 +1,130 @@
### Usual workflow
> This section presents how this library should be used to implement a classic two-factor authentication.
#### Setup
```js
import { generateSecret, generateTotpUri } from '@vates/otp'
import QrCode from 'qrcode'
// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()
// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)
// Generates a URI to present to the user
const uri = generateTotpUri({ secret })
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```
#### Authentication
```js
import { verifyTotp } from '@vates/otp'
// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
console.log('authenticated!')
}
```
### API
#### Secret
```js
import { generateSecret } from '@vates/otp'
const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
#### HOTP
> This is likely not what you want to use, see TOTP below instead.
```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
// a sequence number, see HOTP specification
const counter = 0
// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'
// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator
#### TOTP
```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'
// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'
// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, when this token will be valid, default to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid
#### Verification from URI
```js
import { verifyFromUri } from '@vates/otp'
// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```

1
@vates/otp/.npmignore Symbolic link
View File

@@ -0,0 +1 @@
../../scripts/npmignore

163
@vates/otp/README.md Normal file
View File

@@ -0,0 +1,163 @@
<!-- DO NOT EDIT MANUALLY, THIS FILE HAS BEEN GENERATED -->
# @vates/otp
[![Package Version](https://badgen.net/npm/v/@vates/otp)](https://npmjs.org/package/@vates/otp) ![License](https://badgen.net/npm/license/@vates/otp) [![PackagePhobia](https://badgen.net/bundlephobia/minzip/@vates/otp)](https://bundlephobia.com/result?p=@vates/otp) [![Node compatibility](https://badgen.net/npm/node/@vates/otp)](https://npmjs.org/package/@vates/otp)
> Minimal HOTP/TOTP implementation
## Install
Installation of the [npm package](https://npmjs.org/package/@vates/otp):
```
> npm install --save @vates/otp
```
## Usage
### Usual workflow
> This section presents how this library should be used to implement a classic two-factor authentication.
#### Setup
```js
import { generateSecret, generateTotpUri } from '@vates/otp'
import QrCode from 'qrcode'
// Generates a secret that will be shared by both the service and the user:
const secret = generateSecret()
// Stores the secret in the service:
await currentUser.saveOtpSecret(secret)
// Generates a URI to present to the user
const uri = generateTotpUri({ secret })
// Generates the QR code from the URI to make it easily importable in Authy or Google Authenticator
const qr = await QrCode.toDataURL(uri)
```
#### Authentication
```js
import { verifyTotp } from '@vates/otp'
// Verifies a `token` entered by the user against a `secret` generated during setup.
if (await verifyTotp(token, { secret })) {
console.log('authenticated!')
}
```
### API
#### Secret
```js
import { generateSecret } from '@vates/otp'
const secret = generateSecret()
// 'OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
#### HOTP
> This is likely not what you want to use, see TOTP below instead.
```js
import { generateHotp, generateHotpUri, verifyHotp } from '@vates/otp'
// a sequence number, see HOTP specification
const counter = 0
// generate a token
//
// optional params:
// - digits
const token = await generateHotp({ counter, secret })
// '239988'
// verify a token
//
// optional params:
// - digits
const isValid = await verifyHotp(token, { counter, secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
const uri = generateHotpUri({ counter, label: 'account name', issuer: 'my app', secret })
// 'otpauth://hotp/my%20app:account%20name?counter=0&issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator
#### TOTP
```js
import { generateTotp, generateTotpUri, verifyTotp } from '@vates/otp'
// generate a token
//
// optional params:
// - digits
// - period
// - timestamp
const token = await generateTotp({ secret })
// '632869'
// verify a token
//
// optional params:
// - digits
// - period
// - timestamp
// - window
const isValid = await verifyTotp(token, { secret })
// true
// generate a URI that can be displayed as a QR code to be used with Authy or Google Authenticator
//
// optional params:
// - digits
// - period
const uri = generateTotpUri({ label: 'account name', issuer: 'my app', secret })
// 'otpauth://totp/my%20app:account%20name?issuer=my%20app&secret=OJOKA65RY5FQQ2RYWVKD5Y3YG5CSHGYH'
```
Optional params and their default values:
- `digits = 6`: length of the token, avoid using it because not compatible with Google Authenticator
- `period = 30`: number of seconds a token is valid
- `timestamp = Date.now() / 1e3`: Unix timestamp, in seconds, when this token will be valid, default to now
- `window = 1`: number of periods before and after `timestamp` for which the token is considered valid
#### Verification from URI
```js
import { verifyFromUri } from '@vates/otp'
// Verify the token using all the information contained in the URI
const isValid = await verifyFromUri(token, uri)
// true
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on
the code.
You may:
- report any [issue](https://github.com/vatesfr/xen-orchestra/issues)
you've encountered;
- fork and create a pull request.
## License
[ISC](https://spdx.org/licenses/ISC) © [Vates SAS](https://vates.fr)

111
@vates/otp/index.mjs Normal file
View File

@@ -0,0 +1,111 @@
import { base32 } from 'rfc4648'
import { webcrypto } from 'node:crypto'
const { subtle } = webcrypto
// Minimal parameter validation: throws a TypeError naming the offending
// parameter when `value` is falsy.
function assert(name, value) {
  if (value) {
    return
  }
  throw new TypeError('invalid value for param ' + name)
}
// Builds an otpauth:// URI following the Google Authenticator Key Uri Format:
// https://github.com/google/google-authenticator/wiki/Key-Uri-Format
//
// `params` entries whose value is `undefined` are omitted from the query.
function generateUri(protocol, label, params) {
  assert('label', typeof label === 'string')
  assert('secret', typeof params.secret === 'string')
  const { issuer } = params
  const path =
    issuer === undefined
      ? encodeURIComponent(label)
      : encodeURIComponent(issuer) + ':' + encodeURIComponent(label)
  const parts = []
  for (const [key, value] of Object.entries(params)) {
    if (value !== undefined) {
      parts.push(key + '=' + encodeURIComponent(value))
    }
  }
  return `otpauth://${protocol}/${path}?${parts.join('&')}`
}
// Generates a new shared secret as an unpadded base32 string.
export function generateSecret() {
  // https://www.rfc-editor.org/rfc/rfc4226 recommends 160 bits (i.e. 20 bytes)
  const bytes = new Uint8Array(20)
  webcrypto.getRandomValues(bytes)
  return base32.stringify(bytes, { pad: false })
}
// Default number of digits in generated tokens.
const DIGITS = 6
// HOTP token generation — https://www.rfc-editor.org/rfc/rfc4226
//
// Computes HMAC-SHA1 over the 8-byte big-endian counter keyed by the base32
// secret, then applies the RFC 4226 dynamic truncation to produce a
// zero-padded `digits`-long decimal string.
export async function generateHotp({ counter, digits = DIGITS, secret }) {
  const data = new Uint8Array(8)
  new DataView(data.buffer).setBigInt64(0, BigInt(counter), false)
  const key = await subtle.importKey(
    'raw',
    base32.parse(secret, { loose: true }),
    { name: 'HMAC', hash: 'SHA-1' },
    false,
    ['sign', 'verify']
  )
  const digest = new DataView(await subtle.sign('HMAC', key, data))
  // dynamic truncation: low nibble of the last byte picks the 4-byte window,
  // whose top bit is masked off
  const offset = digest.getUint8(digest.byteLength - 1) & 0xf
  const p = digest.getUint32(offset) & 0x7f_ff_ff_ff
  return String(p % Math.pow(10, digits)).padStart(digits, '0')
}
// Builds an otpauth://hotp/ URI for this counter/secret, importable by
// authenticator apps. `digits` and `issuer` are optional.
export function generateHotpUri({ counter, digits, issuer, label, secret }) {
  assert('counter', typeof counter === 'number')
  return generateUri('hotp', label, { counter, digits, issuer, secret })
}
// Verifies `token` by regenerating the expected HOTP with the same options.
export async function verifyHotp(token, opts) {
  const expected = await generateHotp(opts)
  return expected === token
}
// Number of completed `period`-second intervals since the Unix epoch, at the
// given timestamp (seconds, defaults to now).
function totpCounter(period = 30, timestamp = Math.floor(Date.now() / 1e3)) {
  const counter = timestamp / period
  return Math.floor(counter)
}
// TOTP token generation — https://www.rfc-editor.org/rfc/rfc6238.html
// A TOTP is simply an HOTP whose counter is derived from the current time.
export async function generateTotp({ period, timestamp, ...opts }) {
  const counter = totpCounter(period, timestamp)
  return generateHotp({ ...opts, counter })
}
// Builds an otpauth://totp/ URI for this secret, importable by authenticator
// apps. `digits`, `issuer` and `period` are optional.
export function generateTotpUri({ digits, issuer, label, period, secret }) {
  return generateUri('totp', label, { digits, issuer, period, secret })
}
// Verifies `token`, accepting any counter within `window` periods before or
// after the one matching `timestamp` (to tolerate clock drift).
export async function verifyTotp(token, { period, timestamp, window = 1, ...opts }) {
  const center = totpCounter(period, timestamp)
  for (let counter = center - window; counter <= center + window; ++counter) {
    if ((await generateHotp({ ...opts, counter })) === token) {
      return true
    }
  }
  return false
}
// Verifies `token` using all the information contained in an otpauth:// URI.
// Note: every option parsed from the URI is a string; downstream code relies
// on JS numeric coercion (BigInt(counter), timestamp / period, …).
export async function verifyFromUri(token, uri) {
  const { host, protocol, searchParams } = new URL(uri)
  assert('protocol', protocol === 'otpauth:')
  const opts = Object.fromEntries(searchParams.entries())
  switch (host) {
    case 'hotp':
      return await verifyHotp(token, opts)
    case 'totp':
      return await verifyTotp(token, opts)
    default:
      assert('host', false)
  }
}

112
@vates/otp/index.spec.mjs Normal file
View File

@@ -0,0 +1,112 @@
import { strict as assert } from 'node:assert'
import { describe, it } from 'tap/mocha'
import {
generateHotp,
generateHotpUri,
generateSecret,
generateTotp,
generateTotpUri,
verifyHotp,
verifyTotp,
} from './index.mjs'
// generateSecret: secrets are 32-char (base32) strings and unique per call.
describe('generateSecret', function () {
  it('generates a string of 32 chars', async function () {
    const secret = generateSecret()
    assert.equal(typeof secret, 'string')
    assert.equal(secret.length, 32)
  })
  it('generates a different secret at each call', async function () {
    assert.notEqual(generateSecret(), generateSecret())
  })
})
describe('HOTP', function () {
  // fixed token/options vectors: generation must reproduce the known token
  // and verification must accept it
  it('generate and verify valid tokens', async function () {
    for (const [token, opts] of Object.entries({
      382752: {
        counter: -3088,
        secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
      },
      163376: {
        counter: 30598,
        secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
      },
    })) {
      assert.equal(await generateHotp(opts), token)
      assert(await verifyHotp(token, opts))
    }
  })
  describe('generateHotpUri', function () {
    const opts = {
      counter: 59732,
      label: 'the label',
      secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
    }
    // each case: [input options, expected otpauth:// URI]
    Object.entries({
      'without optional params': [
        opts,
        'otpauth://hotp/the%20label?counter=59732&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
      'with issuer': [
        { ...opts, issuer: 'the issuer' },
        'otpauth://hotp/the%20issuer:the%20label?counter=59732&issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
      'with digits': [
        { ...opts, digits: 7 },
        'otpauth://hotp/the%20label?counter=59732&digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
    }).forEach(([title, [opts, uri]]) => {
      it(title, async function () {
        assert.strictEqual(generateHotpUri(opts), uri)
      })
    })
  })
})
describe('TOTP', function () {
  // fixed token/options vectors at pinned timestamps and periods
  Object.entries({
    '033702': {
      secret: 'PJYFSZ3JNVXVQMZXOB2EQYJSKB2HE6TB',
      timestamp: 1665416296,
      period: 30,
    },
    107250: {
      secret: 'GBUDQZ3UKZZGIMRLNVYXA33GMFMEGQKN',
      timestamp: 1665416674,
      period: 60,
    },
  }).forEach(([token, opts]) => {
    it('works', async function () {
      assert.equal(await generateTotp(opts), token)
      assert(await verifyTotp(token, opts))
    })
  })
  // NOTE(review): this describe is labelled 'generateHotpUri' but actually
  // exercises generateTotpUri — consider renaming the label.
  describe('generateHotpUri', function () {
    const opts = {
      label: 'the label',
      secret: 'OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
    }
    // each case: [input options, expected otpauth:// URI]
    Object.entries({
      'without optional params': [opts, 'otpauth://totp/the%20label?secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX'],
      'with issuer': [
        { ...opts, issuer: 'the issuer' },
        'otpauth://totp/the%20issuer:the%20label?issuer=the%20issuer&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
      'with digits': [
        { ...opts, digits: 7 },
        'otpauth://totp/the%20label?digits=7&secret=OGK45BBZAIGNGELHZPXYKN4GUVWWO6YX',
      ],
    }).forEach(([title, [opts, uri]]) => {
      it(title, async function () {
        assert.strictEqual(generateTotpUri(opts), uri)
      })
    })
  })
})

39
@vates/otp/package.json Normal file
View File

@@ -0,0 +1,39 @@
{
"private": false,
"name": "@vates/otp",
"description": "Minimal HTOP/TOTP implementation",
"keywords": [
"2fa",
"authenticator",
"hotp",
"otp",
"totp"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@vates/otp",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"main": "index.mjs",
"repository": {
"directory": "@vates/otp",
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"engines": {
"node": ">=15"
},
"dependencies": {
"rfc4648": "^1.5.2"
},
"scripts": {
"postversion": "npm publish --access public",
"test": "tap"
},
"devDependencies": {
"tap": "^16.3.0"
}
}

View File

@@ -1,7 +1,7 @@
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`, this permits the most efficient composition:
```js
const compositePredicate = every(undefined, some(predicate2, undefined))
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
// ends up as
@@ -36,6 +36,21 @@ isBetween3And10(10)
// → false
```
### `not(predicate)`
> Returns a predicate that returns the negation of the predicate.
```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)
isOdd(1)
// true
isOdd(2)
// false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.

View File

@@ -19,7 +19,7 @@ Installation of the [npm package](https://npmjs.org/package/@vates/predicates):
`undefined` predicates are ignored and `undefined` is returned if all predicates are `undefined`, this permits the most efficient composition:
```js
const compositePredicate = every(undefined, some(predicate2, undefined))
const compositePredicate = not(every(undefined, some(not(predicate2), undefined)))
// ends up as
@@ -54,6 +54,21 @@ isBetween3And10(10)
// → false
```
### `not(predicate)`
> Returns a predicate that returns the negation of the predicate.
```js
const isEven = n => n % 2 === 0
const isOdd = not(isEven)
isOdd(1)
// true
isOdd(2)
// false
```
### `some(predicates)`
> Returns a predicate that returns `true` iff some predicate returns `true`.

View File

@@ -51,6 +51,22 @@ exports.every = function every() {
}
}
const notPredicateTag = {}
exports.not = function not(predicate) {
if (isDefinedPredicate(predicate)) {
if (predicate.tag === notPredicateTag) {
return predicate.predicate
}
function notPredicate() {
return !predicate.apply(this, arguments)
}
notPredicate.predicate = predicate
notPredicate.tag = notPredicateTag
return notPredicate
}
}
exports.some = function some() {
const predicates = handleArgs.apply(this, arguments)
const n = predicates.length

View File

@@ -3,20 +3,14 @@
const assert = require('assert/strict')
const { describe, it } = require('tap').mocha
const { every, some } = require('./')
const { every, not, some } = require('./')
const T = () => true
const F = () => false
const testArgsHandling = fn => {
it('returns undefined if all predicates are undefined', () => {
const testArgHandling = fn => {
it('returns undefined if predicate is undefined', () => {
assert.equal(fn(undefined), undefined)
assert.equal(fn([undefined]), undefined)
})
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('throws if it receives a non-predicate', () => {
@@ -24,6 +18,15 @@ const testArgsHandling = fn => {
error.value = 3
assert.throws(() => fn(3), error)
})
}
const testArgsHandling = fn => {
testArgHandling(fn)
it('returns the predicate if only a single one is passed', () => {
assert.equal(fn(undefined, T), T)
assert.equal(fn([undefined, T]), T)
})
it('forwards this and arguments to predicates', () => {
const thisArg = 'qux'
@@ -36,17 +39,21 @@ const testArgsHandling = fn => {
})
}
const runTests = (fn, truthTable) =>
const runTests = (fn, acceptMultiple, truthTable) =>
it('works', () => {
truthTable.forEach(([result, ...predicates]) => {
if (acceptMultiple) {
assert.equal(fn(predicates)(), result)
} else {
assert.equal(predicates.length, 1)
}
assert.equal(fn(...predicates)(), result)
assert.equal(fn(predicates)(), result)
})
})
describe('every', () => {
testArgsHandling(every)
runTests(every, [
runTests(every, true, [
[true, T, T],
[false, T, F],
[false, F, T],
@@ -54,9 +61,22 @@ describe('every', () => {
])
})
describe('not', () => {
testArgHandling(not)
it('returns the original predicate if negated twice', () => {
assert.equal(not(not(T)), T)
})
runTests(not, false, [
[true, F],
[false, T],
])
})
describe('some', () => {
testArgsHandling(some)
runTests(some, [
runTests(some, true, [
[true, T, T],
[true, T, F],
[true, F, T],

View File

@@ -26,7 +26,7 @@
"url": "https://vates.fr"
},
"license": "ISC",
"version": "1.0.0",
"version": "1.1.0",
"engines": {
"node": ">=6"
},

View File

@@ -1,6 +1,9 @@
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
```js
import { readChunk } from '@vates/read-chunk'
@@ -11,3 +14,13 @@ import { readChunk } from '@vates/read-chunk'
}
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```

View File

@@ -16,9 +16,12 @@ Installation of the [npm package](https://npmjs.org/package/@vates/read-chunk):
## Usage
### `readChunk(stream, [size])`
- returns the next available chunk of data
- like `stream.read()`, a number of bytes can be specified
- returns `null` if the stream has ended
- returns with less data than expected if stream has ended
- returns `null` if the stream has ended and no data has been read
```js
import { readChunk } from '@vates/read-chunk'
@@ -30,6 +33,16 @@ import { readChunk } from '@vates/read-chunk'
})()
```
### `readChunkStrict(stream, [size])`
Similar behavior to `readChunk` but throws if the stream ended before the requested data could be read.
```js
import { readChunkStrict } from '@vates/read-chunk'
const chunk = await readChunkStrict(stream, 1024)
```
## Contributions
Contributions are _very_ welcomed, either on the documentation or on

View File

@@ -1,7 +1,9 @@
'use strict'
const readChunk = (stream, size) =>
size === 0
stream.closed || stream.readableEnded
? Promise.resolve(null)
: size === 0
? Promise.resolve(Buffer.alloc(0))
: new Promise((resolve, reject) => {
function onEnd() {
@@ -30,3 +32,22 @@ const readChunk = (stream, size) =>
onReadable()
})
exports.readChunk = readChunk
exports.readChunkStrict = async function readChunkStrict(stream, size) {
const chunk = await readChunk(stream, size)
if (chunk === null) {
throw new Error('stream has ended without data')
}
if (size !== undefined && chunk.length !== size) {
const error = new Error('stream has ended with not enough data')
Object.defineProperties(error, {
chunk: {
value: chunk,
},
})
throw error
}
return chunk
}

View File

@@ -1,45 +0,0 @@
'use strict'
/* eslint-env jest */
const { Readable } = require('stream')
const { readChunk } = require('./')
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
describe('readChunk', () => {
it('returns null if stream is empty', async () => {
expect(await readChunk(makeStream([]))).toBe(null)
})
describe('with binary stream', () => {
it('returns the first chunk of data', async () => {
expect(await readChunk(makeStream(['foo', 'bar']))).toEqual(Buffer.from('foo'))
})
it('returns a chunk of the specified size (smaller than first)', async () => {
expect(await readChunk(makeStream(['foo', 'bar']), 2)).toEqual(Buffer.from('fo'))
})
it('returns a chunk of the specified size (larger than first)', async () => {
expect(await readChunk(makeStream(['foo', 'bar']), 4)).toEqual(Buffer.from('foob'))
})
it('returns less data if stream ends', async () => {
expect(await readChunk(makeStream(['foo', 'bar']), 10)).toEqual(Buffer.from('foobar'))
})
it('returns an empty buffer if the specified size is 0', async () => {
expect(await readChunk(makeStream(['foo', 'bar']), 0)).toEqual(Buffer.alloc(0))
})
})
describe('with object stream', () => {
it('returns the first chunk of data verbatim', async () => {
const chunks = [{}, {}]
expect(await readChunk(makeStream.obj(chunks))).toBe(chunks[0])
})
})
})

View File

@@ -0,0 +1,77 @@
'use strict'
const { describe, it } = require('test')
const assert = require('node:assert').strict
const { Readable } = require('stream')
const { readChunk, readChunkStrict } = require('./')
// Builds a binary Readable from an iterable of chunks; `makeStream.obj` keeps
// object mode (verbatim Readable.from).
const makeStream = it => Readable.from(it, { objectMode: false })
makeStream.obj = Readable.from
// readChunk: null on empty/ended streams, partial data when the stream ends
// early, size-limited reads on binary streams, verbatim chunks in object mode.
describe('readChunk', () => {
  it('returns null if stream is empty', async () => {
    assert.strictEqual(await readChunk(makeStream([])), null)
  })
  it('returns null if the stream is already ended', async () => {
    const stream = await makeStream([])
    await readChunk(stream)
    assert.strictEqual(await readChunk(stream), null)
  })
  describe('with binary stream', () => {
    it('returns the first chunk of data', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar'])), Buffer.from('foo'))
    })
    it('returns a chunk of the specified size (smaller than first)', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 2), Buffer.from('fo'))
    })
    it('returns a chunk of the specified size (larger than first)', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 4), Buffer.from('foob'))
    })
    it('returns less data if stream ends', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 10), Buffer.from('foobar'))
    })
    it('returns an empty buffer if the specified size is 0', async () => {
      assert.deepEqual(await readChunk(makeStream(['foo', 'bar']), 0), Buffer.alloc(0))
    })
  })
  describe('with object stream', () => {
    it('returns the first chunk of data verbatim', async () => {
      const chunks = [{}, {}]
      assert.strictEqual(await readChunk(makeStream.obj(chunks)), chunks[0])
    })
  })
})
// Inverts a promise: resolves with the rejection reason, rejects with the
// fulfillment value (handy for asserting on expected errors).
const rejectionOf = async promise => {
  try {
    await promise
  } catch (error) {
    return error
  }
  throw await promise
}
// readChunkStrict: throws on empty streams, and on short reads attaches the
// partial data to the error's `chunk` property.
describe('readChunkStrict', function () {
  it('throws if stream is empty', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream([])))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended without data')
    assert.strictEqual(error.chunk, undefined)
  })
  it('throws if stream ends with not enough data', async () => {
    const error = await rejectionOf(readChunkStrict(makeStream(['foo', 'bar']), 10))
    assert(error instanceof Error)
    assert.strictEqual(error.message, 'stream has ended with not enough data')
    assert.deepEqual(error.chunk, Buffer.from('foobar'))
  })
})

View File

@@ -19,15 +19,19 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.1.2",
"version": "1.0.1",
"engines": {
"node": ">=8.10"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"devDependencies": {
"test": "^3.2.1"
}
}

View File

@@ -30,6 +30,7 @@ if (args.length === 0) {
${name} v${version}
`)
// eslint-disable-next-line n/no-process-exit
process.exit()
}

View File

@@ -1,6 +1,8 @@
'use strict'
/* eslint-env jest */
const { describe, it } = require('test')
const assert = require('assert').strict
const sinon = require('sinon')
const { asyncMapSettled } = require('./')
@@ -9,26 +11,29 @@ const noop = Function.prototype
describe('asyncMapSettled', () => {
it('works', async () => {
const values = [Math.random(), Math.random()]
const spy = jest.fn(async v => v * 2)
const spy = sinon.spy(async v => v * 2)
const iterable = new Set(values)
// returns an array containing the result of each calls
expect(await asyncMapSettled(iterable, spy)).toEqual(values.map(value => value * 2))
assert.deepStrictEqual(
await asyncMapSettled(iterable, spy),
values.map(value => value * 2)
)
for (let i = 0, n = values.length; i < n; ++i) {
// each call receive the current item as sole argument
expect(spy.mock.calls[i]).toEqual([values[i]])
assert.deepStrictEqual(spy.args[i], [values[i]])
// each call has `this` bound to the iterable
expect(spy.mock.instances[i]).toBe(iterable)
assert.deepStrictEqual(spy.thisValues[i], iterable)
}
})
it('can use a specified thisArg', () => {
const thisArg = {}
const spy = jest.fn()
const spy = sinon.spy()
asyncMapSettled(['foo'], spy, thisArg)
expect(spy.mock.instances[0]).toBe(thisArg)
assert.deepStrictEqual(spy.thisValues[0], thisArg)
})
it('rejects only when all calls as resolved', async () => {
@@ -55,19 +60,22 @@ describe('asyncMapSettled', () => {
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
expect(hasSettled).toBe(false)
assert.strictEqual(hasSettled, false)
defers[1].resolve()
// wait for all microtasks to settle
await new Promise(resolve => setImmediate(resolve))
expect(hasSettled).toBe(true)
await expect(promise).rejects.toBe(error)
assert.strictEqual(hasSettled, true)
await assert.rejects(promise, error)
})
it('issues when latest promise rejects', async () => {
const error = new Error()
await expect(asyncMapSettled([1], () => Promise.reject(error))).rejects.toBe(error)
await assert.rejects(
asyncMapSettled([1], () => Promise.reject(error)),
error
)
})
})

View File

@@ -31,6 +31,11 @@
"lodash": "^4.17.4"
},
"scripts": {
"postversion": "npm publish"
"postversion": "npm publish",
"test": "node--test"
},
"devDependencies": {
"sinon": "^14.0.1",
"test": "^3.2.1"
}
}

View File

@@ -7,7 +7,7 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.2.0",
"version": "0.2.2",
"engines": {
"node": ">=14"
},
@@ -17,7 +17,7 @@
},
"dependencies": {
"@vates/decorate-with": "^2.0.0",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/log": "^0.5.0",
"golike-defer": "^0.5.1",
"object-hash": "^2.0.1"
},

View File

@@ -5,7 +5,6 @@ const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
@@ -15,7 +14,7 @@ const configs = {
proposal: 'minimal',
},
'@babel/preset-env': {
debug: !__TEST__,
debug: __PROD__,
// disabled until https://github.com/babel/babel/issues/8323 is resolved
// loose: true,

View File

@@ -1,11 +1,10 @@
'use strict'
import { readFileSync } from 'fs'
import getopts from 'getopts'
const getopts = require('getopts')
const { version } = JSON.parse(readFileSync(new URL('package.json', import.meta.url)))
const { version } = require('./package.json')
module.exports = commands =>
async function (args, prefix) {
export function composeCommands(commands) {
return async function (args, prefix) {
const opts = getopts(args, {
alias: {
help: 'h',
@@ -30,5 +29,6 @@ xo-backups v${version}
return
}
return command.main(args.slice(1), prefix + ' ' + commandName)
return (await command.default)(args.slice(1), prefix + ' ' + commandName)
}
}

View File

@@ -1,11 +1,9 @@
'use strict'
import fs from 'fs/promises'
import { dirname } from 'path'
const { dirname } = require('path')
export * from 'fs/promises'
const fs = require('promise-toolbox/promisifyAll')(require('fs'))
module.exports = fs
fs.getSize = path =>
export const getSize = path =>
fs.stat(path).then(
_ => _.size,
error => {
@@ -16,7 +14,7 @@ fs.getSize = path =>
}
)
fs.mktree = async function mkdirp(path) {
export async function mktree(path) {
try {
await fs.mkdir(path)
} catch (error) {
@@ -26,8 +24,8 @@ fs.mktree = async function mkdirp(path) {
return
}
if (code === 'ENOENT') {
await mkdirp(dirname(path))
return mkdirp(path)
await mktree(dirname(path))
return mktree(path)
}
throw error
}
@@ -37,7 +35,7 @@ fs.mktree = async function mkdirp(path) {
// - single param for direct use in `Array#map`
// - files are prefixed with directory path
// - safer: returns empty array if path is missing or not a directory
fs.readdir2 = path =>
export const readdir2 = path =>
fs.readdir(path).then(
entries => {
entries.forEach((entry, i) => {
@@ -59,7 +57,7 @@ fs.readdir2 = path =>
}
)
fs.symlink2 = async (target, path) => {
export async function symlink2(target, path) {
try {
await fs.symlink(target, path)
} catch (error) {

View File

@@ -1,34 +0,0 @@
'use strict'
// -----------------------------------------------------------------------------
const asyncMap = require('lodash/curryRight')(require('@xen-orchestra/async-map').asyncMap)
const getopts = require('getopts')
const { RemoteAdapter } = require('@xen-orchestra/backups/RemoteAdapter')
const { resolve } = require('path')
const adapter = new RemoteAdapter(require('@xen-orchestra/fs').getHandler({ url: 'file://' }))
// Entry point of the (legacy, since-removed) `clean-vms` command.
//
// `args` — raw CLI arguments: flags -f/--fix, -r/--remove, -m/--merge,
// plus one or more VM backup directories (positional, collected in `_`).
// Each directory is cleaned independently; a failure on one directory is
// reported on stderr and does not abort the others.
module.exports = async function main(args) {
const { _, fix, remove, merge } = getopts(args, {
alias: {
fix: 'f',
remove: 'r',
merge: 'm',
},
boolean: ['fix', 'merge', 'remove'],
default: {
merge: false,
remove: false,
},
})
// `asyncMap` here is curried lodash-style (see top of file) — directories
// are processed concurrently.
await asyncMap(_, async vmDir => {
// normalize to an absolute path before handing it to the adapter
vmDir = resolve(vmDir)
try {
await adapter.cleanVm(vmDir, { fixMetadata: fix, remove, merge, onLog: (...args) => console.warn(...args) })
} catch (error) {
// best-effort: report and continue with the remaining directories
console.error('adapter.cleanVm', vmDir, error)
}
})
}

View File

@@ -0,0 +1,38 @@
import { asyncMap } from '@xen-orchestra/async-map'
import { RemoteAdapter } from '@xen-orchestra/backups/RemoteAdapter.js'
import { getSyncedHandler } from '@xen-orchestra/fs'
import getopts from 'getopts'
import { basename, dirname } from 'path'
import Disposable from 'promise-toolbox/Disposable'
import { pathToFileURL } from 'url'
// `clean-vms` command: checks and cleans up one or more VM backup
// directories.
//
// `args` — raw CLI arguments: flags -f/--fix, -r/--remove, -m/--merge,
// plus one or more VM backup directories (positional).
// A failure on one directory is reported on stderr and does not abort
// the others.
export default async function cleanVms(args) {
  const opts = getopts(args, {
    alias: {
      fix: 'f',
      remove: 'r',
      merge: 'm',
    },
    boolean: ['fix', 'merge', 'remove'],
    default: {
      merge: false,
      remove: false,
    },
  })
  const { _: vmDirs, fix, merge, remove } = opts

  // Each directory gets its own synced handler rooted at its parent;
  // Disposable.use guarantees the handler is released afterwards.
  await asyncMap(vmDirs, vmDir =>
    Disposable.use(getSyncedHandler({ url: pathToFileURL(dirname(vmDir)).href }), async handler => {
      const adapter = new RemoteAdapter(handler)
      try {
        await adapter.cleanVm(basename(vmDir), {
          fixMetadata: fix,
          merge,
          remove,
          logInfo: (...messages) => console.log(...messages),
          logWarn: (...messages) => console.warn(...messages),
        })
      } catch (error) {
        // best-effort: report and continue with the remaining directories
        console.error('adapter.cleanVm', vmDir, error)
      }
    })
  )
}

View File

@@ -1,13 +1,10 @@
'use strict'
import { mktree, readdir2, readFile, symlink2 } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import filenamify from 'filenamify'
import get from 'lodash/get.js'
import { dirname, join, relative } from 'path'
const filenamify = require('filenamify')
const get = require('lodash/get')
const { asyncMap } = require('@xen-orchestra/async-map')
const { dirname, join, relative } = require('path')
const { mktree, readdir2, readFile, symlink2 } = require('../_fs')
module.exports = async function createSymlinkIndex([backupDir, fieldPath]) {
export default async function createSymlinkIndex([backupDir, fieldPath]) {
const indexDir = join(backupDir, 'indexes', filenamify(fieldPath))
await mktree(indexDir)

View File

@@ -1,16 +1,13 @@
'use strict'
const groupBy = require('lodash/groupBy')
const { asyncMap } = require('@xen-orchestra/async-map')
const { createHash } = require('crypto')
const { dirname, resolve } = require('path')
const { readdir2, readFile, getSize } = require('../_fs')
import { readdir2, readFile, getSize } from '../_fs.mjs'
import { asyncMap } from '@xen-orchestra/async-map'
import { createHash } from 'crypto'
import groupBy from 'lodash/groupBy.js'
import { dirname, resolve } from 'path'
const sha512 = str => createHash('sha512').update(str).digest('hex')
const sum = values => values.reduce((a, b) => a + b)
module.exports = async function info(vmDirs) {
export default async function info(vmDirs) {
const jsonFiles = (
await asyncMap(vmDirs, async vmDir => (await readdir2(vmDir)).filter(_ => _.endsWith('.json')))
).flat()

View File

@@ -1,11 +1,12 @@
#!/usr/bin/env node
import { composeCommands } from './_composeCommands.mjs'
'use strict'
const importDefault = async path => (await import(path)).default
require('./_composeCommands')({
composeCommands({
'clean-vms': {
get main() {
return require('./commands/clean-vms')
get default() {
return importDefault('./commands/clean-vms.mjs')
},
usage: `[--fix] [--merge] [--remove] xo-vm-backups/*
@@ -18,14 +19,14 @@ require('./_composeCommands')({
`,
},
'create-symlink-index': {
get main() {
return require('./commands/create-symlink-index')
get default() {
return importDefault('./commands/create-symlink-index.mjs')
},
usage: 'xo-vm-backups <field path>',
},
info: {
get main() {
return require('./commands/info')
get default() {
return importDefault('./commands/info.mjs')
},
usage: 'xo-vm-backups/*',
},

View File

@@ -1,21 +1,21 @@
{
"private": false,
"bin": {
"xo-backups": "index.js"
"xo-backups": "index.mjs"
},
"preferGlobal": true,
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/backups": "^0.23.0",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/backups": "^0.29.1",
"@xen-orchestra/fs": "^3.3.0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"lodash": "^4.17.15",
"promise-toolbox": "^0.21.0"
},
"engines": {
"node": ">=7.10.1"
"node": ">=14"
},
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/backups-cli",
"name": "@xen-orchestra/backups-cli",
@@ -27,7 +27,7 @@
"scripts": {
"postversion": "npm publish --access public"
},
"version": "0.7.1",
"version": "1.0.0",
"license": "AGPL-3.0-or-later",
"author": {
"name": "Vates SAS",

View File

@@ -6,7 +6,7 @@ const ignoreErrors = require('promise-toolbox/ignoreErrors')
const { compileTemplate } = require('@xen-orchestra/template')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { extractIdsFromSimplePattern } = require('./_extractIdsFromSimplePattern.js')
const { extractIdsFromSimplePattern } = require('./extractIdsFromSimplePattern.js')
const { PoolMetadataBackup } = require('./_PoolMetadataBackup.js')
const { Task } = require('./Task.js')
const { VmBackup } = require('./_VmBackup.js')
@@ -43,6 +43,7 @@ const DEFAULT_VM_SETTINGS = {
offlineSnapshot: false,
snapshotRetention: 0,
timeout: 0,
useNbd: false,
unconditionalSnapshot: false,
vmTimeout: 0,
}
@@ -245,7 +246,7 @@ exports.Backup = class Backup {
})
)
),
() => settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined,
() => (settings.healthCheckSr !== undefined ? this._getRecord('SR', settings.healthCheckSr) : undefined),
async (srs, remoteAdapters, healthCheckSr) => {
// remove adapters that failed (already handled)
remoteAdapters = remoteAdapters.filter(_ => _ !== undefined)

View File

@@ -15,18 +15,22 @@ const { deduped } = require('@vates/disposable/deduped.js')
const { decorateMethodsWith } = require('@vates/decorate-with')
const { compose } = require('@vates/compose')
const { execFile } = require('child_process')
const { readdir, stat } = require('fs-extra')
const { readdir, lstat } = require('fs-extra')
const { v4: uuidv4 } = require('uuid')
const { ZipFile } = require('yazl')
const zlib = require('zlib')
const { BACKUP_DIR } = require('./_getVmBackupDir.js')
const { cleanVm } = require('./_cleanVm.js')
const { formatFilenameDate } = require('./_filenameDate.js')
const { getTmpDir } = require('./_getTmpDir.js')
const { isMetadataFile } = require('./_backupType.js')
const { isValidXva } = require('./_isValidXva.js')
const { listPartitions, LVM_PARTITION_TYPE } = require('./_listPartitions.js')
const { lvs, pvs } = require('./_lvm.js')
// @todo: this import is marked extraneous, should be fixed when the lib is published
const { mount } = require('@vates/fuse-vhd')
const { asyncEach } = require('@vates/async-each')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
@@ -34,7 +38,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS
const { warn } = createLogger('xo:backups:RemoteAdapter')
const { debug, warn } = createLogger('xo:backups:RemoteAdapter')
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
@@ -44,16 +48,13 @@ const resolveRelativeFromFile = (file, path) => resolve('/', dirname(file), path
const resolveSubpath = (root, path) => resolve(root, `.${resolve('/', path)}`)
const RE_VHDI = /^vhdi(\d+)$/
async function addDirectory(files, realPath, metadataPath) {
try {
const subFiles = await readdir(realPath)
await asyncMap(subFiles, file => addDirectory(files, realPath + '/' + file, metadataPath + '/' + file))
} catch (error) {
if (error == null || error.code !== 'ENOTDIR') {
throw error
}
const stats = await lstat(realPath)
if (stats.isDirectory()) {
await asyncMap(await readdir(realPath), file =>
addDirectory(files, realPath + '/' + file, metadataPath + '/' + file)
)
} else if (stats.isFile()) {
files.push({
realPath,
metadataPath,
@@ -75,12 +76,16 @@ const debounceResourceFactory = factory =>
}
class RemoteAdapter {
constructor(handler, { debounceResource = res => res, dirMode, vhdDirectoryCompression } = {}) {
constructor(
handler,
{ debounceResource = res => res, dirMode, vhdDirectoryCompression, useGetDiskLegacy = false } = {}
) {
this._debounceResource = debounceResource
this._dirMode = dirMode
this._handler = handler
this._vhdDirectoryCompression = vhdDirectoryCompression
this._readCacheListVmBackups = synchronized.withKey()(this._readCacheListVmBackups)
this._useGetDiskLegacy = useGetDiskLegacy
}
get handler() {
@@ -128,7 +133,9 @@ class RemoteAdapter {
}
async *_getPartition(devicePath, partition) {
const options = ['loop', 'ro']
// the norecovery option is necessary because if the partition is dirty,
// mount will try to fix it, which is impossible because the device is read-only
const options = ['loop', 'ro', 'norecovery']
if (partition !== undefined) {
const { size, start } = partition
@@ -225,11 +232,30 @@ class RemoteAdapter {
return promise
}
#removeVmBackupsFromCache(backups) {
for (const [dir, filenames] of Object.entries(
groupBy(
backups.map(_ => _._filename),
dirname
)
)) {
// detached async action, will not reject
this._updateCache(dir + '/cache.json.gz', backups => {
for (const filename of filenames) {
debug('removing cache entry', { entry: filename })
delete backups[filename]
}
})
}
}
async deleteDeltaVmBackups(backups) {
const handler = this._handler
// this will delete the json, unused VHDs will be detected by `cleanVm`
await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
this.#removeVmBackupsFromCache(backups)
}
async deleteMetadataBackup(backupId) {
@@ -257,6 +283,8 @@ class RemoteAdapter {
await asyncMapSettled(backups, ({ _filename, xva }) =>
Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
)
this.#removeVmBackupsFromCache(backups)
}
deleteVmBackup(file) {
@@ -277,14 +305,13 @@ class RemoteAdapter {
full !== undefined && this.deleteFullVmBackups(full),
])
const dirs = new Set(files.map(file => dirname(file)))
for (const dir of dirs) {
// don't merge in main process, unused VHDs will be merged in the next backup run
await this.cleanVm(dir, { remove: true, onLog: warn })
}
const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
await asyncMap(new Set(files.map(file => dirname(file))), dir =>
// - don't merge in main process, unused VHDs will be merged in the next backup run
// - don't error in case this fails:
// - if lock is already being held, a backup is running and cleanVm will be ran at the end
// - otherwise, there is nothing more we can do, orphan file will be cleaned in the future
this.cleanVm(dir, { remove: true, logWarn: warn }).catch(noop)
)
}
#getCompressionType() {
@@ -292,14 +319,15 @@ class RemoteAdapter {
}
#useVhdDirectory() {
return this.handler.type === 's3'
return this.handler.useVhdDirectory()
}
#useAlias() {
return this.#useVhdDirectory()
}
async *getDisk(diskId) {
async *#getDiskLegacy(diskId) {
const RE_VHDI = /^vhdi(\d+)$/
const handler = this._handler
const diskPath = handler._getFilePath('/' + diskId)
@@ -329,6 +357,20 @@ class RemoteAdapter {
}
}
async *getDisk(diskId) {
if (this._useGetDiskLegacy) {
yield* this.#getDiskLegacy(diskId)
return
}
const handler = this._handler
// this is a disposable
const mountDir = yield getTmpDir()
// this is also a disposable
yield mount(handler, diskId, mountDir)
// this will yield disk path to caller
yield `${mountDir}/vhd0`
}
// partitionId values:
//
// - undefined: raw disk
@@ -379,18 +421,25 @@ class RemoteAdapter {
listPartitionFiles(diskId, partitionId, path) {
return Disposable.use(this.getPartition(diskId, partitionId), async rootPath => {
path = resolveSubpath(rootPath, path)
const entriesMap = {}
await asyncMap(await readdir(path), async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
await asyncEach(
await readdir(path),
async name => {
try {
const stats = await lstat(`${path}/${name}`)
if (stats.isDirectory()) {
entriesMap[name + '/'] = {}
} else if (stats.isFile()) {
entriesMap[name] = {}
}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
}
})
},
{ concurrency: 1 }
)
return entriesMap
})
@@ -455,11 +504,42 @@ class RemoteAdapter {
return backupsByPool
}
async invalidateVmBackupListCache(vmUuid) {
await this.handler.unlink(`${BACKUP_DIR}/${vmUuid}/cache.json.gz`)
#getVmBackupsCache(vmUuid) {
return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
}
async #readCache(path) {
try {
return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
} catch (error) {
if (error.code !== 'ENOENT') {
warn('#readCache', { error, path })
}
}
}
_updateCache = synchronized.withKey()(this._updateCache)
// eslint-disable-next-line no-dupe-class-members
async _updateCache(path, fn) {
const cache = await this.#readCache(path)
if (cache !== undefined) {
fn(cache)
await this.#writeCache(path, cache)
}
}
async #writeCache(path, data) {
try {
await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
} catch (error) {
warn('#writeCache', { error, path })
}
}
async #getCachabledDataListVmBackups(dir) {
debug('generating cache', { path: dir })
const handler = this._handler
const backups = {}
@@ -495,41 +575,26 @@ class RemoteAdapter {
// if cache is missing or broken => regenerate it and return
async _readCacheListVmBackups(vmUuid) {
const dir = `${BACKUP_DIR}/${vmUuid}`
const path = `${dir}/cache.json.gz`
const path = this.#getVmBackupsCache(vmUuid)
try {
const gzipped = await this.handler.readFile(path)
const text = await fromCallback(zlib.gunzip, gzipped)
return JSON.parse(text)
} catch (error) {
if (error.code !== 'ENOENT') {
warn('Cache file was unreadable', { vmUuid, error })
}
const cache = await this.#readCache(path)
if (cache !== undefined) {
debug('found VM backups cache, using it', { path })
return cache
}
// nothing cached, or cache unreadable => regenerate it
const backups = await this.#getCachabledDataListVmBackups(dir)
const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
if (backups === undefined) {
return
}
// detached async action, will not reject
this.#writeVmBackupsCache(path, backups)
this.#writeCache(path, backups)
return backups
}
async #writeVmBackupsCache(cacheFile, backups) {
try {
const text = JSON.stringify(backups)
const zipped = await fromCallback(zlib.gzip, text)
await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
} catch (error) {
warn('writeVmBackupsCache', { cacheFile, error })
}
}
async listVmBackups(vmUuid, predicate) {
const backups = []
const cached = await this._readCacheListVmBackups(vmUuid)
@@ -568,18 +633,40 @@ class RemoteAdapter {
return backups.sort(compareTimestamp)
}
async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
const handler = this._handler
async writeVmBackupMetadata(vmUuid, metadata) {
const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
await this.handler.outputFile(path, JSON.stringify(metadata), {
dirMode: this._dirMode,
})
// will not throw
this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
debug('adding cache entry', { entry: path })
backups[path] = {
...metadata,
// these values are required in the cache
_filename: path,
id: path,
}
})
return path
}
async writeVhd(path, input, { checksum = true, validator = noop, writeBlockConcurrency, nbdClient } = {}) {
const handler = this._handler
if (this.#useVhdDirectory()) {
const dataPath = `${dirname(path)}/data/${uuidv4()}.vhd`
await createVhdDirectoryFromStream(handler, dataPath, input, {
concurrency: 16,
concurrency: writeBlockConcurrency,
compression: this.#getCompressionType(),
async validator() {
await input.task
return validator.apply(this, arguments)
},
nbdClient,
})
await VhdAbstract.createAlias(handler, path, dataPath)
} else {

View File

@@ -3,8 +3,10 @@
const CancelToken = require('promise-toolbox/CancelToken')
const Zone = require('node-zone')
const logAfterEnd = () => {
throw new Error('task has already ended')
const logAfterEnd = log => {
const error = new Error('task has already ended')
error.log = log
throw error
}
const noop = Function.prototype

View File

@@ -128,35 +128,49 @@ class VmBackup {
}
// calls fn for each function, warns of any errors, and throws only if there are no writers left
async _callWriters(fn, warnMessage, parallel = true) {
async _callWriters(fn, step, parallel = true) {
const writers = this._writers
const n = writers.size
if (n === 0) {
return
}
if (n === 1) {
const [writer] = writers
async function callWriter(writer) {
const { name } = writer.constructor
try {
debug('writer step starting', { step, writer: name })
await fn(writer)
debug('writer step succeeded', { duration: step, writer: name })
} catch (error) {
writers.delete(writer)
warn('writer step failed', { error, step, writer: name })
// these two steps are the only ones that are not already in their own sub tasks
if (step === 'writer.checkBaseVdis()' || step === 'writer.beforeBackup()') {
Task.warning(
`the writer ${name} has failed the step ${step} with error ${error.message}. It won't be used anymore in this job execution.`
)
}
throw error
}
return
}
if (n === 1) {
const [writer] = writers
return callWriter(writer)
}
const errors = []
await (parallel ? asyncMap : asyncEach)(writers, async function (writer) {
try {
await fn(writer)
await callWriter(writer)
} catch (error) {
errors.push(error)
this.delete(writer)
warn(warnMessage, { error, writer: writer.constructor.name })
}
})
if (writers.size === 0) {
throw new AggregateError(errors, 'all targets have failed, step: ' + warnMessage)
throw new AggregateError(errors, 'all targets have failed, step: ' + step)
}
}

View File

@@ -1,22 +1,27 @@
'use strict'
const assert = require('assert')
const sum = require('lodash/sum')
const UUID = require('uuid')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { isVhdAlias, resolveVhdAlias } = require('vhd-lib/aliases')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
const { limitConcurrency } = require('limit-concurrency-decorator')
const { mergeVhdChain } = require('vhd-lib/merge')
const { Task } = require('./Task.js')
const { Disposable } = require('promise-toolbox')
const handlerPath = require('@xen-orchestra/fs/path')
// checking the size of a vhd directory is costly
// 1 Http Query per 1000 blocks
// we only check the size if all of the VHDs are VhdFiles
function shouldComputeVhdsSize(vhds) {
function shouldComputeVhdsSize(handler, vhds) {
if (handler.isEncrypted) {
return false
}
return vhds.every(vhd => vhd instanceof VhdFile)
}
@@ -24,73 +29,49 @@ const computeVhdsSize = (handler, vhdPaths) =>
Disposable.use(
vhdPaths.map(vhdPath => openVhd(handler, vhdPath)),
async vhds => {
if (shouldComputeVhdsSize(vhds)) {
if (shouldComputeVhdsSize(handler, vhds)) {
const sizes = await asyncMap(vhds, vhd => vhd.getSize())
return sum(sizes)
}
}
)
// chain is [ ancestor, child1, ..., childn]
// 1. Create a VhdSynthetic from all children
// 2. Merge the VhdSynthetic into the ancestor
// 3. Delete all (now) unused VHDs
// 4. Rename the ancestor with the merged data to the latest child
//
// VhdSynthetic
// |
// /‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾\
// [ ancestor, child1, ...,child n-1, childn ]
// | \___________________/ ^
// | | |
// | unused VHDs |
// | |
// \___________rename_____________/
async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
assert(chain.length >= 2)
const chainCopy = [...chain]
const parent = chainCopy.pop()
const children = chainCopy
// chain is [ ancestor, child_1, ..., child_n ]
async function _mergeVhdChain(handler, chain, { logInfo, remove, merge, mergeBlockConcurrency }) {
if (merge) {
onLog(`merging ${children.length} children into ${parent}`)
logInfo(`merging VHD chain`, { chain })
let done, total
const handle = setInterval(() => {
if (done !== undefined) {
onLog(`merging ${children.join(',')} into ${parent}: ${done}/${total}`)
logInfo('merge in progress', {
done,
parent: chain[0],
progress: Math.round((100 * done) / total),
total,
})
}
}, 10e3)
const mergedSize = await mergeVhd(handler, parent, handler, children, {
onProgress({ done: d, total: t }) {
done = d
total = t
},
})
clearInterval(handle)
const mergeTargetChild = children.shift()
await Promise.all([
VhdAbstract.rename(handler, parent, mergeTargetChild),
asyncMap(children, child => {
onLog(`the VHD ${child} is already merged`)
if (remove) {
onLog(`deleting merged VHD ${child}`)
return VhdAbstract.unlink(handler, child)
}
}),
])
return mergedSize
try {
return await mergeVhdChain(handler, chain, {
logInfo,
mergeBlockConcurrency,
onProgress({ done: d, total: t }) {
done = d
total = t
},
removeUnused: remove,
})
} finally {
clearInterval(handle)
}
}
}
const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir) => {
const listVhds = async (handler, vmDir, logWarn) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()
@@ -110,12 +91,23 @@ const listVhds = async (handler, vmDir) => {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})
aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd)).map(file => `${vdiDir}/${file}`)
list.forEach(file => {
await asyncMap(list, async file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
vhds.add(`${vdiDir}/${file}`)
} else {
interruptedVhds.set(`${vdiDir}/${res[1]}`, `${vdiDir}/${file}`)
try {
const mergeState = JSON.parse(await handler.readFile(`${vdiDir}/${file}`))
interruptedVhds.set(`${vdiDir}/${res[1]}`, {
statePath: `${vdiDir}/${file}`,
chain: mergeState.chain,
})
} catch (error) {
// fall back to a non resuming merge
vhds.add(`${vdiDir}/${file}`)
logWarn('failed to read existing merge state', { path: file, error })
}
}
})
}
@@ -125,16 +117,21 @@ const listVhds = async (handler, vmDir) => {
return { vhds, interruptedVhds, aliases }
}
async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
async function checkAliases(
aliasPaths,
targetDataRepository,
{ handler, logInfo = noop, logWarn = console.warn, remove = false }
) {
const aliasFound = []
for (const path of aliasPaths) {
const target = await resolveVhdAlias(handler, path)
for (const alias of aliasPaths) {
const target = await resolveVhdAlias(handler, alias)
if (!isVhdFile(target)) {
onLog(`Alias ${path} references a non vhd target: ${target}`)
logWarn('alias references non VHD target', { alias, target })
if (remove) {
logInfo('removing alias and non VHD target', { alias, target })
await handler.unlink(target)
await handler.unlink(path)
await handler.unlink(alias)
}
continue
}
@@ -147,13 +144,13 @@ async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog =
// error during dispose should not trigger a deletion
}
} catch (error) {
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
logWarn('missing or broken alias target', { alias, target, error })
if (remove) {
try {
await VhdAbstract.unlink(handler, path)
} catch (e) {
if (e.code !== 'ENOENT') {
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
await VhdAbstract.unlink(handler, alias)
} catch (error) {
if (error.code !== 'ENOENT') {
logWarn('error deleting alias target', { alias, target, error })
}
}
}
@@ -163,37 +160,48 @@ async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog =
aliasFound.push(resolve('/', target))
}
const entries = await handler.list(targetDataRepository, {
const vhds = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})
entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
onLog(`the Vhd ${entry} is not referenced by a an alias`)
await asyncMap(vhds, async path => {
if (!aliasFound.includes(path)) {
logWarn('no alias references VHD', { path })
if (remove) {
await VhdAbstract.unlink(handler, entry)
logInfo('deleting unused VHD', { path })
await VhdAbstract.unlink(handler, path)
}
}
})
}
exports.checkAliases = checkAliases
const defaultMergeLimiter = limitConcurrency(1)
exports.cleanVm = async function cleanVm(
vmDir,
{ fixMetadata, remove, merge, mergeLimiter = defaultMergeLimiter, onLog = noop }
{
fixMetadata,
remove,
merge,
mergeBlockConcurrency,
mergeLimiter = defaultMergeLimiter,
logInfo = noop,
logWarn = console.warn,
}
) {
const limitedMergeVhdChain = mergeLimiter(mergeVhdChain)
const limitedMergeVhdChain = mergeLimiter(_mergeVhdChain)
const handler = this._handler
const vhdsToJSons = new Set()
const vhdById = new Map()
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir, logWarn)
// remove broken VHDs
await asyncMap(vhds, async path => {
@@ -211,12 +219,31 @@ exports.cleanVm = async function cleanVm(
}
vhdChildren[parent] = path
}
// Detect VHDs with the same UUIDs
//
// Due to a bug introduced in a1bcd35e2
const duplicate = vhdById.get(UUID.stringify(vhd.footer.uuid))
let vhdKept = vhd
if (duplicate !== undefined) {
logWarn('uuid is duplicated', { uuid: UUID.stringify(vhd.footer.uuid) })
if (duplicate.containsAllDataOf(vhd)) {
logWarn(`should delete ${path}`)
vhdKept = duplicate
vhds.delete(path)
} else if (vhd.containsAllDataOf(duplicate)) {
logWarn(`should delete ${duplicate._path}`)
vhds.delete(duplicate._path)
} else {
logWarn('same ids but different content')
}
}
vhdById.set(UUID.stringify(vhdKept.footer.uuid), vhdKept)
})
} catch (error) {
vhds.delete(path)
onLog(`error while checking the VHD with path ${path}`, { error })
logWarn('VHD check error', { path, error })
if (error?.code === 'ERR_ASSERTION' && remove) {
onLog(`deleting broken ${path}`)
logInfo('deleting broken VHD', { path })
return VhdAbstract.unlink(handler, path)
}
}
@@ -225,15 +252,15 @@ exports.cleanVm = async function cleanVm(
// remove interrupted merge states for missing VHDs
for (const interruptedVhd of interruptedVhds.keys()) {
if (!vhds.has(interruptedVhd)) {
const statePath = interruptedVhds.get(interruptedVhd)
const { statePath } = interruptedVhds.get(interruptedVhd)
interruptedVhds.delete(interruptedVhd)
onLog('orphan merge state', {
logWarn('orphan merge state', {
mergeStatePath: statePath,
missingVhdPath: interruptedVhd,
})
if (remove) {
onLog(`deleting orphan merge state ${statePath}`)
logInfo('deleting orphan merge state', { statePath })
await handler.unlink(statePath)
}
}
@@ -242,7 +269,7 @@ exports.cleanVm = async function cleanVm(
// check if alias are correct
// check if all vhd in data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
await checkAliases(aliases[dir], `${dir}/data`, { handler, logInfo, logWarn, remove })
})
// remove VHDs with missing ancestors
@@ -264,9 +291,9 @@ exports.cleanVm = async function cleanVm(
if (!vhds.has(parent)) {
vhds.delete(vhdPath)
onLog(`the parent ${parent} of the VHD ${vhdPath} is missing`)
logWarn('parent VHD is missing', { parent, child: vhdPath })
if (remove) {
onLog(`deleting orphan VHD ${vhdPath}`)
logInfo('deleting orphan VHD', { path: vhdPath })
deletions.push(VhdAbstract.unlink(handler, vhdPath))
}
}
@@ -284,6 +311,7 @@ exports.cleanVm = async function cleanVm(
}
const jsons = new Set()
let mustInvalidateCache = false
const xvas = new Set()
const xvaSums = []
const entries = await handler.list(vmDir, {
@@ -303,7 +331,7 @@ exports.cleanVm = async function cleanVm(
// check is not good enough to delete the file, the best we can do is report
// it
if (!(await this.isValidXva(path))) {
onLog(`the XVA with path ${path} is potentially broken`)
logWarn('XVA might be broken', { path })
}
})
@@ -317,7 +345,7 @@ exports.cleanVm = async function cleanVm(
try {
metadata = JSON.parse(await handler.readFile(json))
} catch (error) {
onLog(`failed to read metadata file ${json}`, { error })
logWarn('failed to read backup metadata', { path: json, error })
jsons.delete(json)
return
}
@@ -328,10 +356,11 @@ exports.cleanVm = async function cleanVm(
if (xvas.has(linkedXva)) {
unusedXvas.delete(linkedXva)
} else {
onLog(`the XVA linked to the metadata ${json} is missing`)
logWarn('the XVA linked to the backup is missing', { backup: json, xva: linkedXva })
if (remove) {
onLog(`deleting incomplete backup ${json}`)
logInfo('deleting incomplete backup', { path: json })
jsons.delete(json)
mustInvalidateCache = true
await handler.unlink(json)
}
}
@@ -351,9 +380,10 @@ exports.cleanVm = async function cleanVm(
vhdsToJSons[path] = json
})
} else {
onLog(`Some VHDs linked to the metadata ${json} are missing`, { missingVhds })
logWarn('some VHDs linked to the backup are missing', { backup: json, missingVhds })
if (remove) {
onLog(`deleting incomplete backup ${json}`)
logInfo('deleting incomplete backup', { path: json })
mustInvalidateCache = true
jsons.delete(json)
await handler.unlink(json)
}
@@ -365,7 +395,7 @@ exports.cleanVm = async function cleanVm(
const unusedVhdsDeletion = []
const toMerge = []
{
// VHD chains (as list from child to ancestor) to merge indexed by last
// VHD chains (as list from oldest to most recent) to merge indexed by most recent
// ancestor
const vhdChainsToMerge = { __proto__: null }
@@ -389,14 +419,14 @@ exports.cleanVm = async function cleanVm(
if (child !== undefined) {
const chain = getUsedChildChainOrDelete(child)
if (chain !== undefined) {
chain.push(vhd)
chain.unshift(vhd)
return chain
}
}
onLog(`the VHD ${vhd} is unused`)
logWarn('unused VHD', { path: vhd })
if (remove) {
onLog(`deleting unused VHD ${vhd}`)
logInfo('deleting unused VHD', { path: vhd })
unusedVhdsDeletion.push(VhdAbstract.unlink(handler, vhd))
}
}
@@ -407,7 +437,13 @@ exports.cleanVm = async function cleanVm(
// merge interrupted VHDs
for (const parent of interruptedVhds.keys()) {
vhdChainsToMerge[parent] = [vhdChildren[parent], parent]
// before #6349 the chain wasn't in the mergeState
const { chain, statePath } = interruptedVhds.get(parent)
if (chain === undefined) {
vhdChainsToMerge[parent] = [parent, vhdChildren[parent]]
} else {
vhdChainsToMerge[parent] = chain.map(vhdPath => handlerPath.resolveFromFile(statePath, vhdPath))
}
}
Object.values(vhdChainsToMerge).forEach(chain => {
@@ -420,9 +456,15 @@ exports.cleanVm = async function cleanVm(
const metadataWithMergedVhd = {}
const doMerge = async () => {
await asyncMap(toMerge, async chain => {
const merged = await limitedMergeVhdChain(chain, { handler, onLog, remove, merge })
const merged = await limitedMergeVhdChain(handler, chain, {
logInfo,
logWarn,
remove,
merge,
mergeBlockConcurrency,
})
if (merged !== undefined) {
const metadataPath = vhdsToJSons[chain[0]] // all the chain should have the same metada file
const metadataPath = vhdsToJSons[chain[chain.length - 1]] // all the chain should have the same metada file
metadataWithMergedVhd[metadataPath] = true
}
})
@@ -432,18 +474,18 @@ exports.cleanVm = async function cleanVm(
...unusedVhdsDeletion,
toMerge.length !== 0 && (merge ? Task.run({ name: 'merge' }, doMerge) : doMerge()),
asyncMap(unusedXvas, path => {
onLog(`the XVA ${path} is unused`)
logWarn('unused XVA', { path })
if (remove) {
onLog(`deleting unused XVA ${path}`)
logInfo('deleting unused XVA', { path })
return handler.unlink(path)
}
}),
asyncMap(xvaSums, path => {
// no need to handle checksums for XVAs deleted by the script, they will be handled by `unlink()`
if (!xvas.has(path.slice(0, -'.checksum'.length))) {
onLog(`the XVA checksum ${path} is unused`)
logInfo('unused XVA checksum', { path })
if (remove) {
onLog(`deleting unused XVA checksum ${path}`)
logInfo('deleting unused XVA checksum', { path })
return handler.unlink(path)
}
}
@@ -465,7 +507,11 @@ exports.cleanVm = async function cleanVm(
if (mode === 'full') {
// a full backup : check size
const linkedXva = resolve('/', vmDir, xva)
fileSystemSize = await handler.getSize(linkedXva)
try {
fileSystemSize = await handler.getSize(linkedXva)
} catch (error) {
// can fail with encrypted remote
}
} else if (mode === 'delta') {
const linkedVhds = Object.keys(vhds).map(key => resolve('/', vmDir, vhds[key]))
fileSystemSize = await computeVhdsSize(handler, linkedVhds)
@@ -477,11 +523,15 @@ exports.cleanVm = async function cleanVm(
// don't warn if the size has changed after a merge
if (!merged && fileSystemSize !== size) {
onLog(`incorrect size in metadata: ${size ?? 'none'} instead of ${fileSystemSize}`)
logWarn('incorrect backup size in metadata', {
path: metadataPath,
actual: size ?? 'none',
expected: fileSystemSize,
})
}
}
} catch (error) {
onLog(`failed to get size of ${metadataPath}`, { error })
logWarn('failed to get backup size', { backup: metadataPath, error })
return
}
@@ -491,11 +541,16 @@ exports.cleanVm = async function cleanVm(
try {
await handler.writeFile(metadataPath, JSON.stringify(metadata), { flags: 'w' })
} catch (error) {
onLog(`failed to update size in backup metadata ${metadataPath} after merge`, { error })
logWarn('failed to update backup size in metadata', { path: metadataPath, error })
}
}
})
// purge cache if a metadata file has been deleted
if (mustInvalidateCache) {
await handler.unlink(vmDir + '/cache.json.gz')
}
return {
// boolean whether some VHDs were merged (or should be merged)
merge: toMerge.length !== 0,

View File

@@ -1,6 +1,7 @@
'use strict'
/* eslint-env jest */
const { beforeEach, afterEach, test, describe } = require('test')
const assert = require('assert').strict
const rimraf = require('rimraf')
const tmp = require('tmp')
@@ -14,9 +15,8 @@ const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')
let tempDir, adapter, handler, jobId, vdiId, basePath
jest.setTimeout(60000)
let tempDir, adapter, handler, jobId, vdiId, basePath, relativePath
const rootPath = 'xo-vm-backups/VMUUID/'
beforeEach(async () => {
tempDir = await pFromCallback(cb => tmp.dir(cb))
@@ -25,7 +25,8 @@ beforeEach(async () => {
adapter = new RemoteAdapter(handler)
jobId = uniqueId()
vdiId = uniqueId()
basePath = `vdis/${jobId}/${vdiId}`
relativePath = `vdis/${jobId}/${vdiId}`
basePath = `${rootPath}/${relativePath}`
await fs.mkdirp(`${tempDir}/${basePath}`)
})
@@ -35,7 +36,7 @@ afterEach(async () => {
})
const uniqueId = () => uuid.v1()
const uniqueIdBuffer = () => Buffer.from(uniqueId(), 'utf-8')
const uniqueIdBuffer = () => uuid.v1({}, Buffer.alloc(16))
async function generateVhd(path, opts = {}) {
let vhd
@@ -76,18 +77,18 @@ test('It remove broken vhd', async () => {
// TODO: also test a directory and an alias
await handler.writeFile(`${basePath}/notReallyAVhd.vhd`, 'I AM NOT A VHD')
expect((await handler.list(basePath)).length).toEqual(1)
assert.equal((await handler.list(basePath)).length, 1)
let loggued = ''
const onLog = message => {
const logInfo = message => {
loggued += message
}
await adapter.cleanVm('/', { remove: false, onLog })
expect(loggued).toEqual(`error while checking the VHD with path /${basePath}/notReallyAVhd.vhd`)
await adapter.cleanVm(rootPath, { remove: false, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued, `VHD check error`)
// not removed
expect((await handler.list(basePath)).length).toEqual(1)
assert.deepEqual(await handler.list(basePath), ['notReallyAVhd.vhd'])
// really remove it
await adapter.cleanVm('/', { remove: true, onLog })
expect((await handler.list(basePath)).length).toEqual(0)
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
assert.deepEqual(await handler.list(basePath), [])
})
test('it remove vhd with missing or multiple ancestors', async () => {
@@ -118,15 +119,13 @@ test('it remove vhd with missing or multiple ancestors', async () => {
)
// clean
let loggued = ''
const onLog = message => {
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
const deletedOrphanVhd = loggued.match(/deleting orphan VHD/g) || []
expect(deletedOrphanVhd.length).toEqual(1) // only one vhd should have been deleted
const deletedAbandonnedVhd = loggued.match(/abandonned.vhd is missing/g) || []
expect(deletedAbandonnedVhd.length).toEqual(1) // and it must be abandonned.vhd
assert.equal(deletedOrphanVhd.length, 1) // only one vhd should have been deleted
// we don't test the files on disk, since they will all be marked as unused and deleted without a metadata.json file
})
@@ -134,12 +133,12 @@ test('it remove vhd with missing or multiple ancestors', async () => {
test('it remove backup meta data referencing a missing vhd in delta backup', async () => {
// create a metadata file marking child and orphan as ok
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
`${relativePath}/orphan.vhd`,
`${relativePath}/child.vhd`,
// abandonned.json is not here
],
})
@@ -159,44 +158,42 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
})
let loggued = ''
const onLog = message => {
const logInfo = message => {
loggued += message + '\n'
}
await adapter.cleanVm('/', { remove: true, onLog })
let matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(1) // only one vhd should have been deleted
matched = loggued.match(/abandonned.vhd is unused/g) || []
expect(matched.length).toEqual(1) // and it must be abandonned.vhd
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
let matched = loggued.match(/deleting unused VHD/g) || []
assert.equal(matched.length, 1) // only one vhd should have been deleted
// a missing vhd cause clean to remove all vhds
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
`deleted.vhd`, // in metadata but not in vhds
`orphan.vhd`,
`child.vhd`,
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
)
loggued = ''
await adapter.cleanVm('/', { remove: true, onLog })
matched = loggued.match(/deleting unused VHD /g) || []
expect(matched.length).toEqual(2) // all vhds (orphan and child ) should have been deleted
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: () => {}, lock: false })
matched = loggued.match(/deleting unused VHD/g) || []
assert.equal(matched.length, 2) // all vhds (orphan and child ) should have been deleted
})
test('it merges delta of non destroyed chain', async () => {
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
size: 12000, // a size too small
vhds: [
`${basePath}/grandchild.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
`${relativePath}/grandchild.vhd`, // grand child should not be merged
`${relativePath}/child.vhd`,
// orphan is not here, it should be merged into child
],
})
@@ -220,36 +217,36 @@ test('it merges delta of non destroyed chain', async () => {
})
let loggued = []
const onLog = message => {
const logInfo = message => {
loggued.push(message)
}
await adapter.cleanVm('/', { remove: true, onLog })
expect(loggued[0]).toEqual(`incorrect size in metadata: 12000 instead of 209920`)
await adapter.cleanVm(rootPath, { remove: true, logInfo, logWarn: logInfo, lock: false })
assert.equal(loggued[0], `incorrect backup size in metadata`)
loggued = []
await adapter.cleanVm('/', { remove: true, merge: true, onLog })
await adapter.cleanVm(rootPath, { remove: true, merge: true, logInfo, logWarn: () => {}, lock: false })
const [merging] = loggued
expect(merging).toEqual(`merging 1 children into /${basePath}/orphan.vhd`)
assert.equal(merging, `merging VHD chain`)
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children after the merge
expect(metadata.size).toEqual(209920)
assert.equal(metadata.size, 209920)
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(2)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
expect(remainingVhds.includes('grandchild.vhd')).toEqual(true)
assert.equal(remainingVhds.length, 2)
assert.equal(remainingVhds.includes('child.vhd'), true)
assert.equal(remainingVhds.includes('grandchild.vhd'), true)
})
test('it finish unterminated merge ', async () => {
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
size: 209920,
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
vhds: [`${relativePath}/orphan.vhd`, `${relativePath}/child.vhd`],
})
)
@@ -275,13 +272,13 @@ test('it finish unterminated merge ', async () => {
})
)
await adapter.cleanVm('/', { remove: true, merge: true })
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
// merging is already tested in vhd-lib, don't retest it here (and these VHDs are as empty as my stomach at 12h12)
// only check deletion
const remainingVhds = await handler.list(basePath)
expect(remainingVhds.length).toEqual(1)
expect(remainingVhds.includes('child.vhd')).toEqual(true)
assert.equal(remainingVhds.length, 1)
assert.equal(remainingVhds.includes('child.vhd'), true)
})
// each of the vhd can be a file, a directory, an alias to a file or an alias to a directory
@@ -371,22 +368,22 @@ describe('tests multiple combination ', () => {
// the metadata file
await handler.writeFile(
`metadata.json`,
`${rootPath}/metadata.json`,
JSON.stringify({
mode: 'delta',
vhds: [
`${basePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${basePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${basePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
`${relativePath}/grandchild.vhd` + (useAlias ? '.alias.vhd' : ''), // grand child should not be merged
`${relativePath}/child.vhd` + (useAlias ? '.alias.vhd' : ''),
`${relativePath}/clean.vhd` + (useAlias ? '.alias.vhd' : ''),
],
})
)
await adapter.cleanVm('/', { remove: true, merge: true })
await adapter.cleanVm(rootPath, { remove: true, merge: true, logWarn: () => {}, lock: false })
const metadata = JSON.parse(await handler.readFile(`metadata.json`))
const metadata = JSON.parse(await handler.readFile(`${rootPath}/metadata.json`))
// size should be the size of children + grand children + clean after the merge
expect(metadata.size).toEqual(vhdMode === 'file' ? 314880 : undefined)
assert.deepEqual(metadata.size, vhdMode === 'file' ? 314880 : undefined)
// broken vhd, non referenced, abandonned should be deleted ( alias and data)
// ancestor and child should be merged
@@ -396,19 +393,19 @@ describe('tests multiple combination ', () => {
if (useAlias) {
const dataSurvivors = await handler.list(basePath + '/data')
// the goal of the alias : do not move a full folder
expect(dataSurvivors).toContain('ancestor.vhd')
expect(dataSurvivors).toContain('grandchild.vhd')
expect(dataSurvivors).toContain('cleanAncestor.vhd')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(4) // the 3 ok + data
expect(dataSurvivors.length).toEqual(3) // the 3 ok + data
assert.equal(dataSurvivors.includes('ancestor.vhd'), true)
assert.equal(dataSurvivors.includes('grandchild.vhd'), true)
assert.equal(dataSurvivors.includes('cleanAncestor.vhd'), true)
assert.equal(survivors.includes('clean.vhd.alias.vhd'), true)
assert.equal(survivors.includes('child.vhd.alias.vhd'), true)
assert.equal(survivors.includes('grandchild.vhd.alias.vhd'), true)
assert.equal(survivors.length, 4) // the 3 ok + data
assert.equal(dataSurvivors.length, 3)
} else {
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
expect(survivors).toContain('grandchild.vhd')
expect(survivors.length).toEqual(3)
assert.equal(survivors.includes('clean.vhd'), true)
assert.equal(survivors.includes('child.vhd'), true)
assert.equal(survivors.includes('grandchild.vhd'), true)
assert.equal(survivors.length, 3)
}
})
}
@@ -418,9 +415,9 @@ describe('tests multiple combination ', () => {
test('it cleans orphan merge states ', async () => {
await handler.writeFile(`${basePath}/.orphan.vhd.merge.json`, '')
await adapter.cleanVm('/', { remove: true })
await adapter.cleanVm(rootPath, { remove: true, logWarn: () => {}, lock: false })
expect(await handler.list(basePath)).toEqual([])
assert.deepEqual(await handler.list(basePath), [])
})
test('check Aliases should work alone', async () => {
@@ -433,12 +430,16 @@ test('check Aliases should work alone', async () => {
await generateVhd(`vhds/data/missingalias.vhd`)
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })
await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', {
remove: true,
handler,
logWarn: () => {},
})
// only ok has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
expect(alias.length).toEqual(1)
assert.equal(alias.length, 1)
const data = await handler.list('vhds/data')
expect(data.length).toEqual(1)
assert.equal(data.length, 1)
})

View File

@@ -1,12 +1,12 @@
'use strict'
const compareVersions = require('compare-versions')
const find = require('lodash/find.js')
const groupBy = require('lodash/groupBy.js')
const ignoreErrors = require('promise-toolbox/ignoreErrors')
const omit = require('lodash/omit.js')
const { asyncMap } = require('@xen-orchestra/async-map')
const { CancelToken } = require('promise-toolbox')
const { compareVersions } = require('compare-versions')
const { createVhdStreamWithLength } = require('vhd-lib')
const { defer } = require('golike-defer')

View File

@@ -3,6 +3,8 @@
const eos = require('end-of-stream')
const { PassThrough } = require('stream')
const { debug } = require('@xen-orchestra/log').createLogger('xo:backups:forkStreamUnpipe')
// create a new readable stream from an existing one which may be piped later
//
// in case of error in the new readable stream, it will simply be unpiped
@@ -11,18 +13,23 @@ exports.forkStreamUnpipe = function forkStreamUnpipe(stream) {
const { forks = 0 } = stream
stream.forks = forks + 1
debug('forking', { forks: stream.forks })
const proxy = new PassThrough()
stream.pipe(proxy)
eos(stream, error => {
if (error !== undefined) {
debug('error on original stream, destroying fork', { error })
proxy.destroy(error)
}
})
eos(proxy, _ => {
stream.forks--
eos(proxy, error => {
debug('end of stream, unpiping', { error, forks: --stream.forks })
stream.unpipe(proxy)
if (stream.forks === 0) {
debug('no more forks, destroying original stream')
stream.destroy(new Error('no more consumers for this stream'))
}
})

View File

@@ -49,6 +49,11 @@ const isValidTar = async (handler, size, fd) => {
// TODO: find an heuristic for compressed files
async function isValidXva(path) {
const handler = this._handler
// size is longer when encrypted + reading part of an encrypted file is not implemented
if (handler.isEncrypted) {
return true
}
try {
const fd = await handler.openFile(path, 'r')
try {
@@ -66,7 +71,6 @@ async function isValidXva(path) {
}
} catch (error) {
// never throw, log and report as valid to avoid side effects
console.error('isValidXva', path, error)
return true
}
}

View File

@@ -14,12 +14,14 @@
## File structure on remote
### with vhd files
```
<remote>
└─ xo-vm-backups
├─ index.json // TODO
└─ <VM UUID>
├─ index.json // TODO
├─ cache.json.gz
├─ vdis
│ └─ <job UUID>
│ └─ <VDI UUID>
@@ -30,6 +32,31 @@
└─ <YYYYMMDD>T<HHmmss>.xva.checksum
```
### with vhd directories
When `useVhdDirectory` is enabled on the remote, the directory containing the VHDs has a slightly different architecture:
```
<vdis>/<job UUID>/<VDI UUID>
├─ <YYYYMMDD>T<HHmmss>.alias.vhd // contains the relative path to a VHD directory
├─ <YYYYMMDD>T<HHmmss>.alias.vhd
└─ data
├─ <uuid>.vhd // VHD directory format is described in vhd-lib/Vhd/VhdDirectory.js
└─ <uuid>.vhd
```
## Cache for a VM
In a VM directory, if the file `cache.json.gz` exists, it contains the metadata for all the backups for this VM.
The cache is stored in the following file: `xo-vm-backups/<VM UUID>/cache.json.gz`.
This cache is compressed with Gzip and contains a JSON object with the metadata of all the backups of this VM, indexed by their absolute path (i.e. `/xo-vm-backups/<VM UUID>/<timestamp>.json`).
This file is generated on demand when listing the backups, and directly updated on backup creation/deletion.
If any inconsistency is detected, the file is deleted so it will be fully regenerated when required.
## Attributes
### Of created snapshots
@@ -69,6 +96,8 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
├─ task.warning(message: string)
├─ task.start(data: { type: 'VM', id: string })
│ ├─ task.warning(message: string)
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ ├─ task.start(message: 'snapshot')
│ │ └─ task.end
│ ├─ task.start(message: 'export', data: { type: 'SR' | 'remote', id: string, isFull: boolean })
@@ -89,12 +118,8 @@ job.start(data: { mode: Mode, reportWhen: ReportWhen })
│ │ ├─ task.start(message: 'clean')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end
│ │
│ │ │ // in case of delta backup
│ │ ├─ task.start(message: 'merge')
│ │ │ ├─ task.warning(message: string)
│ │ │ └─ task.end(result: { size: number })
│ │ │
│ │ └─ task.end
| ├─ task.start(message: 'clean-vm')
│ │ └─ task.end
│ └─ task.end
└─ job.end

View File

@@ -64,7 +64,7 @@ const main = Disposable.wrap(async function* main(args) {
try {
const vmDir = getVmBackupDir(String(await handler.readFile(taskFile)))
try {
await adapter.cleanVm(vmDir, { merge: true, onLog: info, remove: true })
await adapter.cleanVm(vmDir, { merge: true, logInfo: info, logWarn: warn, remove: true })
} catch (error) {
// consider the clean successful if the VM dir is missing
if (error.code !== 'ENOENT') {

View File

@@ -8,24 +8,28 @@
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"version": "0.23.0",
"version": "0.29.1",
"engines": {
"node": ">=14.6"
},
"scripts": {
"postversion": "npm publish --access public"
"postversion": "npm publish --access public",
"test": "node--test"
},
"dependencies": {
"@vates/async-each": "^1.0.0",
"@vates/cached-dns.lookup": "^1.0.0",
"@vates/compose": "^2.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/disposable": "^0.1.1",
"@vates/disposable": "^0.1.3",
"@vates/fuse-vhd": "^1.0.0",
"@vates/nbd-client": "*",
"@vates/parse-duration": "^0.1.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/fs": "^1.0.1",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/fs": "^3.3.0",
"@xen-orchestra/log": "^0.5.0",
"@xen-orchestra/template": "^0.1.0",
"compare-versions": "^4.0.1",
"compare-versions": "^5.0.1",
"d3-time-format": "^3.0.0",
"decorator-synchronized": "^0.6.0",
"end-of-stream": "^1.4.4",
@@ -37,16 +41,18 @@
"parse-pairs": "^1.1.0",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"uuid": "^8.3.2",
"vhd-lib": "^3.1.0",
"uuid": "^9.0.0",
"vhd-lib": "^4.2.0",
"yazl": "^2.5.1"
},
"devDependencies": {
"rimraf": "^3.0.2",
"sinon": "^14.0.1",
"test": "^3.2.1",
"tmp": "^0.2.1"
},
"peerDependencies": {
"@xen-orchestra/xapi": "^1.0.0"
"@xen-orchestra/xapi": "^1.5.3"
},
"license": "AGPL-3.0-or-later",
"author": {

View File

@@ -19,10 +19,9 @@ const { AbstractDeltaWriter } = require('./_AbstractDeltaWriter.js')
const { checkVhd } = require('./_checkVhd.js')
const { packUuid } = require('./_packUuid.js')
const { Disposable } = require('promise-toolbox')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const NbdClient = require('@vates/nbd-client')
const { warn } = createLogger('xo:backups:DeltaBackupWriter')
const { debug, warn } = createLogger('xo:backups:DeltaBackupWriter')
exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(AbstractDeltaWriter) {
async checkBaseVdis(baseUuidToSrcVdi) {
@@ -38,6 +37,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
try {
const vhds = await handler.list(`${vdisDir}/${srcVdi.uuid}`, {
filter: _ => _[0] !== '.' && _.endsWith('.vhd'),
ignoreMissing: true,
prependDir: true,
})
const packedBaseUuid = packUuid(baseUuid)
@@ -71,35 +71,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
return this._cleanVm({ merge: true })
}
healthCheck(sr) {
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
prepare({ isFull }) {
// create the task related to this export and ensure all methods are called in this context
const task = new Task({
@@ -189,7 +160,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}/${adapter.getVhdFileName(basename)}`
)
const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
const metadataContent = {
jobId,
mode: job.mode,
@@ -230,11 +200,30 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
await checkVhd(handler, parentPath)
}
const vdiRef = vm.$xapi.getObject(vdi.uuid).$ref
let nbdClient
if (!this._backup.config.useNbd) {
// get nbd if possible
try {
// this will always take the first host in the list
const [nbdInfo] = await vm.$xapi.call('VDI.get_nbd_info', vdiRef)
nbdClient = new NbdClient(nbdInfo)
await nbdClient.connect()
debug(`got nbd connection `, { vdi: vdi.uuid })
} catch (error) {
nbdClient = undefined
debug(`can't connect to nbd server or no server available`, { error })
}
}
await adapter.writeVhd(path, deltaExport.streams[`${id}.vhd`], {
// no checksum for VHDs, because they will be invalidated by
// merges and chainings
checksum: false,
validator: tmpPath => checkVhd(handler, tmpPath),
writeBlockConcurrency: this._backup.config.writeBlockConcurrency,
nbdClient,
})
if (isDelta) {
@@ -254,9 +243,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
}
})
metadataContent.size = size
await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
dirMode: backup.config.dirMode,
})
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)
// TODO: run cleanup?
}

View File

@@ -34,7 +34,6 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const { job, scheduleId, vm } = backup
const adapter = this._adapter
const handler = adapter.handler
const backupDir = getVmBackupDir(vm.uuid)
// TODO: clean VM backup directory
@@ -50,7 +49,6 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
const dataBasename = basename + '.xva'
const dataFilename = backupDir + '/' + dataBasename
const metadataFilename = `${backupDir}/${basename}.json`
const metadata = {
jobId: job.id,
mode: job.mode,
@@ -74,9 +72,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
return { size: sizeContainer.size }
})
metadata.size = sizeContainer.size
await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
dirMode: backup.config.dirMode,
})
this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)
if (!deleteFirst) {
await deleteOldBackups()

View File

@@ -3,11 +3,15 @@
const { createLogger } = require('@xen-orchestra/log')
const { join } = require('path')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const MergeWorker = require('../merge-worker/index.js')
const assert = require('assert')
const { formatFilenameDate } = require('../_filenameDate.js')
const { getVmBackupDir } = require('../_getVmBackupDir.js')
const { HealthCheckVmBackup } = require('../HealthCheckVmBackup.js')
const { ImportVmBackup } = require('../ImportVmBackup.js')
const { Task } = require('../Task.js')
const MergeWorker = require('../merge-worker/index.js')
const { warn } = createLogger('xo:backups:MixinBackupWriter')
const { info, warn } = createLogger('xo:backups:MixinBackupWriter')
exports.MixinBackupWriter = (BaseClass = Object) =>
class MixinBackupWriter extends BaseClass {
@@ -25,11 +29,18 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
async _cleanVm(options) {
try {
return await this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
onLog: warn,
lock: false,
return await Task.run({ name: 'clean-vm' }, () => {
return this._adapter.cleanVm(this.#vmBackupDir, {
...options,
fixMetadata: true,
logInfo: info,
logWarn: (message, data) => {
warn(message, data)
Task.warning(message, data)
},
lock: false,
mergeBlockConcurrency: this._backup.config.mergeBlockConcurrency,
})
})
} catch (error) {
warn(error)
@@ -64,6 +75,39 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
const remotePath = handler._getRealPath()
await MergeWorker.run(remotePath)
}
await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
}
healthCheck(sr) {
assert.notStrictEqual(
this._metadataFileName,
undefined,
'Metadata file name should be defined before making a healthcheck'
)
return Task.run(
{
name: 'health check',
},
async () => {
const xapi = sr.$xapi
const srUuid = sr.uuid
const adapter = this._adapter
const metadata = await adapter.readVmBackupMetadata(this._metadataFileName)
const { id: restoredId } = await new ImportVmBackup({
adapter,
metadata,
srUuid,
xapi,
}).run()
const restoredVm = xapi.getObject(restoredId)
try {
await new HealthCheckVmBackup({
restoredVm,
xapi,
}).run()
} finally {
await xapi.VM_destroy(restoredVm.$ref)
}
}
)
}
}

View File

@@ -18,7 +18,7 @@
"preferGlobal": true,
"dependencies": {
"golike-defer": "^0.5.1",
"xen-api": "^1.2.0"
"xen-api": "^1.2.2"
},
"scripts": {
"postversion": "npm publish"

View File

@@ -1,24 +1,20 @@
/* eslint-env jest */
'use strict'
const test = require('test')
const assert = require('assert').strict
const sinon = require('sinon')
const { createSchedule } = require('./')
jest.useFakeTimers()
const clock = sinon.useFakeTimers()
const wrap = value => () => value
describe('issues', () => {
test('issues', async t => {
let originalDateNow
beforeAll(() => {
originalDateNow = Date.now
})
afterAll(() => {
Date.now = originalDateNow
originalDateNow = undefined
})
originalDateNow = Date.now
test('stop during async execution', async () => {
await t.test('stop during async execution', async () => {
let nCalls = 0
let resolve, promise
@@ -35,20 +31,20 @@ describe('issues', () => {
job.start()
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
clock.runAll()
expect(nCalls).toBe(1)
assert.strictEqual(nCalls, 1)
job.stop()
resolve()
await promise
jest.runAllTimers()
expect(nCalls).toBe(1)
clock.runAll()
assert.strictEqual(nCalls, 1)
})
test('stop then start during async job execution', async () => {
await t.test('stop then start during async job execution', async () => {
let nCalls = 0
let resolve, promise
@@ -65,9 +61,9 @@ describe('issues', () => {
job.start()
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
clock.runAll()
expect(nCalls).toBe(1)
assert.strictEqual(nCalls, 1)
job.stop()
job.start()
@@ -76,7 +72,10 @@ describe('issues', () => {
await promise
Date.now = wrap(+schedule.next(1)[0])
jest.runAllTimers()
expect(nCalls).toBe(2)
clock.runAll()
assert.strictEqual(nCalls, 2)
})
Date.now = originalDateNow
originalDateNow = undefined
})

View File

@@ -1,7 +1,8 @@
/* eslint-env jest */
'use strict'
const { describe, it } = require('test')
const assert = require('assert').strict
const mapValues = require('lodash/mapValues')
const moment = require('moment-timezone')
@@ -25,24 +26,24 @@ describe('next()', () => {
},
([pattern, result], title) =>
it(title, () => {
expect(N(pattern)).toBe(result)
assert.strictEqual(N(pattern), result)
})
)
it('select first between month-day and week-day', () => {
expect(N('* * 10 * wen')).toBe('2018-04-10T00:00')
expect(N('* * 12 * wen')).toBe('2018-04-11T00:00')
assert.strictEqual(N('* * 10 * wen'), '2018-04-10T00:00')
assert.strictEqual(N('* * 12 * wen'), '2018-04-11T00:00')
})
it('select the last available day of a month', () => {
expect(N('* * 29 feb *')).toBe('2020-02-29T00:00')
assert.strictEqual(N('* * 29 feb *'), '2020-02-29T00:00')
})
it('fails when no solutions has been found', () => {
expect(() => N('0 0 30 feb *')).toThrow('no solutions found for this schedule')
assert.throws(() => N('0 0 30 feb *'), { message: 'no solutions found for this schedule' })
})
it('select the first sunday of the month', () => {
expect(N('* * * * 0', '2018-03-31T00:00')).toBe('2018-04-01T00:00')
assert.strictEqual(N('* * * * 0', '2018-03-31T00:00'), '2018-04-01T00:00')
})
})

View File

@@ -38,6 +38,11 @@
"moment-timezone": "^0.5.14"
},
"scripts": {
"postversion": "npm publish"
"postversion": "npm publish",
"test": "node--test"
},
"devDependencies": {
"sinon": "^14.0.1",
"test": "^3.2.1"
}
}

View File

@@ -1,49 +0,0 @@
/* eslint-env jest */
'use strict'
const parse = require('./parse')
// Jest suite for the cron-pattern parser: covers field expansion (ranges,
// steps, aliases), error reporting positions, and day-of-week normalization.
describe('parse()', () => {
  it('works', () => {
    // mixes steps (*/10), ranges (0-10, 4-11/3) and aliases (jan) in one pattern
    expect(parse('0 0-10 */10 jan,2,4-11/3 *')).toEqual({
      minute: [0],
      hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
      dayOfMonth: [1, 11, 21, 31],
      month: [0, 2, 4, 7, 10],
    })
  })
  it('correctly parse months', () => {
    // numeric months and name aliases must yield the same 0-based values
    expect(parse('* * * 0,11 *')).toEqual({
      month: [0, 11],
    })
    expect(parse('* * * jan,dec *')).toEqual({
      month: [0, 11],
    })
  })
  it('correctly parse days', () => {
    expect(parse('* * * * mon,sun')).toEqual({
      dayOfWeek: [0, 1],
    })
  })
  it('reports missing integer', () => {
    // error messages carry the field name and the 0-based character offset
    expect(() => parse('*/a')).toThrow('minute: missing integer at character 2')
    expect(() => parse('*')).toThrow('hour: missing integer at character 1')
  })
  it('reports invalid aliases', () => {
    expect(() => parse('* * * jan-foo *')).toThrow('month: missing alias or integer at character 10')
  })
  it('dayOfWeek: 0 and 7 bind to sunday', () => {
    expect(parse('* * * * 0')).toEqual({
      dayOfWeek: [0],
    })
    expect(parse('* * * * 7')).toEqual({
      dayOfWeek: [0],
    })
  })
})

View File

@@ -0,0 +1,50 @@
'use strict'
const { describe, it } = require('test')
const assert = require('assert').strict
const parse = require('./parse')
// node:test-runner suite for the cron-pattern parser: covers field expansion
// (ranges, steps, aliases), error reporting positions, and day-of-week
// normalization.
describe('parse()', () => {
  it('works', () => {
    // mixes steps (*/10), ranges (0-10, 4-11/3) and aliases (jan) in one pattern
    assert.deepStrictEqual(parse('0 0-10 */10 jan,2,4-11/3 *'), {
      minute: [0],
      hour: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
      dayOfMonth: [1, 11, 21, 31],
      month: [0, 2, 4, 7, 10],
    })
  })
  it('correctly parse months', () => {
    // numeric months and name aliases must yield the same 0-based values
    assert.deepStrictEqual(parse('* * * 0,11 *'), {
      month: [0, 11],
    })
    assert.deepStrictEqual(parse('* * * jan,dec *'), {
      month: [0, 11],
    })
  })
  it('correctly parse days', () => {
    assert.deepStrictEqual(parse('* * * * mon,sun'), {
      dayOfWeek: [0, 1],
    })
  })
  it('reports missing integer', () => {
    // error messages carry the field name and the 0-based character offset
    assert.throws(() => parse('*/a'), { message: 'minute: missing integer at character 2' })
    assert.throws(() => parse('*'), { message: 'hour: missing integer at character 1' })
  })
  it('reports invalid aliases', () => {
    assert.throws(() => parse('* * * jan-foo *'), { message: 'month: missing alias or integer at character 10' })
  })
  it('dayOfWeek: 0 and 7 bind to sunday', () => {
    assert.deepStrictEqual(parse('* * * * 0'), {
      dayOfWeek: [0],
    })
    assert.deepStrictEqual(parse('* * * * 7'), {
      dayOfWeek: [0],
    })
  })
})

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/emit-async",
"version": "0.1.0",
"version": "1.0.0",
"license": "ISC",
"description": "Emit an event for async listeners to settle",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/emit-async",

View File

@@ -0,0 +1,19 @@
## metadata files
- Older remotes don't have any metadata files
- Remotes used since 5.75 have two files: encryption.json and metadata.json
The metadata files are checked by the sync() method. If the check fails it MUST throw an error and unmount the remote.
If the remote is empty, the `sync` method creates them
### encryption.json
An unencrypted file containing the algorithm and parameters used for this remote.
This MUST NOT contain the key.
### metadata.json
An encrypted JSON file containing the settings of a remote. Today this is a nearly empty JSON file (`{random: <randomuuid>}`); it serves to check whether the encryption key set on the remote is valid, but in the future it will be able to store some remote settings to ease disaster recovery.
If this file can't be read (decrypted, decompressed, …), that means the remote settings have been updated. If the remote is empty, update the `encryption.json` and `metadata.json` files; otherwise raise an error.

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "1.0.1",
"version": "3.3.0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
@@ -17,20 +17,20 @@
"xo-fs": "./cli.js"
},
"engines": {
"node": ">=14"
"node": ">=14.13"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.54.0",
"@aws-sdk/lib-storage": "^3.54.0",
"@aws-sdk/middleware-apply-body-checksum": "^3.58.0",
"@aws-sdk/node-http-handler": "^3.54.0",
"@marsaud/smb2": "^0.18.0",
"@sindresorhus/df": "^3.1.1",
"@vates/async-each": "^0.1.0",
"@vates/async-each": "^1.0.0",
"@vates/coalesce-calls": "^0.1.0",
"@vates/decorate-with": "^2.0.0",
"@vates/read-chunk": "^1.0.1",
"@xen-orchestra/async-map": "^0.1.2",
"@xen-orchestra/log": "^0.3.0",
"@xen-orchestra/log": "^0.5.0",
"bind-property-descriptor": "^2.0.0",
"decorator-synchronized": "^0.6.0",
"execa": "^5.0.0",
@@ -40,9 +40,9 @@
"lodash": "^4.17.4",
"promise-toolbox": "^0.21.0",
"proper-lockfile": "^4.1.2",
"readable-stream": "^3.0.6",
"readable-stream": "^4.1.0",
"through2": "^4.0.2",
"xo-remote-parser": "^0.8.0"
"xo-remote-parser": "^0.9.2"
},
"devDependencies": {
"@babel/cli": "^7.0.0",
@@ -50,11 +50,11 @@
"@babel/plugin-proposal-decorators": "^7.1.6",
"@babel/plugin-proposal-function-bind": "^7.0.0",
"@babel/preset-env": "^7.8.0",
"async-iterator-to-stream": "^1.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"dotenv": "^16.0.0",
"rimraf": "^3.0.0"
"rimraf": "^3.0.0",
"tmp": "^0.2.1"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
@@ -68,5 +68,9 @@
"author": {
"name": "Vates SAS",
"url": "https://vates.fr"
},
"exports": {
".": "./dist/index.js",
"./path": "./dist/path.js"
}
}

View File

@@ -0,0 +1,135 @@
const { pipeline } = require('node:stream')
const { readChunk } = require('@vates/read-chunk')
const crypto = require('crypto')
// Algorithm used to encrypt newly configured remotes.
export const DEFAULT_ENCRYPTION_ALGORITHM = 'aes-256-gcm'
// Sentinel algorithm name meaning "no encryption at all".
export const UNENCRYPTED_ALGORITHM = 'none'

/**
 * Tells whether a remote was encrypted with an algorithm that is no longer
 * the supported one (i.e. anything other than "none" or the current default).
 *
 * @param {string} algorithm - algorithm name read from the remote metadata
 * @returns {boolean} true when the algorithm is a legacy one
 */
export function isLegacyEncryptionAlgorithm(algorithm) {
  const currentAlgorithms = [UNENCRYPTED_ALGORITHM, DEFAULT_ENCRYPTION_ALGORITHM]
  return !currentAlgorithms.includes(algorithm)
}
/**
 * Builds an encryptor object exposing buffer and stream (de)cryption for the
 * given algorithm/key pair.
 *
 * When `key` is undefined a pass-through ("null") encryptor is returned: all
 * four operations return their input unchanged and `ivLength` is 0.
 *
 * @param {string} [algorithm=DEFAULT_ENCRYPTION_ALGORITHM] - an OpenSSL cipher name
 * @param {string|Buffer} [key] - encryption key; its length must match the algorithm
 * @returns {{id: string, algorithm: string, key: *, ivLength: number,
 *   encryptData: Function, encryptStream: Function,
 *   decryptData: Function, decryptStream: Function}}
 * @throws {Error} with `code === 'BAD_ALGORITHM'` when the algorithm is not
 *   available or the key length does not match it
 */
function getEncryptor(algorithm = DEFAULT_ENCRYPTION_ALGORITHM, key) {
  // no key => no encryption: every operation is the identity
  if (key === undefined) {
    return {
      id: 'NULL_ENCRYPTOR',
      algorithm: 'none',
      key: 'none',
      ivLength: 0,
      encryptData: buffer => buffer,
      encryptStream: stream => stream,
      decryptData: buffer => buffer,
      decryptStream: stream => stream,
    }
  }
  // validate algorithm + key length together, and recover the IV length / mode
  const info = crypto.getCipherInfo(algorithm, { keyLength: key.length })
  if (info === undefined) {
    const error = new Error(
      `Either the algorithm ${algorithm} is not available, or the key length ${
        key.length
      } is incorrect. Supported algorithm are ${crypto.getCiphers()}`
    )
    error.code = 'BAD_ALGORITHM'
    throw error
  }
  const { ivLength, mode } = info
  // AEAD modes (gcm/ccm/ocb) append a 16-byte authentication tag to the payload
  const authTagLength = ['gcm', 'ccm', 'ocb'].includes(mode) ? 16 : 0

  // encrypted stream layout: IV, then ciphertext, then (AEAD only) the auth tag
  function encryptStream(input) {
    return pipeline(
      input,
      async function* (source) {
        const iv = crypto.randomBytes(ivLength)
        const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
        yield iv
        for await (const data of source) {
          yield cipher.update(data)
        }
        yield cipher.final()
        // must write the auth tag at the end of the encryption stream
        if (authTagLength > 0) {
          yield cipher.getAuthTag()
        }
      },
      () => {}
    )
  }

  function decryptStream(encryptedStream) {
    return pipeline(
      encryptedStream,
      async function* (source) {
        /**
         * WARNING
         *
         * the encrypted payload contains an initialization vector + possibly
         * an auth tag + padding at the end; we can't predict the decrypted
         * size from the encrypted size, thus we can't set decrypted.length
         * reliably
         */
        const iv = await readChunk(source, ivLength)
        const cipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
        // the trailing `authTagLength` bytes of the stream are the auth tag,
        // not ciphertext, so always hold back that many bytes until the next
        // chunk proves they were regular data
        let authTag = Buffer.alloc(0)
        for await (const data of source) {
          if (data.length >= authTagLength) {
            // fast path, no buffer concat: the previously held-back bytes
            // were not the tag after all, flush them, then hold back the
            // tail of this chunk instead
            yield cipher.update(authTag)
            authTag = data.slice(data.length - authTagLength)
            yield cipher.update(data.slice(0, data.length - authTagLength))
          } else {
            // slower since there is a concat
            const fullData = Buffer.concat([authTag, data])
            const fullDataLength = fullData.length
            if (fullDataLength > authTagLength) {
              authTag = fullData.slice(fullDataLength - authTagLength)
              yield cipher.update(fullData.slice(0, fullDataLength - authTagLength))
            } else {
              authTag = fullData
            }
          }
        }
        if (authTagLength > 0) {
          cipher.setAuthTag(authTag)
        }
        yield cipher.final()
      },
      () => {}
    )
  }

  // buffer layout mirrors the stream layout: IV | ciphertext | [auth tag]
  function encryptData(buffer) {
    const iv = crypto.randomBytes(ivLength)
    const cipher = crypto.createCipheriv(algorithm, Buffer.from(key), iv)
    const encrypted = cipher.update(buffer)
    return Buffer.concat([iv, encrypted, cipher.final(), authTagLength > 0 ? cipher.getAuthTag() : Buffer.alloc(0)])
  }

  function decryptData(buffer) {
    const iv = buffer.slice(0, ivLength)
    const decipher = crypto.createDecipheriv(algorithm, Buffer.from(key), iv)
    let encrypted
    if (authTagLength > 0) {
      // the tag must be registered before final() for AEAD modes
      const authTag = buffer.slice(buffer.length - authTagLength)
      decipher.setAuthTag(authTag)
      encrypted = buffer.slice(ivLength, buffer.length - authTagLength)
    } else {
      encrypted = buffer.slice(ivLength)
    }
    const decrypted = decipher.update(encrypted)
    return Buffer.concat([decrypted, decipher.final()])
  }

  return {
    id: algorithm,
    algorithm,
    key,
    ivLength,
    encryptData,
    encryptStream,
    decryptData,
    decryptStream,
  }
}
exports._getEncryptor = getEncryptor

View File

@@ -0,0 +1,50 @@
/* eslint-env jest */
import { Readable } from 'node:stream'
import { _getEncryptor } from './_encryptor'
import crypto from 'crypto'
const algorithms = ['none', 'aes-256-cbc', 'aes-256-gcm']
/**
 * Drains a readable stream and collects its chunks into a single Buffer.
 *
 * @param {import('stream').Readable} stream - the stream to consume
 * @returns {Promise<Buffer>} resolves with the concatenated chunks;
 *   rejects if the stream emits an error
 */
function streamToBuffer(stream) {
  return new Promise((resolve, reject) => {
    const bufs = []
    stream.on('data', d => bufs.push(d))
    // without this handler a failing stream left the promise pending forever
    stream.on('error', reject)
    stream.on('end', () => resolve(Buffer.concat(bufs)))
  })
}
// Runs the same round-trip checks for every supported algorithm, including the
// 'none' pass-through encryptor (exercised by passing an undefined key).
algorithms.forEach(algorithm => {
  describe(`test algorithm ${algorithm}`, () => {
    // 'none' means no key: _getEncryptor then returns the identity encryptor
    const key = algorithm === 'none' ? undefined : '73c1838d7d8a6088ca2317fb5f29cd91'
    const encryptor = _getEncryptor(algorithm, key)
    // 1 MiB + 1 byte: deliberately not a multiple of the cipher block size
    const buffer = crypto.randomBytes(1024 * 1024 + 1)
    it('handle buffer', () => {
      const encrypted = encryptor.encryptData(buffer)
      if (algorithm !== 'none') {
        expect(encrypted.equals(buffer)).toEqual(false) // encrypted should be different
        // ivlength, auth tag, padding
        expect(encrypted.length).not.toEqual(buffer.length)
      }
      // round trip must restore the exact original bytes
      const decrypted = encryptor.decryptData(encrypted)
      expect(decrypted.equals(buffer)).toEqual(true)
    })
    it('handle stream', async () => {
      const stream = Readable.from(buffer)
      stream.length = buffer.length
      const encrypted = encryptor.encryptStream(stream)
      if (algorithm !== 'none') {
        // encryption makes the final size unpredictable, so length is dropped
        expect(encrypted.length).toEqual(undefined)
      }
      const decrypted = encryptor.decryptStream(encrypted)
      const decryptedBuffer = await streamToBuffer(decrypted)
      expect(decryptedBuffer.equals(buffer)).toEqual(true)
    })
  })
})

View File

@@ -1,15 +1,20 @@
import asyncMapSettled from '@xen-orchestra/async-map/legacy'
import assert from 'assert'
import getStream from 'get-stream'
import { coalesceCalls } from '@vates/coalesce-calls'
import { createLogger } from '@xen-orchestra/log'
import { fromCallback, fromEvent, ignoreErrors, timeout } from 'promise-toolbox'
import { limitConcurrency } from 'limit-concurrency-decorator'
import { parse } from 'xo-remote-parser'
import { pipeline } from 'stream'
import { randomBytes } from 'crypto'
import { randomBytes, randomUUID } from 'crypto'
import { synchronized } from 'decorator-synchronized'
import { basename, dirname, normalize as normalizePath } from './_path'
import { basename, dirname, normalize as normalizePath } from './path'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
import { DEFAULT_ENCRYPTION_ALGORITHM, _getEncryptor } from './_encryptor'
const { info, warn } = createLogger('@xen-orchestra:fs')
const checksumFile = file => file + '.checksum'
const computeRate = (hrtime, size) => {
@@ -20,6 +25,9 @@ const computeRate = (hrtime, size) => {
const DEFAULT_TIMEOUT = 6e5 // 10 min
const DEFAULT_MAX_PARALLEL_OPERATIONS = 10
const ENCRYPTION_DESC_FILENAME = 'encryption.json'
const ENCRYPTION_METADATA_FILENAME = 'metadata.json'
const ignoreEnoent = error => {
if (error == null || error.code !== 'ENOENT') {
throw error
@@ -60,6 +68,15 @@ class PrefixWrapper {
}
export default class RemoteHandlerAbstract {
#encryptor
get _encryptor() {
if (this.#encryptor === undefined) {
throw new Error(`Can't access to encryptor before remote synchronization`)
}
return this.#encryptor
}
constructor(remote, options = {}) {
if (remote.url === 'test://') {
this._remote = remote
@@ -108,90 +125,51 @@ export default class RemoteHandlerAbstract {
await this.__closeFile(fd)
}
// TODO: remove method
async createOutputStream(file, { checksum = false, dirMode, ...options } = {}) {
async createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (options.end !== undefined || options.start !== undefined) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of a file when encryption is active ${file}`)
}
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout.call(
this._createOutputStream(file, {
dirMode,
flags: 'wx',
...options,
}),
let stream = await timeout.call(
this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }),
this._timeout
)
if (!checksum) {
return streamP
}
// detect early errors
await fromEvent(stream, 'readable')
const checksumStream = createChecksumStream()
const forwardError = error => {
checksumStream.emit('error', error)
}
if (checksum) {
try {
const path = typeof file === 'string' ? file : file.path
const checksum = await this._readFile(checksumFile(path), { flags: 'r' })
const stream = await streamP
stream.on('error', forwardError)
checksumStream.pipe(stream)
checksumStream.checksumWritten = checksumStream.checksum
.then(value => this._outputFile(checksumFile(path), value, { flags: 'wx' }))
.catch(forwardError)
return checksumStream
}
createReadStream(file, { checksum = false, ignoreMissingChecksum = false, ...options } = {}) {
if (typeof file === 'string') {
file = normalizePath(file)
}
const path = typeof file === 'string' ? file : file.path
const streamP = timeout
.call(this._createReadStream(file, { ...options, highWaterMark: this._highWaterMark }), this._timeout)
.then(stream => {
// detect early errors
let promise = fromEvent(stream, 'readable')
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
promise = Promise.all([
promise,
ignoreErrors.call(
this._getSize(file).then(size => {
stream.length = size
})
),
])
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
} catch (error) {
if (!(ignoreMissingChecksum && error.code === 'ENOENT')) {
throw error
}
return promise.then(() => stream)
})
if (!checksum) {
return streamP
}
// avoid a unhandled rejection warning
ignoreErrors.call(streamP)
return this._readFile(checksumFile(path), { flags: 'r' }).then(
checksum =>
streamP.then(stream => {
const { length } = stream
stream = validChecksumOfReadStream(stream, String(checksum).trim())
stream.length = length
return stream
}),
error => {
if (ignoreMissingChecksum && error && error.code === 'ENOENT') {
return streamP
}
throw error
}
)
}
if (this.isEncrypted) {
stream = this._encryptor.decryptStream(stream)
} else {
// try to add the length prop if missing and not a range stream
if (stream.length === undefined && options.end === undefined && options.start === undefined) {
try {
stream.length = await this._getSize(file)
} catch (error) {
// ignore errors
}
}
}
return stream
}
/**
@@ -207,6 +185,8 @@ export default class RemoteHandlerAbstract {
async outputStream(path, input, { checksum = true, dirMode, validator } = {}) {
path = normalizePath(path)
let checksumStream
input = this._encryptor.encryptStream(input)
if (checksum) {
checksumStream = createChecksumStream()
pipeline(input, checksumStream, noop)
@@ -217,6 +197,8 @@ export default class RemoteHandlerAbstract {
validator,
})
if (checksum) {
// using _outputFile means the checksum will NOT be encrypted
// it is by design to allow checking of encrypted files without the key
await this._outputFile(checksumFile(path), await checksumStream.checksum, { dirMode, flags: 'wx' })
}
}
@@ -236,8 +218,13 @@ export default class RemoteHandlerAbstract {
return timeout.call(this._getInfo(), this._timeout)
}
// when using encryption, the file size is aligned with the encryption block size ( 16 bytes )
// that means that the size will be 1 to 16 bytes more than the content size + the initialized vector length (16 bytes)
async getSize(file) {
return timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
assert.strictEqual(this.isEncrypted, false, `Can't compute size of an encrypted file ${file}`)
const size = await timeout.call(this._getSize(typeof file === 'string' ? normalizePath(file) : file), this._timeout)
return size - this._encryptor.ivLength
}
async list(dir, { filter, ignoreMissing = false, prependDir = false } = {}) {
@@ -283,26 +270,39 @@ export default class RemoteHandlerAbstract {
}
async outputFile(file, data, { dirMode, flags = 'wx' } = {}) {
await this._outputFile(normalizePath(file), data, { dirMode, flags })
const encryptedData = this._encryptor.encryptData(data)
await this._outputFile(normalizePath(file), encryptedData, { dirMode, flags })
}
async read(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't read part of an encrypted file ${file}`)
return this._read(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async readFile(file, { flags = 'r' } = {}) {
return this._readFile(normalizePath(file), { flags })
const data = await this._readFile(normalizePath(file), { flags })
return this._encryptor.decryptData(data)
}
async rename(oldPath, newPath, { checksum = false } = {}) {
oldPath = normalizePath(oldPath)
newPath = normalizePath(newPath)
let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
async #rename(oldPath, newPath, { checksum }, createTree = true) {
try {
let p = timeout.call(this._rename(oldPath, newPath), this._timeout)
if (checksum) {
p = Promise.all([p, this._rename(checksumFile(oldPath), checksumFile(newPath))])
}
await p
} catch (error) {
// ENOENT can be a missing target directory OR a missing source
if (error.code === 'ENOENT' && createTree) {
await this._mktree(dirname(newPath))
return this.#rename(oldPath, newPath, { checksum }, false)
}
throw error
}
return p
}
rename(oldPath, newPath, { checksum = false } = {}) {
return this.#rename(normalizePath(oldPath), normalizePath(newPath), { checksum })
}
async copy(oldPath, newPath, { checksum = false } = {}) {
@@ -331,6 +331,71 @@ export default class RemoteHandlerAbstract {
@synchronized()
async sync() {
await this._sync()
try {
await this._checkMetadata()
} catch (error) {
await this._forget()
throw error
}
}
async _canWriteMetadata() {
const list = await this.list('/', {
filter: e => !e.startsWith('.') && e !== ENCRYPTION_DESC_FILENAME && e !== ENCRYPTION_METADATA_FILENAME,
})
return list.length === 0
}
async _createMetadata() {
const encryptionAlgorithm = this._remote.encryptionKey === undefined ? 'none' : DEFAULT_ENCRYPTION_ALGORITHM
this.#encryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
await Promise.all([
this._writeFile(normalizePath(ENCRYPTION_DESC_FILENAME), JSON.stringify({ algorithm: encryptionAlgorithm }), {
flags: 'w',
}), // not encrypted
this.writeFile(ENCRYPTION_METADATA_FILENAME, `{"random":"${randomUUID()}"}`, { flags: 'w' }), // encrypted
])
}
async _checkMetadata() {
let encryptionAlgorithm = 'none'
let data
try {
// this file is not encrypted
data = await this._readFile(normalizePath(ENCRYPTION_DESC_FILENAME), 'utf-8')
const json = JSON.parse(data)
encryptionAlgorithm = json.algorithm
} catch (error) {
if (error.code !== 'ENOENT') {
throw error
}
encryptionAlgorithm = this._remote.encryptionKey === undefined ? 'none' : DEFAULT_ENCRYPTION_ALGORITHM
}
try {
this.#encryptor = _getEncryptor(encryptionAlgorithm, this._remote.encryptionKey)
// this file is encrypted
const data = await this.readFile(ENCRYPTION_METADATA_FILENAME, 'utf-8')
JSON.parse(data)
} catch (error) {
// can be ENOENT, a bad algorithm, or broken JSON (bad key or algorithm)
if (encryptionAlgorithm !== 'none') {
if (await this._canWriteMetadata()) {
// any other error , but on empty remote => update with remote settings
info('will update metadata of this remote')
return this._createMetadata()
} else {
warn(
`The encryptionKey settings of this remote does not match the key used to create it. You won't be able to read any data from this remote`,
{ error }
)
// will probably send a ERR_OSSL_EVP_BAD_DECRYPT if key is incorrect
throw error
}
}
}
}
async test() {
@@ -357,11 +422,12 @@ export default class RemoteHandlerAbstract {
readRate: computeRate(readDuration, SIZE),
}
} catch (error) {
warn(`error while testing the remote at step ${step}`, { error })
return {
success: false,
step,
file: testFileName,
error: error.message || String(error),
error,
}
} finally {
ignoreErrors.call(this._unlink(testFileName))
@@ -383,11 +449,13 @@ export default class RemoteHandlerAbstract {
}
async write(file, buffer, position) {
assert.strictEqual(this.isEncrypted, false, `Can't write part of a file with encryption ${file}`)
await this._write(typeof file === 'string' ? normalizePath(file) : file, buffer, position)
}
async writeFile(file, data, { flags = 'wx' } = {}) {
await this._writeFile(normalizePath(file), data, { flags })
const encryptedData = this._encryptor.encryptData(data)
await this._writeFile(normalizePath(file), encryptedData, { flags })
}
// Methods that can be called by private methods to avoid parallel limit on public methods
@@ -420,6 +488,10 @@ export default class RemoteHandlerAbstract {
// Methods that can be implemented by inheriting classes
useVhdDirectory() {
return this._remote.useVhdDirectory ?? false
}
async _closeFile(fd) {
throw new Error('Not implemented')
}
@@ -502,9 +574,13 @@ export default class RemoteHandlerAbstract {
async _outputStream(path, input, { dirMode, validator }) {
const tmpPath = `${dirname(path)}/.${basename(path)}`
const output = await this.createOutputStream(tmpPath, {
dirMode,
})
const output = await timeout.call(
this._createOutputStream(tmpPath, {
dirMode,
flags: 'wx',
}),
this._timeout
)
try {
await fromCallback(pipeline, input, output)
if (validator !== undefined) {
@@ -587,6 +663,10 @@ export default class RemoteHandlerAbstract {
async _writeFile(file, data, options) {
throw new Error('Not implemented')
}
get isEncrypted() {
return this._encryptor.id !== 'NULL_ENCRYPTOR'
}
}
function createPrefixWrapperMethods() {

View File

@@ -1,21 +1,29 @@
/* eslint-env jest */
import { TimeoutError } from 'promise-toolbox'
import { DEFAULT_ENCRYPTION_ALGORITHM, _getEncryptor } from './_encryptor'
import { Disposable, pFromCallback, TimeoutError } from 'promise-toolbox'
import { getSyncedHandler } from '.'
import AbstractHandler from './abstract'
import fs from 'fs-extra'
import rimraf from 'rimraf'
import tmp from 'tmp'
const TIMEOUT = 10e3
class TestHandler extends AbstractHandler {
constructor(impl) {
super({ url: 'test://' }, { timeout: TIMEOUT })
Object.defineProperty(this, 'isEncrypted', {
get: () => false, // encryption is tested separatly
})
Object.keys(impl).forEach(method => {
this[`_${method}`] = impl[method]
})
}
}
const noop = Function.prototype
jest.useFakeTimers()
describe('closeFile()', () => {
@@ -30,18 +38,6 @@ describe('closeFile()', () => {
})
})
describe('createOutputStream()', () => {
it(`throws in case of timeout`, async () => {
const testHandler = new TestHandler({
createOutputStream: () => new Promise(() => {}),
})
const promise = testHandler.createOutputStream('File')
jest.advanceTimersByTime(TIMEOUT)
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('getInfo()', () => {
it('throws in case of timeout', async () => {
const testHandler = new TestHandler({
@@ -113,3 +109,112 @@ describe('rmdir()', () => {
await expect(promise).rejects.toThrowError(TimeoutError)
})
})
describe('encryption', () => {
let dir
beforeEach(async () => {
dir = await pFromCallback(cb => tmp.dir(cb))
})
afterAll(async () => {
await pFromCallback(cb => rimraf(dir, cb))
})
it('sync should NOT create metadata if missing (not encrypted)', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
expect(await fs.readdir(dir)).toEqual([])
})
it('sync should create metadata if missing (encrypted)', async () => {
await Disposable.use(
getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` }),
noop
)
expect(await fs.readdir(dir)).toEqual(['encryption.json', 'metadata.json'])
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
// encrypted , should not be parsable
expect(async () => JSON.parse(await fs.readFile(`${dir}/metadata.json`))).rejects.toThrowError()
})
it('sync should not modify existing metadata', async () => {
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "none"}`)
await fs.writeFile(`${dir}/metadata.json`, `{"random": "NOTSORANDOM"}`)
await Disposable.use(await getSyncedHandler({ url: `file://${dir}` }), noop)
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual('none')
const metadata = JSON.parse(await fs.readFile(`${dir}/metadata.json`, 'utf-8'))
expect(metadata.random).toEqual('NOTSORANDOM')
})
it('should modify metadata if empty', async () => {
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
// nothing created without encryption
await Disposable.use(
getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` }),
noop
)
let encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
await Disposable.use(getSyncedHandler({ url: `file://${dir}` }), noop)
encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual('none')
})
it(
'sync should work with encrypted',
Disposable.wrap(async function* () {
const encryptor = _getEncryptor(DEFAULT_ENCRYPTION_ALGORITHM, '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "${DEFAULT_ENCRYPTION_ALGORITHM}"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
const handler = yield getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` })
const encryption = JSON.parse(await fs.readFile(`${dir}/encryption.json`, 'utf-8'))
expect(encryption.algorithm).toEqual(DEFAULT_ENCRYPTION_ALGORITHM)
const metadata = JSON.parse(await handler.readFile(`./metadata.json`))
expect(metadata.random).toEqual('NOTSORANDOM')
})
)
it('sync should fail when changing key on non empty remote ', async () => {
const encryptor = _getEncryptor(DEFAULT_ENCRYPTION_ALGORITHM, '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "${DEFAULT_ENCRYPTION_ALGORITHM}"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
// different key but empty remote => ok
await Disposable.use(
getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd00"` }),
noop
)
// remote is now non empty : can't modify key anymore
await fs.writeFile(`${dir}/nonempty.json`, 'content')
await expect(
Disposable.use(getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd10"` }), noop)
).rejects.toThrowError()
})
it('sync should fail when changing algorithm', async () => {
// encrypt with a non default algorithm
const encryptor = _getEncryptor('aes-256-cbc', '73c1838d7d8a6088ca2317fb5f29cd91')
await fs.writeFile(`${dir}/encryption.json`, `{"algorithm": "aes-256-gmc"}`)
await fs.writeFile(`${dir}/metadata.json`, encryptor.encryptData(`{"random": "NOTSORANDOM"}`))
// remote is now non empty : can't modify key anymore
await fs.writeFile(`${dir}/nonempty.json`, 'content')
await expect(
Disposable.use(getSyncedHandler({ url: `file://${dir}?encryptionKey="73c1838d7d8a6088ca2317fb5f29cd91"` }), noop)
).rejects.toThrowError()
})
})

View File

@@ -1,10 +1,7 @@
/* eslint-env jest */
import 'dotenv/config'
import asyncIteratorToStream from 'async-iterator-to-stream'
import { forOwn, random } from 'lodash'
import { fromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { tmpdir } from 'os'
import { getHandler } from '.'
@@ -27,9 +24,6 @@ const unsecureRandomBytes = n => {
const TEST_DATA_LEN = 1024
const TEST_DATA = unsecureRandomBytes(TEST_DATA_LEN)
const createTestDataStream = asyncIteratorToStream(function* () {
yield TEST_DATA
})
const rejectionOf = p =>
p.then(
@@ -82,14 +76,6 @@ handlers.forEach(url => {
})
})
describe('#createOutputStream()', () => {
it('creates parent dir if missing', async () => {
const stream = await handler.createOutputStream('dir/file')
await fromCallback(pipeline, createTestDataStream(), stream)
await expect(await handler.readFile('dir/file')).toEqual(TEST_DATA)
})
})
describe('#getInfo()', () => {
let info
beforeAll(async () => {
@@ -242,6 +228,17 @@ handlers.forEach(url => {
expect(await handler.list('.')).toEqual(['file2'])
expect(await handler.readFile(`file2`)).toEqual(TEST_DATA)
})
it(`should rename the file and create dest directory`, async () => {
await handler.outputFile('file', TEST_DATA)
await handler.rename('file', `sub/file2`)
expect(await handler.list('sub')).toEqual(['file2'])
expect(await handler.readFile(`sub/file2`)).toEqual(TEST_DATA)
})
it(`should fail with enoent if source file is missing`, async () => {
const error = await rejectionOf(handler.rename('file', `sub/file2`))
expect(error.code).toBe('ENOENT')
})
})
describe('#rmdir()', () => {

View File

@@ -5,7 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerS3 from './s3'
import RemoteHandlerSmb from './smb'
import RemoteHandlerSmbMount from './smb-mount'
export { DEFAULT_ENCRYPTION_ALGORITHM, UNENCRYPTED_ALGORITHM, isLegacyEncryptionAlgorithm } from './_encryptor'
const HANDLERS = {
file: RemoteHandlerLocal,
@@ -15,10 +15,8 @@ const HANDLERS = {
try {
execa.sync('mount.cifs', ['-V'])
HANDLERS.smb = RemoteHandlerSmbMount
} catch (_) {
HANDLERS.smb = RemoteHandlerSmb
}
} catch (_) {}
export const getHandler = (remote, ...rest) => {
const Handler = HANDLERS[parse(remote.url).type]

View File

@@ -1,13 +1,38 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import lockfile from 'proper-lockfile'
import { createLogger } from '@xen-orchestra/log'
import { fromEvent, retry } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
const { info, warn } = createLogger('xo:fs:local')
// Capture the caller's stack synchronously and attach it to any rejection.
//
// Useful when a rejection surfaces far from the call that triggered it
// (typical with RPC-style libs): the original call site stays available on
// `error.syncStack`.
//
// There is a perf impact and it should be avoided in production.
async function addSyncStackTrace(fn, ...args) {
  // must be created before the first await, while the caller is still on the stack
  const syncCallSite = new Error()
  try {
    return await fn.apply(this, args)
  } catch (error) {
    error.syncStack = syncCallSite.stack
    throw error
  }
}
// No-op counterpart of `addSyncStackTrace`: forwards the call unchanged,
// used when stack-trace capture is disabled.
function dontAddSyncStackTrace(fn, ...args) {
  return Reflect.apply(fn, this, args)
}
export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote, opts = {}) {
super(remote)
this._addSyncStackTrace = opts.syncStackTraces ?? true ? addSyncStackTrace : dontAddSyncStackTrace
this._retriesOnEagain = {
delay: 1e3,
retries: 9,
@@ -30,17 +55,17 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {
return fs.close(fd)
return this._addSyncStackTrace(fs.close, fd)
}
async _copy(oldPath, newPath) {
return fs.copy(this._getFilePath(oldPath), this._getFilePath(newPath))
return this._addSyncStackTrace(fs.copy, this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _createReadStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createReadStream(this._getFilePath(file), options)
await fromEvent(stream, 'open')
await this._addSyncStackTrace(fromEvent, stream, 'open')
return stream
}
return fs.createReadStream('', {
@@ -53,7 +78,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
async _createWriteStream(file, options) {
if (typeof file === 'string') {
const stream = fs.createWriteStream(this._getFilePath(file), options)
await fromEvent(stream, 'open')
await this._addSyncStackTrace(fromEvent, stream, 'open')
return stream
}
return fs.createWriteStream('', {
@@ -79,71 +104,98 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _getSize(file) {
const stats = await fs.stat(this._getFilePath(typeof file === 'string' ? file : file.path))
const stats = await this._addSyncStackTrace(fs.stat, this._getFilePath(typeof file === 'string' ? file : file.path))
return stats.size
}
async _list(dir) {
return fs.readdir(this._getFilePath(dir))
return this._addSyncStackTrace(fs.readdir, this._getFilePath(dir))
}
_lock(path) {
return lockfile.lock(this._getFilePath(path))
async _lock(path) {
const acquire = lockfile.lock.bind(undefined, this._getFilePath(path), {
async onCompromised(error) {
warn('lock compromised', { error })
try {
release = await acquire()
info('compromised lock was reacquired')
} catch (error) {
warn('compromised lock could not be reacquired', { error })
}
},
})
let release = await this._addSyncStackTrace(acquire)
return async () => {
try {
await this._addSyncStackTrace(release)
} catch (error) {
warn('lock could not be released', { error })
}
}
}
_mkdir(dir, { mode }) {
return fs.mkdir(this._getFilePath(dir), { mode })
return this._addSyncStackTrace(fs.mkdir, this._getFilePath(dir), { mode })
}
async _openFile(path, flags) {
return fs.open(this._getFilePath(path), flags)
return this._addSyncStackTrace(fs.open, this._getFilePath(path), flags)
}
async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
file = needsClose ? await this._addSyncStackTrace(fs.open, this._getFilePath(file), 'r') : file.fd
try {
return await fs.read(file, buffer, 0, buffer.length, position === undefined ? null : position)
return await this._addSyncStackTrace(
fs.read,
file,
buffer,
0,
buffer.length,
position === undefined ? null : position
)
} finally {
if (needsClose) {
await fs.close(file)
await this._addSyncStackTrace(fs.close, file)
}
}
}
async _readFile(file, options) {
const filePath = this._getFilePath(file)
return await retry(() => fs.readFile(filePath, options), this._retriesOnEagain)
return await this._addSyncStackTrace(retry, () => fs.readFile(filePath, options), this._retriesOnEagain)
}
async _rename(oldPath, newPath) {
return fs.rename(this._getFilePath(oldPath), this._getFilePath(newPath))
return this._addSyncStackTrace(fs.rename, this._getFilePath(oldPath), this._getFilePath(newPath))
}
async _rmdir(dir) {
return fs.rmdir(this._getFilePath(dir))
return this._addSyncStackTrace(fs.rmdir, this._getFilePath(dir))
}
async _sync() {
const path = this._getRealPath('/')
await fs.ensureDir(path)
await fs.access(path, fs.R_OK | fs.W_OK)
await this._addSyncStackTrace(fs.ensureDir, path)
await this._addSyncStackTrace(fs.access, path, fs.R_OK | fs.W_OK)
}
_truncate(file, len) {
return fs.truncate(this._getFilePath(file), len)
return this._addSyncStackTrace(fs.truncate, this._getFilePath(file), len)
}
async _unlink(file) {
const filePath = this._getFilePath(file)
return await retry(() => fs.unlink(filePath), this._retriesOnEagain)
return await this._addSyncStackTrace(retry, () => fs.unlink(filePath), this._retriesOnEagain)
}
_writeFd(file, buffer, position) {
return fs.write(file.fd, buffer, 0, buffer.length, position)
return this._addSyncStackTrace(fs.write, file.fd, buffer, 0, buffer.length, position)
}
_writeFile(file, data, { flags }) {
return fs.writeFile(this._getFilePath(file), data, { flag: flags })
return this._addSyncStackTrace(fs.writeFile, this._getFilePath(file), data, { flag: flags })
}
}

View File

@@ -1,6 +1,6 @@
import path from 'path'
const { basename, dirname, join, resolve, sep } = path.posix
const { basename, dirname, join, resolve, relative, sep } = path.posix
export { basename, dirname, join }
@@ -19,3 +19,6 @@ export function split(path) {
return parts
}
// Express `path` relative to the directory that contains `file`.
export function relativeFromFile(file, path) {
  const base = dirname(file)
  return relative(base, path)
}

// Resolve `path` against the directory containing `file`; anchoring the
// resolution at '/' keeps it from escaping the remote root, and the leading
// slash is then dropped to return a remote-relative path.
export function resolveFromFile(file, path) {
  return resolve('/', dirname(file), path).slice(1)
}

View File

@@ -27,7 +27,7 @@ import copyStreamToBuffer from './_copyStreamToBuffer.js'
import createBufferFromStream from './_createBufferFromStream.js'
import guessAwsRegion from './_guessAwsRegion.js'
import RemoteHandlerAbstract from './abstract'
import { basename, join, split } from './_path'
import { basename, join, split } from './path'
import { asyncEach } from '@vates/async-each'
// endpoints https://docs.aws.amazon.com/general/latest/gr/s3.html
@@ -155,6 +155,14 @@ export default class S3Handler extends RemoteHandlerAbstract {
if (e.name === 'EntityTooLarge') {
return this._multipartCopy(oldPath, newPath)
}
// normalize this error code
if (e.name === 'NoSuchKey') {
const error = new Error(`ENOENT: no such file or directory '${oldPath}'`)
error.cause = e
error.code = 'ENOENT'
error.path = oldPath
throw error
}
throw e
}
}
@@ -525,4 +533,8 @@ export default class S3Handler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {}
useVhdDirectory() {
return true
}
}

View File

@@ -1,23 +0,0 @@
import { parse } from 'xo-remote-parser'
import MountHandler from './_mount'
import { normalize } from './_path'
/**
 * Remote handler exposing an SMB/CIFS share by mounting it locally
 * (delegates the actual mount to the generic MountHandler).
 */
export default class SmbMountHandler extends MountHandler {
  constructor(remote, opts) {
    // NOTE(review): assumes parse() yields these fields for smb:// URLs —
    // confirm against xo-remote-parser
    const { domain = 'WORKGROUP', host, password, path, username } = parse(remote.url)
    const device = `//${host}${normalize(path)}`
    super(remote, opts, {
      type: 'cifs',
      device,
      options: `domain=${domain}`,
      // mount.cifs reads credentials from the environment, which keeps them
      // off the command line
      env: {
        USER: username,
        PASSWD: password,
      },
    })
  }

  get type() {
    return 'smb'
  }
}

Some files were not shown because too many files have changed in this diff Show More