Compare commits


210 Commits

Author SHA1 Message Date
Julien Fontanet
62979d5c22 feat(xo-server): 5.20.1 2018-06-15 15:35:00 +02:00
Julien Fontanet
ec8a4da73c feat(xo-vmdk-to-vhd): 0.1.3 2018-06-15 15:34:33 +02:00
Julien Fontanet
dea1bfee01 feat(xo-server-backup-reports): 0.12.2 2018-06-15 15:32:02 +02:00
Julien Fontanet
c18b82504a feat(xen-api): 0.16.10 2018-06-15 15:30:59 +02:00
Julien Fontanet
ed5460273f feat(vhd-lib): 0.1.2 2018-06-15 15:27:21 +02:00
Julien Fontanet
b91f8b21b9 feat(fs): 0.1.0 2018-06-15 15:26:48 +02:00
Julien Fontanet
5cea18e577 feat(delta NG): check VDIs before export (#3069) 2018-06-15 15:22:12 +02:00
Julien Fontanet
148eaa6a72 fix(xo-server/backup NG): retention checks (#3072)
They were broken by the introduction of `copyRetention`.
2018-06-15 12:13:55 +02:00
Rajaa.BARHTAOUI
80794211af feat(xo-server,xo-web/snapshots): fast-clone to create VM from snapshot (#3030)
Fixes #2937
2018-06-14 15:01:19 +02:00
Rajaa.BARHTAOUI
75dcbae417 chore(xo-web/backup): deprecate legacy backup creation (#3035)
Fixes #2956
2018-06-14 14:18:17 +02:00
Rajaa.BARHTAOUI
b19682b3c5 fix(xo-web/New VM): networks predicate in Self Service mode (#3027)
Fixes #3011
2018-06-14 13:25:08 +02:00
badrAZ
dd3b97cae2 feat(backup/overview): add tooltip to migrate action button (#3067)
Fixes #3042
2018-06-14 11:06:28 +02:00
Julien Fontanet
79891235f3 chore(CHANGELOG): add missing entry related to prev commit 2018-06-13 15:34:11 +02:00
Julien Fontanet
1e2f72ab6b feat(backup NG): new setting copyRetention (#2976)
Fixes #2895
2018-06-13 15:24:46 +02:00
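In practice this splits what was a single retention into two per-schedule settings. A minimal sketch of the result (`copyRetention` comes from the commit title; the sibling name and the object's shape are assumptions):

```js
// Backup retention and replication retention are now configured separately.
const scheduleSettings = {
  exportRetention: 7, // assumed name: backups kept on the remote
  copyRetention: 2, // replication copies kept on the target SR
}
```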
Julien Fontanet
66d02e3808 feat(xo-server/backup NG): more logs (#3066) 2018-06-13 13:11:03 +02:00
Julien Fontanet
275e1f8f4c chore(xo-server): Xapi#_assertHealthyVdiChains is sync 2018-06-13 12:01:41 +02:00
Julien Fontanet
84dbbb0fbb chore(xo-server/backups-ng): add FIXME 2018-06-13 11:33:05 +02:00
Julien Fontanet
a36ef5209c fix(backup NG): only display the concerned tasks (#3063)
Allows us to add other tasks if necessary without breaking xo-web listing and the backup reports.
2018-06-12 13:18:46 +02:00
badrAZ
3497889302 fix(backup-ng): only display the concerned tasks 2018-06-12 11:53:43 +02:00
badrAZ
0a2f6b4ce7 feat(xo-web/backupNg/new): improve backupNg feedback (#2873)
See #2711
2018-06-12 11:52:19 +02:00
Julien Fontanet
f8be44d746 chore(xo-server): remove Xapi#barrier legacy type param 2018-06-11 18:00:16 +02:00
Julien Fontanet
379253c5ae chore(xen-api): lodash.forEach is unnecessary here 2018-06-11 17:57:02 +02:00
Julien Fontanet
aed1ba474c chore(xen-api): remove unnecessary test
`eventWatchers` is defined if event watching is enabled, which is always the case here.
2018-06-11 17:55:47 +02:00
badrAZ
bc72e67442 feat(backup NG): new option to shutdown VMs before snapshot (#3060)
Fixes #3058
2018-06-11 17:25:18 +02:00
Julien Fontanet
26c965faa9 feat(xen-api/examples): handle cancelation 2018-06-11 15:48:43 +02:00
Pierre Donias
b3a3965ed2 feat(xo-web/SR): copy VDIs' UUIDs from Disks tab (#3059)
Fixes #3051
2018-06-11 14:57:37 +02:00
Julien Fontanet
7f88b46f4c chore(xen-api/examples): update deps 2018-06-08 15:36:29 +02:00
badrAZ
dd60d82d3d fix(xo-web/Backup-ng/logs): ability to retry a single failed/interrupted VM backup (#3052)
Fixes #2912
2018-06-08 10:52:38 +02:00
Julien Fontanet
4eeb995340 chore(CHANGELOG): update for next release 2018-06-08 10:32:00 +02:00
Julien Fontanet
1d29348e30 feat(xo-server-backup-reports): 0.12.1 2018-06-07 18:46:53 +02:00
Pierre Donias
a24db3f896 fix(xo-web/SortedTable): show grouped actions when all items selected (#3049)
Fixes #3048
2018-06-07 17:26:58 +02:00
Julien Fontanet
cffac27d0a feat(xo-server/jobs): implement cancelation (#3046)
Related to #3047

This is the first step toward Backup NG cancelation: the server-side part should be OK (but needs testing), so the next step is to expose it in the UI.
2018-06-07 17:20:06 +02:00
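A minimal sketch of the cancel-token pattern, assuming promise-toolbox's `CancelToken` (promise-toolbox is a dependency seen throughout these diffs); the job-runner wiring is illustrative, not taken from this commit:

```js
import { CancelToken } from 'promise-toolbox'

// Run a job's calls one by one, aborting between calls once cancelation
// has been requested.
async function runJob (token, calls) {
  for (const call of calls) {
    token.throwIfRequested() // throws if cancel() has already been called
    await call()
  }
}

const { cancel, token } = CancelToken.source()
const calls = [async () => console.log('call 1'), async () => console.log('call 2')]
runJob(token, calls).catch(error => console.error('job stopped:', error))
cancel('canceled by the user') // e.g. triggered from the UI
```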
Julien Fontanet
b207cbdd77 feat(fs/read): read part of a file in an existing Buffer (#3036)
Easier to use and probably more efficient than `createReadStream` for this specific usage.
2018-06-07 17:19:33 +02:00
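A usage sketch of the new API, whose signature appears in the `@xen-orchestra/fs` diff further down (`read(file, buffer, position)` resolves to `{ bytesRead, buffer }`); the handler and file name are placeholders:

```js
// Read 512 bytes at offset 0 straight into an existing buffer: no stream
// to create, pipe and drain.
const buffer = Buffer.alloc(512)
const { bytesRead } = await handler.read('vm.vhd', buffer, 0)
console.log(`read ${bytesRead} bytes`)
```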
badrAZ
10baecefb9 fix(xo-server-backup-reports): do not display size and speed if there is no transfer/merge (#3038) 2018-06-07 16:34:49 +02:00
badrAZ
42620323a9 fix(xo-web/Backup-ng): add label to Edit action (#3045)
Fixes #3043
2018-06-07 16:21:31 +02:00
Julien Fontanet
4d91006994 feat(xo-server/backupNg.getAllLogs): sort tasks (#3041) 2018-06-07 14:04:46 +02:00
badrAZ
a81f0b9a93 feat(xo-web/Backup NG/logs): display whether the export is delta/full (#3023)
See #2711
2018-06-07 12:30:44 +02:00
Julien Fontanet
2cee413ae1 chore(PR template): should reference issue 2018-06-07 12:14:58 +02:00
Nicolas Raynaud
53099eacc8 chore(xo-vmdk-to-vhd): split a file and rename some consts (#2966) 2018-06-06 16:49:18 +02:00
badrAZ
b628c5c07e fix(xo-server-backup-reports): handle the case when a transfer/merge fails (#3020) 2018-06-06 15:21:46 +02:00
Julien Fontanet
12889b6a09 fix(xo-server/Xapi#importDeltaVm): correctly copy task prop (#3034) 2018-06-06 14:26:19 +02:00
badrAZ
0c23ca5b66 feat(xo-web/Backup NG/logs): details are now dynamic (#3031) 2018-06-06 14:25:31 +02:00
Pierre Donias
d732ee3ade fix(xo-web/file restore NG): restrict to Premium (#3032) 2018-06-06 13:37:28 +02:00
badrAZ
65cb0bc4cf fix(xo-server-backup-reports): correctly send status to Nagios (#3019)
Fixes #2991
2018-06-05 17:55:06 +02:00
badrAZ
1ba68a94e3 fix(xo-server): vm.xenTools.* must be numbers (#3022) 2018-06-05 14:30:30 +02:00
Julien Fontanet
084430451a feat(xo-web): 5.20.1 2018-05-31 21:00:34 +02:00
Julien Fontanet
458a4d4efe fix(xo-web/backup NG logs): dont display size/speed if size is 0 2018-05-31 20:59:47 +02:00
Julien Fontanet
62eeab2a74 feat(xo-web): 5.20.0 2018-05-31 18:33:13 +02:00
Julien Fontanet
790b43910d feat(xo-server): 5.20.0 2018-05-31 18:33:13 +02:00
Julien Fontanet
ba65461c4d feat(xo-server-usage-report): 0.5.0 2018-05-31 18:29:45 +02:00
Julien Fontanet
5bd468791f feat(xo-server-backup-reports): 0.12.0 2018-05-31 18:28:21 +02:00
Julien Fontanet
37f71bb36c feat(xo-acl-resolver): 0.2.4 2018-05-31 18:26:53 +02:00
Julien Fontanet
2ed4b7ad3f feat(vhd-lib): 0.1.1 2018-05-31 17:59:02 +02:00
Julien Fontanet
7eb970f22a feat(fs): 0.0.1 2018-05-31 17:57:13 +02:00
badrAZ
13db4a8411 feat(Backup NG): improve logs (#3013) 2018-05-31 17:54:35 +02:00
badrAZ
49a7a89bbf feat(xo-web/new-vm): ability to use template vars in the CloudConfig (#3006)
Fixes #2140
2018-05-31 17:44:52 +02:00
Rajaa.BARHTAOUI
0af8a60c1c fix(xo-web/SR/disks): show VM templates attached to VDIs (#3012)
Fixes #2974
2018-05-31 17:32:34 +02:00
Rajaa.BARHTAOUI
e1650b376c fix(xo-web/Self new VM): do not auto-select resource set's 1st SR (#3007)
New behaviour: auto-select the template's pool's default SR if it is in
the resource set; otherwise do not auto-select any SR.

Fixes #3001
2018-05-31 16:57:52 +02:00
Rajaa.BARHTAOUI
873b40cc70 feat(xo-server,xo-web): allow setting remote syslog host (#2958)
Fixes #2900
2018-05-31 16:22:39 +02:00
Rajaa.BARHTAOUI
d45265b180 feat(xo-web): create single-server private network (#3004)
Fixes #2944
2018-05-31 11:25:08 +02:00
badrAZ
ff50b2848e feat(xo-server,xo-web): allow pool admins to create VMs (#2995)
Fixes #2350
2018-05-31 10:42:03 +02:00
Julien Fontanet
d67fae22ab chore: initial PR template 2018-05-30 18:10:11 +02:00
badrAZ
d809002558 feat(Backup NG): ability to retry a single failed VM backup (#3009)
Fixes #2912
2018-05-30 17:04:50 +02:00
Pierre Donias
5c30559d15 feat(xo-server,xo-web): handle XCP-ng patches (#3005) 2018-05-30 16:03:43 +02:00
Rajaa.BARHTAOUI
cbb5b011e1 fix(xo-web/SR/disks): show control domain VMs attached to VDIs (#3002)
Fixes #2999
2018-05-30 15:55:44 +02:00
Julien Fontanet
f5bff408a8 feat(xo-server/exportDeltaVm): dont check chain if no snapshot
Should fix a race condition with XenServer DB during Delta Backup NG.
2018-05-30 12:22:10 +02:00
Nicolas Raynaud
d7cfe4d3dc fix(vhd/merge): fix performance enhancement (#2980) 2018-05-30 10:26:15 +02:00
badrAZ
7be8f38c6b fix(xo-web/jobs/edit): support all types of inputs (#2997) 2018-05-30 10:20:22 +02:00
badrAZ
08a7e605ce feat(xo-web/backup logs): show # of calls for each state (#2860) 2018-05-30 09:58:48 +02:00
Julien Fontanet
4b57db5893 feat(xo-server/delta NG): validate parent VHD 2018-05-29 16:36:33 +02:00
Julien Fontanet
8b1ae3f3c9 fix(xo-server/delta NG): dont select tmp file as parent 2018-05-29 16:36:10 +02:00
Julien Fontanet
77d35a5928 chore(fs/Handler#list): prepend dir after filtering 2018-05-29 16:34:51 +02:00
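The order matters because `filter` is applied to bare entry names. A usage sketch of `RemoteHandlerAbstract#list` (options as in the diff further down; paths hypothetical):

```js
// `filter` sees bare names such as 'disk.vhd'; `prependDir` then prefixes
// each kept entry with the listed directory.
const vhds = await handler.list('backups/vm-1', {
  filter: name => name.endsWith('.vhd'),
  prependDir: true,
})
```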
Julien Fontanet
323d409e6c chore(package): require yarn >1.7
Previous releases prevented xo-web from being built due to an incorrect resolution of dependencies.
2018-05-29 16:29:14 +02:00
Pierre Donias
9f2f2b7b69 fix(xo-web/render-xo-item): show SR container name (#3003)
Fixes #3000
2018-05-29 12:01:50 +02:00
Julien Fontanet
b44fa7beca chore(xo-server): bump xo:perf threshold to 500ms 2018-05-29 10:53:14 +02:00
Julien Fontanet
6d4e310b8e chore(vhd-cli/build): migrate to Babel 7 2018-05-28 18:18:44 +02:00
Julien Fontanet
6726530229 fix(tests): run all tests with Babel 7
Temporarily disable xo-web tests, which use Babel 6.
2018-05-28 18:18:44 +02:00
Julien Fontanet
8351352541 chore(xo-remote-parser/build): migrate to Babel 7 2018-05-28 18:18:44 +02:00
Julien Fontanet
3f9e8d79ea chore(xo-collection/build): migrate to Babel 7 2018-05-28 18:18:44 +02:00
Julien Fontanet
685f2328bd chore(package): update dependencies 2018-05-28 14:54:55 +02:00
Pierre Donias
746567a8a7 fix(xo-acl-resolver,xo-web): SR & PIF ACLs inheritance (#2994) 2018-05-25 15:26:45 +02:00
Julien Fontanet
c116c41c42 chore(package): update dependencies 2018-05-25 11:42:55 +02:00
Pierre Donias
3768a7de37 fix(xo-web/select): do not auto-select disabled option (#2992) 2018-05-25 11:11:21 +02:00
Julien Fontanet
11ef0ee54f feat(rolling snapshots legacy): check VDI chains (#2986) 2018-05-24 16:39:06 +02:00
Julien Fontanet
33ae531e3a fix(package): update hashy to 0.7.1
Correctly handle hash with identifier `2b`.
2018-05-24 14:52:38 +02:00
Julien Fontanet
8cc9924751 feat(xo-server/importDeltaVm): add UUID of missing base VM 2018-05-24 00:04:52 +02:00
Julien Fontanet
c329ab863b feat(Backup NG): configurable concurrency (#2918) 2018-05-23 16:29:31 +02:00
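A sketch of such a concurrency cap using `limit-concurrency-decorator`, whose Flow declaration is added in this compare; the backup function and VM list are hypothetical:

```js
import limitConcurrency from 'limit-concurrency-decorator'

// At most 2 backups run at a time; extra calls are queued.
const backupVm = limitConcurrency(2)(async vm => {
  // snapshot, export, transfer…
})
await Promise.all(vms.map(vm => backupVm(vm)))
```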
Julien Fontanet
41820ea316 fix(xo-server/backup legacy): fix reports (#2979) 2018-05-23 15:56:40 +02:00
badrAZ
bf00f80716 fix(xo-web): update @julien-f/freactal to 0.1.1 (#2972)
This fix prevents the cursor from jumping to the end of the input when editing the backup's name.
2018-05-23 14:47:28 +02:00
Julien Fontanet
9baf0c74e4 chore(vhd-lib): move some deps to devDeps 2018-05-23 11:49:59 +02:00
Julien Fontanet
b59ccdf26f chore(package): update dependencies 2018-05-23 11:23:25 +02:00
Julien Fontanet
9cae978923 feat(xo-server): 5.19.9 2018-05-22 19:43:19 +02:00
Julien Fontanet
311d914b96 feat(xo-web/import): remove restriction for Free 2018-05-22 16:27:17 +02:00
Julien Fontanet
592cb4ef9e feat(xo-web): 5.19.8 2018-05-22 15:53:45 +02:00
Julien Fontanet
ec2db7f2d0 feat(xo-vmdk-to-vhd): 0.1.2 2018-05-22 15:53:03 +02:00
Julien Fontanet
71eab7ba9b feat(xo-web): 5.19.7 2018-05-22 15:36:54 +02:00
badrAZ
5e07171d60 fix(xo-server/backup-ng): fix incorrect condition (#2971) 2018-05-22 11:45:02 +02:00
Rajaa.BARHTAOUI
3f73e3d964 fix(xo-server,xo-web): message when no Xen tools (#2916)
Fixes #2911
2018-05-22 09:57:46 +02:00
badrAZ
0ebe78b4a2 feat(xo-server-usage-report): improve the report (#2970)
Fixes #2968
2018-05-21 16:25:40 +02:00
Julien Fontanet
61c3379298 feat(xo-server): 5.19.8 2018-05-21 11:00:19 +02:00
Julien Fontanet
44866f3316 feat(xo-web): 5.19.6 2018-05-21 10:42:37 +02:00
Julien Fontanet
4bb8ce8779 feat(vhd-lib): 0.1.0 2018-05-21 10:03:37 +02:00
Julien Fontanet
58eb6a8b5f feat(xo-server): 5.19.7 2018-05-18 18:44:34 +02:00
Julien Fontanet
52f6a79e01 fix(xo-server/backupNg/logs): include merge/transfer size (#2965) 2018-05-18 18:44:07 +02:00
Julien Fontanet
129f79d44b feat(xo-web): 5.19.5 2018-05-18 18:42:31 +02:00
Julien Fontanet
385c3eb563 feat(xo-vmdk-to-vhd): 0.1.1 2018-05-18 18:40:26 +02:00
Julien Fontanet
e56be51b45 chore(xo-server/backups-ng): remove incorrect TODO 2018-05-18 17:14:50 +02:00
Olivier Lambert
24ae65b254 fix(xo-server/sr.createNfs): nfsVersion → nfsOptions (#2904) 2018-05-18 16:28:02 +02:00
badrAZ
d5dffbacbd fix(xo-web/FormattedDuration): handle duration < 0 seconds (#2964) 2018-05-18 15:06:23 +02:00
Julien Fontanet
c6ae969a82 fix(xo-server/https): ask for passphrase (#2963)
Fixes #2962
2018-05-18 15:05:49 +02:00
Nicolas Raynaud
005a9fdc01 fix(xo-vmdk-to-vhd): various bugs (#2961) 2018-05-18 14:02:19 +02:00
Jerome Charaoui
f505d4d911 Fix SR creation when using options or NFSv4 (#2960) 2018-05-17 22:12:09 +02:00
badrAZ
8ada6b121e fix(backup-ng/logs): handle the case when transfer duration equals 0 (#2954) 2018-05-17 16:58:29 +02:00
Julien Fontanet
b9a87efb0d fix(xo-server/backupNg): dont fail on corrupted VHDs (#2957)
Corrupted VHD files (usually uncleaned temporary files) could make the job fail.
2018-05-17 11:27:02 +02:00
Pierre Donias
89485a82d2 feat(xo-web): make many objects' UUID copiable (#2955)
Fixes #2925

- host/tab-network
- pool/tab-network
- vm/tab-disks
- vm/tab-network
- vm/tab-snapshots
2018-05-16 17:39:47 +02:00
Pierre Donias
451f87c6b4 feat(xo-web/servers): allow unauthorized cert. when adding server (#2953)
Fixes #2926
2018-05-16 13:44:27 +02:00
Rajaa.BARHTAOUI
c3cb5a3221 feat(xo-server,xo-web): VM HA options (#2946)
Fixes #2917
2018-05-16 13:27:40 +02:00
Julien Fontanet
458609ed2e feat(xo-server): 5.19.6 2018-05-16 10:32:59 +02:00
Julien Fontanet
fcec8113f3 fix(xo-server/backupNg): await writeStream (#2951) 2018-05-16 10:32:38 +02:00
Julien Fontanet
ebbd882ee4 feat(xo-web): 5.19.4 2018-05-15 17:44:25 +02:00
Julien Fontanet
0506e19a66 chore(xo-server/backups-ng): update todo list 2018-05-15 17:44:09 +02:00
Pierre Donias
ecc62e4f54 fix(xo-web/xosan): install packs button condition (#2950) 2018-05-15 17:40:40 +02:00
Julien Fontanet
2b95eb4e4d feat(xo-web): 5.19.3 2018-05-15 16:11:53 +02:00
Julien Fontanet
bcde9e0f74 feat(xo-server): 5.19.5 2018-05-15 16:11:34 +02:00
Pierre Donias
114501ebc7 feat(XOSAN): allow user to update packs (#2782) 2018-05-15 16:11:04 +02:00
badrAZ
ebab7c0867 fix(backup-ng/logs): handle the case when transfer/merge duration equals 0 (#2949) 2018-05-15 16:10:17 +02:00
Julien Fontanet
0e2270fb6e feat(xo-web): 5.19.2 2018-05-15 14:46:33 +02:00
Julien Fontanet
593493ec0c feat(xo-server): 5.19.4 2018-05-15 14:46:07 +02:00
Julien Fontanet
d92898a806 feat(xo-vmdk-to-vhd): 0.1.0 2018-05-15 14:45:19 +02:00
Julien Fontanet
7890e46551 feat(xo-server-backup-reports): 0.11.0 2018-05-15 14:42:32 +02:00
Julien Fontanet
ef942a6209 feat(Backup NG): implement logs and reports (#2869) 2018-05-15 14:40:11 +02:00
Nicolas Raynaud
fdde916388 feat(xo-web/vms-import): redirect to VM or home page (#2942)
If a single VM has been imported, redirect to its page.

If multiple VMs have been imported, redirect to the home page with all other VMs filtered out.
2018-05-14 17:42:11 +02:00
Julien Fontanet
31314d201b fix(xo-server/backupNg/delta): await deletion/merge 2018-05-14 15:38:11 +02:00
Julien Fontanet
a29a949c51 fix(xo-server/backupNg/delta): deleteFirst iff retention > 1 2018-05-14 15:37:09 +02:00
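The rationale: with a retention of 1, deleting the previous backup before exporting the new one would leave no backup at all if the export failed. A one-line sketch of the assumed guard:

```js
// Honour deleteFirst only when a failed export would still leave at least
// one older backup around.
const deleteFirst = settings.deleteFirst && retention > 1
```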
Julien Fontanet
cc1ce8c5f8 chore: update yarn.lock 2018-05-14 13:53:03 +02:00
Nicolas Raynaud
a21bf4ebe5 chore: major VHD code restructuring (#2808)
Related to #2792
2018-05-14 13:48:16 +02:00
Julien Fontanet
3d0420dbd9 fix(xo-server/backupNg): clean metadata on VM itself (#2945) 2018-05-14 11:47:34 +02:00
Julien Fontanet
04c74dd30f fix: missing bit of last commit 2018-05-11 20:17:02 +02:00
Julien Fontanet
2f256291ae fix(xo-server/backup legacy/delta import): autofix path (#2941)
Because the path might be incorrect and be `_full.vhd` instead of `_delta.vhd`.

I know…
2018-05-11 20:16:00 +02:00
Julien Fontanet
bcb66a4145 fix(xo-server/backup NG/listReplicatedVms): avoid templates and snapshots 2018-05-11 18:45:47 +02:00
Fabrice Marsaud
2d9368062e fix(xo-web/xoa-updater): dont block paying plans when updater is on error (#2939) 2018-05-11 17:18:24 +02:00
Pierre Donias
b110bacf61 fix(xo-server/patches): bulk install (#2935) 2018-05-09 17:32:11 +02:00
Julien Fontanet
78afdc0af5 feat(xo-web): 5.19.1 2018-05-07 13:19:59 +02:00
Julien Fontanet
ad6cd7985a feat(xo-server): 5.19.3 2018-05-07 13:07:21 +02:00
Julien Fontanet
a61661776d fix(xo-server/listVmBackupsNg): handle missing vhds field
This field is only present on delta backups
2018-05-07 11:02:14 +02:00
Julien Fontanet
1a9ebddcab fix(xo-server/listBackupNgPartitionFiles): missing await
Strange thing though: it works in dev compilation mode…

Fixes #2929
2018-05-07 10:33:44 +02:00
Julien Fontanet
7ab907a854 feat(xo-server/backup NG): file restore (#2889) 2018-05-06 18:38:47 +02:00
Olivier Lambert
68a34f7cdb feat(changelog): update changelog 2018-05-05 12:05:56 +02:00
Rajaa.BARHTAOUI
da4ff3082d feat(xo-web/health): list VM snapshots related to non-existing backup jobs (#2899)
Fixes #2828
2018-05-04 15:59:11 +02:00
Rajaa.BARHTAOUI
9c05a59b5f feat(xo-web/SR/advanced): add VDI UUID in coalesce table (#2919)
Fixes #2903
2018-05-04 12:28:15 +02:00
Rajaa.BARHTAOUI
6780146505 feat(xo-web/patches): better "restart host" warnings (#2909)
Fixes #2866
2018-05-04 10:41:02 +02:00
Julien Fontanet
2758833fc6 feat(xo-server): 5.19.2 2018-05-03 19:12:48 +02:00
Julien Fontanet
2786d7ec46 fix(vhd/createSyntheticReadStream): sectorsPerBlock → sectorsPerBlockData 2018-05-03 19:11:01 +02:00
Julien Fontanet
945a2006c9 feat(xo-server/legacy backup/import): use synthetic stream (#2913) 2018-05-02 17:48:13 +02:00
badrAZ
b9e574e32f fix(SR/tab-stats): fix IOPS's and IOwait's values format (#2914) 2018-05-02 14:17:13 +02:00
Julien Fontanet
34f1ef1680 feat(xo-server): 5.19.1 2018-05-01 17:26:15 +02:00
Julien Fontanet
4ac4310bc1 fix(xo-server/importDeltaVm): remove extra return
It prevented the VBD creation in case of a new VDI.
2018-05-01 17:25:46 +02:00
Julien Fontanet
a10997ca66 feat(xo-web): 5.19.0 2018-05-01 16:13:20 +02:00
Julien Fontanet
0e52a4c7dc feat(xo-server): 5.19.0 2018-05-01 16:12:44 +02:00
Julien Fontanet
a4b3e22c2b feat(xo-server-perf-alert): 0.1.0 2018-05-01 16:10:10 +02:00
Julien Fontanet
441bd7c754 feat(xo-server-auth-saml): 0.5.2 2018-05-01 16:09:06 +02:00
badrAZ
ddbd32d1cb chore(xo-web/backup-ng/new): single effect to toggle modes (#2908) 2018-04-30 11:19:45 +02:00
Pierre Donias
a5b0cbeaea feat(xo-server-perf-alert): SR disk space (#2905) 2018-04-27 17:38:19 +02:00
Rajaa.BARHTAOUI
c6f3b2b1ce feat(xo-web/XOA update): display "Downgrade" when trial is over (#2845)
Fixes #1483
2018-04-27 10:05:27 +02:00
Pierre Donias
3d869d9fa1 chore(xo-web/health): remove irrelevant actions on VDIs (#2882) 2018-04-26 17:37:28 +02:00
Julien Fontanet
7a5229741f chore: disable tests on Node 9 due to upath error 2018-04-26 17:21:00 +02:00
Rajaa.BARHTAOUI
78e0c2d8fa feat(xo-web/SortedTable): support link actions (#2893) 2018-04-26 15:46:10 +02:00
Pierre Donias
5928984069 feat(xo-web/home): sort by container name (#2901)
Fixes #2680
2018-04-26 15:21:48 +02:00
Julien Fontanet
61a472f108 chore(xo-server/vhd/createReadStream): improve genericity (#2865)
It should now be pretty easy to make it generate a delta VHD, which should be very useful when merging multiple deltas together (during deletion).
2018-04-24 18:06:58 +02:00
Julien Fontanet
e45f78ea20 fix(xo-web/backup-ng): delete backups sequentially (#2855)
- sequentially: to limit merge issues
- from newest to oldest: to avoid unnecessary merges
2018-04-23 16:35:34 +02:00
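A sketch matching the two bullets above (shape assumed; `deleteBackup` is a hypothetical helper):

```js
// Newest first, one at a time: the awaited loop keeps deletions (and the
// VHD merges they may trigger) sequential.
const sorted = selectedBackups.sort((a, b) => b.timestamp - a.timestamp)
for (const backup of sorted) {
  await deleteBackup(backup)
}
```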
Olivier Lambert
b3ae9d88eb feat(vm): expose vendor device features in advanced tab. Fixes #2883 (#2894) 2018-04-23 15:02:40 +02:00
Pierre Donias
f7f26537be fix(xo-web/vm/network): bad import after file name change (#2892)
Fixes #2891

Introduced by 769c32a1b1
2018-04-23 14:08:56 +02:00
Julien Fontanet
96848fc6d4 fix(xo-server/importDeltaVm): create VBDs earlier (#2885)
To avoid orphan VDIs as much as possible.
2018-04-20 17:33:36 +02:00
Julien Fontanet
51e6f0c79f feat(xo-server-usage-report): 0.4.2 2018-04-19 09:39:47 +02:00
badrAZ
4f94ad40b7 fix(xo-server-usage-report): handle missing metrics (#2880) 2018-04-18 16:30:30 +02:00
Pierre Donias
43e1eb9939 fix(xo-web/Ellipsis): handle patchedRender wrapping (#2881)
Also use Ellipsis on resource set name in home/VM view
2018-04-18 11:00:09 +02:00
Julien Fontanet
1f6d7de861 feat(xo-server-usage-report): 0.4.1 2018-04-17 14:03:32 +02:00
Julien Fontanet
bd623c2daf feat(xo-web): 5.18.3 2018-04-17 11:36:00 +02:00
Pierre Donias
40c71c2102 fix(xo-web/SelectSr): "pool" typo (#2878)
Fixes #2875
2018-04-17 11:05:56 +02:00
Nicolas Raynaud
72a1580eff fix(xo-server/vhd-merge.integ.spec): increase timeout (#2874) 2018-04-17 11:04:30 +02:00
Julien Fontanet
9e2404a0d7 feat(xo-web): 5.18.2 2018-04-16 17:32:24 +02:00
Julien Fontanet
7dd84d1518 feat(xo-server): 5.18.3 2018-04-16 17:29:02 +02:00
Julien Fontanet
d800db5d09 fix(xo-web/backup-ng/new): fix empty srs & remotes
Send `undefined` instead of `false`.
2018-04-16 17:26:27 +02:00
Julien Fontanet
2714ccff38 fix(xo-server/backupNg.{create,edit}Job): check srs param 2018-04-16 17:15:35 +02:00
Julien Fontanet
1d493e411b fix(xo-server/backups-ng): correctly detect delta exports
Fixes #2833
2018-04-16 16:54:55 +02:00
Julien Fontanet
2a0c222f2d chore(xo-server): use xen-api 0.16.9 2018-04-16 16:30:59 +02:00
Julien Fontanet
641d68de0e feat(xen-api): 0.16.9 2018-04-16 16:29:41 +02:00
Julien Fontanet
2dd0fd660b chore(xo-server/backups-ng): update todo list 2018-04-16 16:28:09 +02:00
badrAZ
bb5441c7bc feat(xo-web/SelectSr): add container name to SRs that share the same name (#2824)
Fixes #1762
2018-04-16 16:16:55 +02:00
badrAZ
eeea9e662b fix(xo-web/backup-ng/new): rename edit button and change cancel icon (#2858)
See #2711
2018-04-16 15:49:54 +02:00
badrAZ
8d4874e240 fix(xo-web/backupNg/new): make the default retention equal to 1 (#2872)
See #2711
2018-04-16 15:27:55 +02:00
badrAZ
a8ba4a1a8e feat(xo-web): stats for SRs (#2847) 2018-04-16 14:40:00 +02:00
Julien Fontanet
0c027247ec fix(normalize-packages): homepage for scoped packages 2018-04-15 23:41:27 +02:00
badrAZ
164cb39c1b fix(xo-web/backup/new): schedules values can be null (#2773) 2018-04-13 17:10:03 +02:00
Julien Fontanet
52503de645 fix(xo-web/initial fetch): support path prefix
Related to #2775
2018-04-13 17:01:43 +02:00
Julien Fontanet
83b8b5de61 fix(xo-web/updater): support path prefix
Related to #2775
2018-04-13 17:01:43 +02:00
Rajaa.BARHTAOUI
3e326c4e62 feat(xo-web/updater): disable upgrade button when not needed (#2816)
Fixes #1594
2018-04-13 16:46:58 +02:00
Julien Fontanet
a6b0690416 fix(xo-server): unmanaged VDI snapshots are VDI-unmanaged 2018-04-13 11:36:10 +02:00
Julien Fontanet
dcd007c5c7 fix(xen-api): fix sync test in watchTask (#2868) 2018-04-12 18:02:51 +02:00
Julien Fontanet
eb090e4874 fix(xen-api): getObject* should not return null 2018-04-12 11:06:08 +02:00
Julien Fontanet
4b716584f7 feat(xo-server): 5.18.2 2018-04-11 17:48:59 +02:00
Julien Fontanet
4bc348f39f fix(xo-server/vhd/createReadStream): emit empty if missing sectors 2018-04-11 17:47:43 +02:00
Julien Fontanet
9c75992fe4 feat(xo-web): 5.18.1 2018-04-11 17:32:58 +02:00
Julien Fontanet
4bb2702ac5 feat(xo-server): 5.18.1 2018-04-11 17:32:58 +02:00
Julien Fontanet
ea8133cb41 fix(xo-server/vhd/createReadStream): handle unallocated blocks (#2859)
Fixes #2857
2018-04-11 17:24:46 +02:00
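For context: a dynamic VHD only stores allocated blocks, so a full-image read stream must synthesize zero-filled data for the rest. An illustrative sketch (the helpers and the 2 MiB default block size are assumptions):

```js
// Push stored data for allocated blocks, zeroes for unallocated ones.
const EMPTY_BLOCK = Buffer.alloc(2 * 1024 * 1024) // default VHD block size
for (let i = 0; i < maxTableEntries; ++i) {
  stream.push(isBlockAllocated(i) ? await readBlockData(i) : EMPTY_BLOCK)
}
```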
Pierre Donias
fc40c7b03d fix(xo-web/new SR): create button not showing (#2854)
Fixes #2853
2018-04-11 10:21:06 +02:00
Julien Fontanet
7fe5b66fdb feat(xo-server-auth-saml): log profile when no name found 2018-04-10 19:09:30 +02:00
Julien Fontanet
0f1d052493 chore: update dependencies 2018-04-09 18:11:53 +02:00
badrAZ
56a182f795 fix(xo-web/backup-ng/new): dont add a target more than once (#2849)
Fixes #2848
2018-04-09 17:22:38 +02:00
Julien Fontanet
e8da1b943b fix(xo-server/backups-ng): create all forks at the same time (#2842)
Fixes #2790
2018-04-09 16:42:05 +02:00
190 changed files with 11303 additions and 6984 deletions

.gitignore

@@ -8,6 +8,9 @@
/packages/*/dist/
/packages/*/node_modules/
/packages/vhd-cli/src/commands/index.js
/packages/xen-api/examples/node_modules/
/packages/xen-api/plot.dat
/packages/xo-server/.xo-server.*

.travis.yml

@@ -1,6 +1,6 @@
language: node_js
node_js:
- stable
#- stable # disable for now due to an issue of indirect dep upath with Node 9
- 8
- 6
@@ -12,6 +12,7 @@ addons:
packages:
- qemu-utils
- blktap-utils
- vmdk-stream-converter
before_install:
- curl -o- -L https://yarnpkg.com/install.sh | bash

@xen-orchestra/babel-config/index.js

@@ -1,40 +1,56 @@
'use strict'
const PLUGINS_RE = /^(?:@babel\/plugin-.+|babel-plugin-lodash)$/
const PLUGINS_RE = /^(?:@babel\/|babel-)plugin-.+$/
const PRESETS_RE = /^@babel\/preset-.+$/
const NODE_ENV = process.env.NODE_ENV || 'development'
const __PROD__ = NODE_ENV === 'production'
const __TEST__ = NODE_ENV === 'test'
const configs = {
'@babel/plugin-proposal-decorators': {
legacy: true,
},
'@babel/preset-env' (pkg) {
return {
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? (() => {
let node = (pkg.engines || {}).node
if (node !== undefined) {
const trimChars = '^=>~'
while (trimChars.includes(node[0])) {
node = node.slice(1)
}
return { node: node }
}
})()
: { browsers: '', node: 'current' },
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
}
},
}
const getConfig = (key, ...args) => {
const config = configs[key]
return config === undefined
? {}
: typeof config === 'function'
? config(...args)
: config
}
module.exports = function (pkg, plugins, presets) {
plugins === undefined && (plugins = {})
presets === undefined && (presets = {})
presets['@babel/preset-env'] = {
debug: !__TEST__,
loose: true,
shippedProposals: true,
targets: __PROD__
? (() => {
let node = (pkg.engines || {}).node
if (node !== undefined) {
const trimChars = '^=>~'
while (trimChars.includes(node[0])) {
node = node.slice(1)
}
return { node: node }
}
})()
: { browsers: '', node: 'current' },
useBuiltIns: '@babel/polyfill' in (pkg.dependencies || {}) && 'usage',
}
Object.keys(pkg.devDependencies || {}).forEach(name => {
if (!(name in presets) && PLUGINS_RE.test(name)) {
plugins[name] = {}
plugins[name] = getConfig(name, pkg)
} else if (!(name in presets) && PRESETS_RE.test(name)) {
presets[name] = {}
presets[name] = getConfig(name, pkg)
}
})

@xen-orchestra/babel-config/package.json

@@ -2,7 +2,7 @@
"private": true,
"name": "@xen-orchestra/babel-config",
"version": "0.0.0",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/babel-config",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/babel-config",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",

@xen-orchestra/cron/package.json

@@ -14,7 +14,7 @@
"scheduling",
"task"
],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/@xen-orchestra/cron",
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/cron",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
@@ -41,10 +41,10 @@
"moment-timezone": "^0.5.14"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},

@xen-orchestra/fs/.babelrc.js

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

@xen-orchestra/fs/package.json

@@ -0,0 +1,54 @@
{
"name": "@xen-orchestra/fs",
"version": "0.1.0",
"license": "AGPL-3.0",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/@xen-orchestra/fs",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.49",
"@marsaud/smb2-promise": "^0.2.1",
"execa": "^0.10.0",
"fs-extra": "^6.0.1",
"get-stream": "^3.0.0",
"lodash": "^4.17.4",
"promise-toolbox": "^0.9.5",
"through2": "^2.0.3",
"tmp": "^0.0.33",
"xo-remote-parser": "^0.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}

@xen-orchestra/fs/src/abstract.js

@@ -1,11 +1,11 @@
// @flow
import getStream from 'get-stream'
import { randomBytes } from 'crypto'
import { fromCallback, fromEvent, ignoreErrors } from 'promise-toolbox'
import { type Readable, type Writable } from 'stream'
import { fromEvent, ignoreErrors } from 'promise-toolbox'
import { parse } from 'xo-remote-parser'
import { getPseudoRandomBytes, streamToBuffer } from '../utils'
import { createChecksumStream, validChecksumOfReadStream } from './checksum'
type Data = Buffer | Readable | string
@@ -54,7 +54,7 @@ export default class RemoteHandlerAbstract {
async test (): Promise<Object> {
const testFileName = `${Date.now()}.test`
const data = getPseudoRandomBytes(1024 * 1024)
const data = await fromCallback(cb => randomBytes(1024 * 1024, cb))
let step = 'write'
try {
await this.outputFile(testFileName, data)
@@ -92,12 +92,28 @@ export default class RemoteHandlerAbstract {
await promise
}
async read (
file: File,
buffer: Buffer,
position?: number
): Promise<{| bytesRead: number, buffer: Buffer |}> {
return this._read(file, buffer, position)
}
_read (
file: File,
buffer: Buffer,
position?: number
): Promise<{| bytesRead: number, buffer: Buffer |}> {
throw new Error('Not implemented')
}
async readFile (file: string, options?: Object): Promise<Buffer> {
return this._readFile(file, options)
}
_readFile (file: string, options?: Object): Promise<Buffer> {
return this.createReadStream(file, options).then(streamToBuffer)
return this.createReadStream(file, options).then(getStream.buffer)
}
async rename (
@@ -126,7 +142,10 @@ export default class RemoteHandlerAbstract {
prependDir = false,
}: { filter?: (name: string) => boolean, prependDir?: boolean } = {}
): Promise<string[]> {
const entries = await this._list(dir)
let entries = await this._list(dir)
if (filter !== undefined) {
entries = entries.filter(filter)
}
if (prependDir) {
entries.forEach((entry, i) => {
@@ -134,7 +153,7 @@ export default class RemoteHandlerAbstract {
})
}
return filter === undefined ? entries : entries.filter(filter)
return entries
}
async _list (dir: string): Promise<string[]> {


@@ -0,0 +1,26 @@
/* eslint-env jest */
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import { getHandler } from '.'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test("fs test doesn't crash", async () => {
const handler = getHandler({ url: 'file://' + process.cwd() })
const result = await handler.test()
expect(result.success).toBeTruthy()
})
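The handler under test comes from the package's entry point, where the URL's protocol selects the concrete implementation (see the index diff below). A minimal non-test usage sketch, using methods shown elsewhere in this compare:

```js
import { getHandler } from '@xen-orchestra/fs'

// file:// selects the local handler; nfs:// and smb:// the remote ones.
const handler = getHandler({ url: 'file:///tmp/backups' })
await handler.outputFile('hello.txt', Buffer.from('hello'))
console.log(String(await handler.readFile('hello.txt')))
```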

@xen-orchestra/fs/src/index.js

@@ -5,6 +5,7 @@ import RemoteHandlerLocal from './local'
import RemoteHandlerNfs from './nfs'
import RemoteHandlerSmb from './smb'
export type { default as RemoteHandler } from './abstract'
export type Remote = { url: string }
const HANDLERS = {

@xen-orchestra/fs/src/local.js

@@ -50,6 +50,24 @@ export default class LocalHandler extends RemoteHandlerAbstract {
await fs.writeFile(path, data, options)
}
async _read (file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd
try {
return await fs.read(
file,
buffer,
0,
buffer.length,
position === undefined ? null : position
)
} finally {
if (needsClose) {
await fs.close(file)
}
}
}
async _readFile (file, options) {
return fs.readFile(this._getFilePath(file), options)
}

@xen-orchestra/fs/src/smb.js

@@ -1,7 +1,9 @@
import Smb2 from '@marsaud/smb2-promise'
import { lastly as pFinally } from 'promise-toolbox'
import RemoteHandlerAbstract from './abstract'
import { noop, pFinally } from '../utils'
const noop = () => {}
// Normalize the error code for file not found.
const normalizeError = error => {

CHANGELOG.md

@@ -1,9 +1,122 @@
# ChangeLog
## *next*
### Enhancements
- Hide legacy backup creation view [#2956](https://github.com/vatesfr/xen-orchestra/issues/2956)
- [Delta Backup NG logs] Display whether the export is a full or a delta [#2711](https://github.com/vatesfr/xen-orchestra/issues/2711)
- Copy VDIs' UUID from SR/disks view [#3051](https://github.com/vatesfr/xen-orchestra/issues/3051)
- [Backup NG] New option to shutdown VMs before snapshotting them [#3058](https://github.com/vatesfr/xen-orchestra/issues/3058#event-1673756438)
- [Backup NG form] Improve feedback [#2711](https://github.com/vatesfr/xen-orchestra/issues/2711)
- [Backup NG] Different retentions for backup and replication [#2895](https://github.com/vatesfr/xen-orchestra/issues/2895)
- Possibility to use a fast clone when creating a VM from a snapshot [#2937](https://github.com/vatesfr/xen-orchestra/issues/2937)
### Bugs
- Update the xentools search item to return the version number of the installed Xen tools [#3015](https://github.com/vatesfr/xen-orchestra/issues/3015)
- Fix Nagios backup reports [#2991](https://github.com/vatesfr/xen-orchestra/issues/2991)
- Fix the retry of a single failed/interrupted VM backup [#2912](https://github.com/vatesfr/xen-orchestra/issues/2912#issuecomment-395480321)
- New VM with Self: filter out networks that are not in the template's pool [#3011](https://github.com/vatesfr/xen-orchestra/issues/3011)
- [Backup NG] Auto-detect when a full export is necessary.
## **5.20.0** (2018-05-31)
### Enhancements
- Add VDI UUID in SR coalesce view [#2903](https://github.com/vatesfr/xen-orchestra/issues/2903)
- Create new VDI from SR view not attached to any VM [#2229](https://github.com/vatesfr/xen-orchestra/issues/2229)
- [Patches] ignore XS upgrade in missing patches counter [#2866](https://github.com/vatesfr/xen-orchestra/issues/2866)
- [Health] List VM snapshots related to non-existing backup jobs/schedules [#2828](https://github.com/vatesfr/xen-orchestra/issues/2828)
## **5.19.0** (2018-05-01)
### Enhancements
- Expose vendor device in VM advanced tab [#2883](https://github.com/vatesfr/xen-orchestra/issues/2883)
- Networks created in XO are missing the "automatic" parameter [#2818](https://github.com/vatesfr/xen-orchestra/issues/2818)
- Performance alert disk space monitoring XS [#2737](https://github.com/vatesfr/xen-orchestra/issues/2737)
- Add ability to create NFSv4 storage repository [#2706](https://github.com/vatesfr/xen-orchestra/issues/2706)
- [SortedTable] Support link actions [#2691](https://github.com/vatesfr/xen-orchestra/issues/2691)
- Additional sort option: by host name [#2680](https://github.com/vatesfr/xen-orchestra/issues/2680)
- Expose XenTools version numbers in data model and UI [#2650](https://github.com/vatesfr/xen-orchestra/issues/2650)
- RRDs stats for SR object [#2644](https://github.com/vatesfr/xen-orchestra/issues/2644)
- composite jobs [#2367](https://github.com/vatesfr/xen-orchestra/issues/2367)
- Better error message [#2344](https://github.com/vatesfr/xen-orchestra/issues/2344)
- Avoid using backup tag with special characters [#2336](https://github.com/vatesfr/xen-orchestra/issues/2336)
- Prefix/suffix for temporary files [#2333](https://github.com/vatesfr/xen-orchestra/issues/2333)
- Continuous Replication - better interface matching on destination [#2093](https://github.com/vatesfr/xen-orchestra/issues/2093)
- Creation of LVMoHBA SRs [#1992](https://github.com/vatesfr/xen-orchestra/issues/1992)
- [Delta backup] Improve restoration by creating a virtual full VHD [#1943](https://github.com/vatesfr/xen-orchestra/issues/1943)
- VM Backups should be done in a dedicated remote directory [#1752](https://github.com/vatesfr/xen-orchestra/issues/1752)
- Add Pool / SR filter in backup view [#1762](https://github.com/vatesfr/xen-orchestra/issues/1762)
- Hide/Disable upgrade button when no upgrade exists [#1594](https://github.com/vatesfr/xen-orchestra/issues/1594)
- "Upgrade" button should display "Downgrade" when trial is over [#1483](https://github.com/vatesfr/xen-orchestra/issues/1483)
### Bugs
- Allowed-ips don't works displaying index.js:1 Uncaught TypeError: (0 , z.isIp) is not a function [#2891](https://github.com/vatesfr/xen-orchestra/issues/2891)
- Error on "usage-report" [#2876](https://github.com/vatesfr/xen-orchestra/issues/2876)
- SR selection combo only listing local storage [#2875](https://github.com/vatesfr/xen-orchestra/issues/2875)
- [Backup NG - Delta] Issue while importing delta [#2857](https://github.com/vatesfr/xen-orchestra/issues/2857)
- Create New SR page broken with past commit [#2853](https://github.com/vatesfr/xen-orchestra/issues/2853)
- [Backup NG] a target should only be preset once [#2848](https://github.com/vatesfr/xen-orchestra/issues/2848)
- Auth Method iSCSI [#2835](https://github.com/vatesfr/xen-orchestra/issues/2835)
- [Backup NG] ENOENT with Delta Backup [#2833](https://github.com/vatesfr/xen-orchestra/issues/2833)
- Different backup logs [#2732](https://github.com/vatesfr/xen-orchestra/issues/2732)
- Creating network fails silently when omitting Description [#2719](https://github.com/vatesfr/xen-orchestra/issues/2719)
- Can't create ISO NFS SR via XOA [#1845](https://github.com/vatesfr/xen-orchestra/issues/1845)
## **5.18.0** (2018-03-31)
### Enhancements
- Support huge VHDs [#2785](https://github.com/vatesfr/xen-orchestra/issues/2785)
- Usage report extended usage [#2770](https://github.com/vatesfr/xen-orchestra/issues/2770)
- Improve host available RAM display [#2750](https://github.com/vatesfr/xen-orchestra/issues/2750)
- Hide IP field during VM creation if not configured [#2739](https://github.com/vatesfr/xen-orchestra/issues/2739)
- [Home] Delete VMs modal should autofocus the input field [#2736](https://github.com/vatesfr/xen-orchestra/issues/2736)
- Backup restore view load icon [#2692](https://github.com/vatesfr/xen-orchestra/issues/2692)
- Deleting default templates doesn't work [#2666](https://github.com/vatesfr/xen-orchestra/issues/2666)
- DR clean previous "failed" snapshots [#2656](https://github.com/vatesfr/xen-orchestra/issues/2656)
- [Home] Put sort criteria in URL like the filter [#2585](https://github.com/vatesfr/xen-orchestra/issues/2585)
- Allow disconnect VDI in SR disk view [#2505](https://github.com/vatesfr/xen-orchestra/issues/2505)
- Add confirmation modal for manual backup run [#2355](https://github.com/vatesfr/xen-orchestra/issues/2355)
- Multiple schedule for backup jobs [#2286](https://github.com/vatesfr/xen-orchestra/issues/2286)
- Checks before web update [#2250](https://github.com/vatesfr/xen-orchestra/issues/2250)
- Backup logs should truly reflect if the job is running [#2206](https://github.com/vatesfr/xen-orchestra/issues/2206)
- Hook/action if an export stream is cut [#1929](https://github.com/vatesfr/xen-orchestra/issues/1929)
- Backup paths should not contain tags but job ids [#1854](https://github.com/vatesfr/xen-orchestra/issues/1854)
- Add a button to delete a backup [#1751](https://github.com/vatesfr/xen-orchestra/issues/1751)
- Dashboard available for Pool and Host level [#1631](https://github.com/vatesfr/xen-orchestra/issues/1631)
- UI Enhancement - VM list - Allways show the Toolbar [#1581](https://github.com/vatesfr/xen-orchestra/issues/1581)
- xoa-updater --register: unable to define proxy using the CLI [#873](https://github.com/vatesfr/xen-orchestra/issues/873)
### Bugs
- [Backup NG] CR/DR fail with multiple VMs [#2807](https://github.com/vatesfr/xen-orchestra/issues/2807)
- HTTPS Crash [#2803](https://github.com/vatesfr/xen-orchestra/issues/2803)
- Backup NG "cannot fork the stream after it has been created" [#2790](https://github.com/vatesfr/xen-orchestra/issues/2790)
- [XOSAN] Make temporary `boundObjectId` unique [#2758](https://github.com/vatesfr/xen-orchestra/issues/2758)
- First VIF ignored at VM creation [#2794](https://github.com/vatesfr/xen-orchestra/issues/2794)
- VM creation from snapshot does not work [#2748](https://github.com/vatesfr/xen-orchestra/issues/2748)
- Error: no such object with CentOS 7 template [#2747](https://github.com/vatesfr/xen-orchestra/issues/2747)
- [Tasks] Filter does not work [#2740](https://github.com/vatesfr/xen-orchestra/issues/2740)
- Pagination broken when listing pool VMs [#2730](https://github.com/vatesfr/xen-orchestra/issues/2730)
- All jobs show error icon with message "This backup's creator no longer exists" [#2728](https://github.com/vatesfr/xen-orchestra/issues/2728)
- [Basic backup] Continuous Replication VM names [#2727](https://github.com/vatesfr/xen-orchestra/issues/2727)
- Continuous replication clone removed [#2724](https://github.com/vatesfr/xen-orchestra/issues/2724)
- [Backup] "See matching VMs" issue [#2704](https://github.com/vatesfr/xen-orchestra/issues/2704)
- How to exclude CR targets from a smart backup using tags? [#2613](https://github.com/vatesfr/xen-orchestra/issues/2613)
- Successful VM import reported as failed [#2056](https://github.com/vatesfr/xen-orchestra/issues/2056)
- Delta backup: issue if a disk is once again backed up [#1824](https://github.com/vatesfr/xen-orchestra/issues/1824)
## **5.17.0** (2018-03-02)
### Enhancements
- Username field labeled inconsistently [#2651](https://github.com/vatesfr/xen-orchestra/issues/2651)
- Add modal confirmation for host emergency mode [#2230](https://github.com/vatesfr/xen-orchestra/issues/2230)
- Authorize stats fetching in RO mode [#2678](https://github.com/vatesfr/xen-orchestra/issues/2678)
- Limit VM.export concurrency [#2669](https://github.com/vatesfr/xen-orchestra/issues/2669)
@@ -22,6 +135,9 @@
- Cant attach XenTools on User side. [#2503](https://github.com/vatesfr/xen-orchestra/issues/2503)
- Pool filter for health view [#2302](https://github.com/vatesfr/xen-orchestra/issues/2302)
- [Smart Backup] Improve feedback [#2253](https://github.com/vatesfr/xen-orchestra/issues/2253)
- Backup jobs stuck if no space left on NFS remote [#2116](https://github.com/vatesfr/xen-orchestra/issues/2116)
- Link between backup and XS tasks [#1193](https://github.com/vatesfr/xen-orchestra/issues/1193)
- Move delta backup grouping to server side [#1008](https://github.com/vatesfr/xen-orchestra/issues/1008)
### Bugs
@@ -41,6 +157,7 @@
- Self-service: allow VIF create [#2593](https://github.com/vatesfr/xen-orchestra/issues/2593)
- Ghost tasks [#2579](https://github.com/vatesfr/xen-orchestra/issues/2579)
- Autopatching: ignore 7.3 update patch for 7.2 [#2564](https://github.com/vatesfr/xen-orchestra/issues/2564)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- Allow deleting VMs for which `destroy` is blocked [#2525](https://github.com/vatesfr/xen-orchestra/issues/2525)
- Better confirmation on mass destructive actions [#2522](https://github.com/vatesfr/xen-orchestra/issues/2522)
- Move VM In to/Out of Self Service Group [#1913](https://github.com/vatesfr/xen-orchestra/issues/1913)
@@ -50,6 +167,8 @@
- Remove CoffeeScript in xo-server [#189](https://github.com/vatesfr/xen-orchestra/issues/189)
- Better Handling of suspending VMs from the Home screen [#2547](https://github.com/vatesfr/xen-orchestra/issues/2547)
- [xen-api] Stronger reconnection policy [#2410](https://github.com/vatesfr/xen-orchestra/issues/2410)
- home view - allow selecting more than 25 items [#1210](https://github.com/vatesfr/xen-orchestra/issues/1210)
- Performances alerts [#511](https://github.com/vatesfr/xen-orchestra/issues/511)
### Bugs
@@ -79,6 +198,7 @@
- Graphs in VM list view [#2469](https://github.com/vatesfr/xen-orchestra/issues/2469)
- [Delta Backups] Do not include merge duration in transfer speed stat [#2426](https://github.com/vatesfr/xen-orchestra/issues/2426)
- Warning for disperse mode [#2537](https://github.com/vatesfr/xen-orchestra/issues/2537)
- Select components: auto select value if only 1 choice possible [#1479](https://github.com/vatesfr/xen-orchestra/issues/1479)
### Bugs

PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,19 @@
### Check list
> Check items when done or if not relevant
- [ ] PR reference the relevant issue (e.g. `Fixes #007`)
- [ ] if UI changes, a screenshot has been added to the PR
- [ ] CHANGELOG updated
- [ ] documentation updated
### Process
1. create a PR as soon as possible
1. mark it as `WiP:` (Work in Progress) if not ready to be merged
1. when you want a review, add a reviewer
1. if necessary, update your PR, and readd a reviewer
### List of packages to release
> No need to mention xo-server and xo-web.

babel.config.js

@@ -0,0 +1,5 @@
module.exports = {
// Necessary for jest to be able to find the `.babelrc.js` closest to the file
// instead of only the one in this directory.
babelrcRoots: true,
}

flow-typed/limit-concurrency-decorator.js

@@ -0,0 +1,6 @@
declare module 'limit-concurrency-decorator' {
declare function limitConcurrencyDecorator(
concurrency: number
): <T: Function>(T) => T
declare export default typeof limitConcurrencyDecorator
}

flow-typed/lodash.js

@@ -1,4 +1,16 @@
declare module 'lodash' {
declare export function countBy<K, V>(
object: { [K]: V },
iteratee: K | ((V, K) => string)
): { [string]: number }
declare export function forEach<K, V>(
object: { [K]: V },
iteratee: (V, K) => void
): void
declare export function groupBy<K, V>(
object: { [K]: V },
iteratee: K | ((V, K) => string)
): { [string]: V[] }
declare export function invert<K, V>(object: { [K]: V }): { [V]: K }
declare export function isEmpty(mixed): boolean
declare export function keyBy<T>(array: T[], iteratee: string): boolean
@@ -12,5 +24,10 @@ declare module 'lodash' {
iteratee: (V1, K) => V2
): { [K]: V2 }
declare export function noop(...args: mixed[]): void
declare export function some<T>(
collection: T[],
iteratee: (T, number) => boolean
): boolean
declare export function sum(values: number[]): number
declare export function values<K, V>(object: { [K]: V }): V[]
}

flow-typed/promise-toolbox.js

@@ -5,6 +5,9 @@ declare module 'promise-toolbox' {
reject: T => void,
resolve: T => void,
|}
declare export function fromCallback<T>(
(cb: (error: any, value: T) => void) => void
): Promise<T>
declare export function fromEvent(emitter: mixed, string): Promise<mixed>
declare export function ignoreErrors(): Promise<void>
declare export function timeout<T>(delay: number): Promise<T>

package.json

@@ -1,8 +1,10 @@
{
"devDependencies": {
"@babel/register": "^7.0.0-beta.44",
"babel-7-jest": "^21.3.2",
"@babel/core": "^7.0.0-beta.49",
"@babel/register": "^7.0.0-beta.49",
"babel-core": "^7.0.0-0",
"babel-eslint": "^8.1.2",
"babel-jest": "^23.0.1",
"benchmark": "^2.1.4",
"eslint": "^4.14.0",
"eslint-config-standard": "^11.0.0-beta.0",
@@ -13,37 +15,30 @@
"eslint-plugin-react": "^7.6.1",
"eslint-plugin-standard": "^3.0.1",
"exec-promise": "^0.7.0",
"flow-bin": "^0.69.0",
"flow-bin": "^0.73.0",
"globby": "^8.0.0",
"husky": "^0.14.3",
"jest": "^22.0.4",
"jest": "^23.0.1",
"lodash": "^4.17.4",
"prettier": "^1.10.2",
"promise-toolbox": "^0.9.5",
"sorted-object": "^2.0.1"
},
"engines": {
"yarn": "^1.2.1"
"yarn": "^1.7.0"
},
"jest": {
"collectCoverage": true,
"projects": [
"<rootDir>",
"<rootDir>/packages/xo-web"
"<rootDir>"
],
"testEnvironment": "node",
"testPathIgnorePatterns": [
"/dist/",
"/xo-vmdk-to-vhd/",
"/xo-web/"
],
"testRegex": "\\.spec\\.js$",
"transform": {
"/@xen-orchestra/cron/.+\\.jsx?$": "babel-7-jest",
"/packages/complex-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/value-matcher/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-cli/.+\\.jsx?$": "babel-7-jest",
"/packages/xo-server/.+\\.jsx?$": "babel-7-jest",
"\\.jsx?$": "babel-jest"
}
},


@@ -30,9 +30,9 @@
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.1",
"rimraf": "^2.6.2"


@@ -28,10 +28,10 @@
},
"dependencies": {},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"
},

packages/vhd-cli/.babelrc.js

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

packages/vhd-cli/package.json

@@ -1,6 +1,6 @@
{
"name": "vhd-cli",
"version": "0.0.0",
"version": "0.0.1",
"license": "ISC",
"description": "",
"keywords": [],
@@ -23,45 +23,32 @@
"dist/"
],
"engines": {
"node": ">=4"
"node": ">=6"
},
"dependencies": {
"@xen-orchestra/fs": "^0.1.0",
"exec-promise": "^0.7.0",
"struct-fu": "^1.2.0",
"@nraynaud/xo-fs": "^0.0.5",
"babel-runtime": "^6.22.0",
"exec-promise": "^0.7.0"
"vhd-lib": "^0.1.2"
},
"devDependencies": {
"babel-cli": "^6.24.1",
"@babel/cli": "^7.0.0-beta.49",
"@babel/core": "^7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "^7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
"execa": "^0.10.0",
"index-modules": "^0.3.0",
"promise-toolbox": "^0.9.5",
"rimraf": "^2.6.1",
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "rimraf dist/",
"prebuild": "rimraf dist/ && index-modules --cjs-lazy src/commands",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"babel": {
"plugins": [
"lodash",
"transform-runtime"
],
"presets": [
[
"env",
{
"targets": {
"node": 4
}
}
],
"stage-3"
]
"prepare": "yarn run build"
}
}


@@ -0,0 +1,15 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const handler = getHandler({ url: 'file:///' })
for (const vhd of args) {
try {
await new Vhd(handler, resolve(vhd)).readHeaderAndFooter()
console.log('ok:', vhd)
} catch (error) {
console.error('nok:', vhd, error)
}
}
}

packages/vhd-cli/src/commands/info.js

@@ -0,0 +1,12 @@
import Vhd from 'vhd-lib'
import { getHandler } from '@xen-orchestra/fs'
import { resolve } from 'path'
export default async args => {
const vhd = new Vhd(getHandler({ url: 'file:///' }), resolve(args[0]))
await vhd.readHeaderAndFooter()
console.log(vhd.header)
console.log(vhd.footer)
}


@@ -0,0 +1,21 @@
import path from 'path'
import { createSyntheticStream } from 'vhd-lib'
import { createWriteStream } from 'fs'
import { getHandler } from '@xen-orchestra/fs'
export default async function main (args) {
if (args.length < 2 || args.some(_ => _ === '-h' || _ === '--help')) {
return `Usage: ${this.command} <input VHD> <output VHD>`
}
const handler = getHandler({ url: 'file:///' })
return new Promise((resolve, reject) => {
createSyntheticStream(handler, path.resolve(args[0]))
.on('error', reject)
.pipe(
createWriteStream(args[1])
.on('error', reject)
.on('finish', resolve)
)
})
}

packages/vhd-cli/src/index.js

@@ -1,19 +1,44 @@
#!/usr/bin/env node
import execPromise from 'exec-promise'
import { RemoteHandlerLocal } from '@nraynaud/xo-fs'
import { resolve } from 'path'
import Vhd from './vhd'
import commands from './commands'
execPromise(async args => {
const vhd = new Vhd(
new RemoteHandlerLocal({ url: 'file:///' }),
resolve(args[0])
function runCommand (commands, [command, ...args]) {
if (command === undefined || command === '-h' || command === '--help') {
command = 'help'
}
const fn = commands[command]
if (fn === undefined) {
if (command === 'help') {
return `Usage:
${Object.keys(commands)
.filter(command => command !== 'help')
.map(command => ` ${this.command} ${command}`)
.join('\n\n')}`
}
throw `invalid command ${command}` // eslint-disable-line no-throw-literal
}
return fn.call(
{
__proto__: this,
command: `${this.command} ${command}`,
},
args
)
}
await vhd.readHeaderAndFooter()
console.log(vhd._header)
console.log(vhd._footer)
})
execPromise(
runCommand.bind(
{
command: 'vhd-cli',
runCommand,
},
commands
)
)


@@ -0,0 +1,28 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { fromCallback as pFromCallback } from 'promise-toolbox'
import command from './commands/info'
const initialDir = process.cwd()
jest.setTimeout(10000)
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('can run the command', async () => {
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1G'])
await command(['empty.vhd'])
})

packages/vhd-cli/src/vhd.js

@@ -1,461 +0,0 @@
import assert from 'assert'
import fu from 'struct-fu'
import { dirname } from 'path'
// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================
/* eslint-disable no-unused-vars */
const HARD_DISK_TYPE_DIFFERENCING = 4
const HARD_DISK_TYPE_DYNAMIC = 3
const HARD_DISK_TYPE_FIXED = 2
const PLATFORM_CODE_NONE = 0
export const SECTOR_SIZE = 512
/* eslint-enable no-unused vars */
// ===================================================================
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
fu.struct('dataOffset', [
fu.uint32('high'), // 16
fu.uint32('low'), // 20
]),
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
fu.struct('originalSize', [
// At the creation, current size of the hard disk.
fu.uint32('high'), // 40
fu.uint32('low'), // 44
]),
fu.struct('currentSize', [
// Current size of the virtual disk. At the creation: currentSize = originalSize.
fu.uint32('high'), // 48
fu.uint32('low'), // 52
]),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.byte('reserved', 426), // 86
])
const FOOTER_SIZE = fuFooter.size
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.struct('dataOffset', [fu.uint32('high'), fu.uint32('low')]),
fu.struct('tableOffset', [
// Absolute byte offset of the Block Allocation Table.
fu.uint32('high'),
fu.uint32('low'),
]),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size (without bitmap) in bytes.
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.byte('reserved1', 4),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
fu.struct('platformDataOffset', [
// Absolute byte offset of the locator data.
fu.uint32('high'),
fu.uint32('low'),
]),
],
8
),
fu.byte('reserved2', 256),
])
const HEADER_SIZE = fuHeader.size
// ===================================================================
// Helpers
// ===================================================================
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint32ToUint64 = fu => fu.high * SIZE_OF_32_BITS + fu.low
// Returns a 32 bits integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
// bytes[] bit manipulation
const testBit = (map, bit) => map[bit >> 3] & (1 << (bit & 7))
const setBit = (map, bit) => {
map[bit >> 3] |= 1 << (bit & 7)
}
const unsetBit = (map, bit) => {
map[bit >> 3] &= ~(1 << (bit & 7))
}
const addOffsets = (...offsets) =>
offsets.reduce(
(a, b) =>
b == null
? a
: typeof b === 'object'
? { bytes: a.bytes + b.bytes, bits: a.bits + b.bits }
: { bytes: a.bytes + b, bits: a.bits },
{ bytes: 0, bits: 0 }
)
const pack = (field, value, buf, offset) => {
field.pack(value, buf, addOffsets(field.offset, offset))
}
const unpack = (field, buf, offset) =>
field.unpack(buf, addOffsets(field.offset, offset))
// ===================================================================
const streamToNewBuffer = stream =>
new Promise((resolve, reject) => {
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(Buffer.concat(chunks, length))
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
const streamToExistingBuffer = (
stream,
buffer,
offset = 0,
end = buffer.length
) =>
new Promise((resolve, reject) => {
assert(offset >= 0)
assert(end > offset)
assert(end <= buffer.length)
let i = offset
const onData = chunk => {
const prev = i
i += chunk.length
if (i > end) {
return onError(new Error('too much data'))
}
chunk.copy(buffer, prev)
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(i - offset)
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
// ===================================================================
// Returns the checksum of a raw struct.
const computeChecksum = (struct, buf, offset = 0) => {
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumField = struct.fields.checksum
const checksumOffset = offset + checksumField.offset
for (let i = offset, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = offset + struct.size;
i < n;
++i
) {
sum += buf[i]
}
return ~sum >>> 0
}
const verifyChecksum = (struct, buf, offset) =>
unpack(struct.fields.checksum, buf, offset) ===
computeChecksum(struct, buf, offset)
const getParentLocatorSize = parentLocatorEntry => {
const { platformDataSpace } = parentLocatorEntry
if (platformDataSpace < SECTOR_SIZE) {
return platformDataSpace * SECTOR_SIZE
}
return platformDataSpace % SECTOR_SIZE === 0 ? platformDataSpace : 0
}
// ===================================================================
// Euclidean division, returns the quotient and the remainder of a / b.
const div = (a, b) => [Math.floor(a / b), a % b]
export default class Vhd {
constructor (handler, path) {
this._handler = handler
this._path = path
this._blockAllocationTable = null
this._blockBitmapSize = null
this._footer = null
this._header = null
this._parent = null
this._sectorsPerBlock = null
}
// Read `length` bytes starting from `begin`.
//
// - if `buffer`: it is filled starting from `offset`, and the
// number of written bytes is returned;
// - otherwise: a new buffer is allocated and returned.
_read (begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
return this._handler
.createReadStream(this._path, {
end: begin + length - 1,
start: begin,
})
.then(
buf
? stream =>
streamToExistingBuffer(
stream,
buf,
offset,
(offset || 0) + length
)
: streamToNewBuffer
)
}
// - if `buffer`: it is filled with 0 starting from `offset`, and
// the number of written bytes is returned;
// - otherwise: a new buffer is allocated and returned.
_zeroes (length, buf, offset = 0) {
if (buf) {
assert(offset >= 0)
assert(length > 0)
const end = offset + length
assert(end <= buf.length)
buf.fill(0, offset, end)
return Promise.resolve(length)
}
return Promise.resolve(Buffer.alloc(length))
}
// Return the position of a block in the VHD or undefined if not found.
_getBlockAddress (block) {
assert(block >= 0)
assert(block < this._header.maxTableEntries)
const blockAddr = this._blockAllocationTable[block]
if (blockAddr !== 0xffffffff) {
return blockAddr * SECTOR_SIZE
}
}
// -----------------------------------------------------------------
async readHeaderAndFooter () {
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
if (!verifyChecksum(fuFooter, buf)) {
throw new Error('footer checksum does not match')
}
if (!verifyChecksum(fuHeader, buf, FOOTER_SIZE)) {
throw new Error('header checksum does not match')
}
return this._initMetadata(
unpack(fuHeader, buf, FOOTER_SIZE),
unpack(fuFooter, buf)
)
}
async _initMetadata (header, footer) {
const sectorsPerBlock = header.blockSize / SECTOR_SIZE
assert(sectorsPerBlock % 1 === 0)
// 1 bit per sector, rounded up to full sectors
this._blockBitmapSize =
Math.ceil(sectorsPerBlock / 8 / SECTOR_SIZE) * SECTOR_SIZE
assert(this._blockBitmapSize === SECTOR_SIZE)
this._footer = footer
this._header = header
this.size = uint32ToUint64(this._footer.currentSize)
if (footer.diskType === HARD_DISK_TYPE_DIFFERENCING) {
const parent = new Vhd(
this._handler,
`${dirname(this._path)}/${header.parentUnicodeName}`
)
await parent.readHeaderAndFooter()
await parent.readBlockAllocationTable()
this._parent = parent
}
}
// -----------------------------------------------------------------
async readBlockAllocationTable () {
const { maxTableEntries, tableOffset } = this._header
const fuTable = fu.uint32(maxTableEntries)
this._blockAllocationTable = unpack(
fuTable,
await this._read(uint32ToUint64(tableOffset), fuTable.size)
)
}
// -----------------------------------------------------------------
// read a single sector in a block
async _readBlockSector (block, sector, begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
assert(begin + length <= SECTOR_SIZE)
const blockAddr = this._getBlockAddress(block)
const blockBitmapSize = this._blockBitmapSize
const parent = this._parent
if (
blockAddr &&
(!parent || testBit(await this._read(blockAddr, blockBitmapSize), sector))
) {
return this._read(
blockAddr + blockBitmapSize + sector * SECTOR_SIZE + begin,
length,
buf,
offset
)
}
return parent
? parent._readBlockSector(block, sector, begin, length, buf, offset)
: this._zeroes(length, buf, offset)
}
_readBlock (block, begin, length, buf, offset) {
assert(begin >= 0)
assert(length > 0)
const { blockSize } = this._header
assert(begin + length <= blockSize)
const blockAddr = this._getBlockAddress(block)
const parent = this._parent
if (!blockAddr) {
return parent
? parent._readBlock(block, begin, length, buf, offset)
: this._zeroes(length, buf, offset)
}
if (!parent) {
return this._read(
blockAddr + this._blockBitmapSize + begin,
length,
buf,
offset
)
}
// FIXME: we should read as many sectors in a single pass as
// possible for maximum perf.
const [sector, beginInSector] = div(begin, SECTOR_SIZE)
return this._readBlockSector(
block,
sector,
beginInSector,
Math.min(length, SECTOR_SIZE - beginInSector),
buf,
offset
)
}
read (buf, begin, length = buf.length, offset) {
assert(Buffer.isBuffer(buf))
assert(begin >= 0)
const { size } = this
if (begin >= size) {
return Promise.resolve(0)
}
const { blockSize } = this._header
const [block, beginInBlock] = div(begin, blockSize)
return this._readBlock(
block,
beginInBlock,
Math.min(length, blockSize - beginInBlock, size - begin),
buf,
offset
)
}
}


@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)


@@ -0,0 +1,56 @@
{
"name": "vhd-lib",
"version": "0.1.2",
"license": "AGPL-3.0",
"description": "Primitives for VHD file handling",
"keywords": [],
"homepage": "https://github.com/vatesfr/xen-orchestra/tree/master/packages/vhd-lib",
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"repository": {
"type": "git",
"url": "https://github.com/vatesfr/xen-orchestra.git"
},
"preferGlobal": true,
"main": "dist/",
"bin": {},
"files": [
"dist/"
],
"engines": {
"node": ">=6"
},
"dependencies": {
"@babel/runtime": "^7.0.0-beta.49",
"async-iterator-to-stream": "^1.0.2",
"from2": "^2.3.0",
"fs-extra": "^6.0.1",
"limit-concurrency-decorator": "^0.4.0",
"promise-toolbox": "^0.9.5",
"struct-fu": "^1.2.0",
"uuid": "^3.0.1"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"@xen-orchestra/fs": "^0.1.0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"execa": "^0.10.0",
"fs-promise": "^2.0.0",
"get-stream": "^3.0.0",
"index-modules": "^0.3.0",
"rimraf": "^2.6.2",
"tmp": "^0.0.33"
},
"scripts": {
"build": "cross-env NODE_ENV=production babel --source-maps --out-dir=dist/ src/",
"clean": "rimraf dist/",
"dev": "cross-env NODE_ENV=development babel --watch --source-maps --out-dir=dist/ src/",
"prebuild": "yarn run clean",
"predev": "yarn run clean",
"prepare": "yarn run build"
}
}


@@ -0,0 +1,7 @@
const MASK = 0x80
export const set = (map, bit) => {
map[bit >> 3] |= MASK >> (bit & 7)
}
export const test = (map, bit) => ((map[bit >> 3] << (bit & 7)) & MASK) !== 0
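// Illustrative check (assumption: VHD sector bitmaps are most-significant-bit
// first within each byte, which is what the shifts above implement):
//
//   const map = Buffer.alloc(1)
//   set(map, 0) // map[0] is now 0x80
//   test(map, 0) // true
//   test(map, 7) // false: only bit 0 is set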


@@ -0,0 +1,37 @@
import { SECTOR_SIZE } from './_constants'
export default function computeGeometryForSize (size) {
const totalSectors = Math.ceil(size / SECTOR_SIZE)
let sectorsPerTrackCylinder
let heads
let cylinderTimesHeads
if (totalSectors > 65535 * 16 * 255) {
throw Error('disk is too big')
}
// straight from the CHS calculation appendix of the VHD file format spec
if (totalSectors >= 65535 * 16 * 63) {
sectorsPerTrackCylinder = 255
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
} else {
sectorsPerTrackCylinder = 17
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
heads = Math.floor((cylinderTimesHeads + 1023) / 1024)
if (heads < 4) {
heads = 4
}
if (cylinderTimesHeads >= heads * 1024 || heads > 16) {
sectorsPerTrackCylinder = 31
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
}
if (cylinderTimesHeads >= heads * 1024) {
sectorsPerTrackCylinder = 63
heads = 16
cylinderTimesHeads = totalSectors / sectorsPerTrackCylinder
}
}
const cylinders = Math.ceil(cylinderTimesHeads / heads)
const actualSize = cylinders * heads * sectorsPerTrackCylinder * SECTOR_SIZE
return { cylinders, heads, sectorsPerTrackCylinder, actualSize }
}
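// Worked example (illustrative; the values follow from the algorithm above):
// a 1 GiB request (2097152 sectors) settles on 63 sectors/track and 16 heads:
//
//   computeGeometryForSize(1024 ** 3)
//   // → { cylinders: 2081, heads: 16, sectorsPerTrackCylinder: 63,
//   //     actualSize: 1073995776 } (slightly larger than the request)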


@@ -0,0 +1,30 @@
export const BLOCK_UNUSED = 0xffffffff
// This lib has been extracted from the Xen Orchestra project.
export const CREATOR_APPLICATION = 'xo '
// Sizes in bytes.
export const FOOTER_SIZE = 512
export const HEADER_SIZE = 1024
export const SECTOR_SIZE = 512
export const DEFAULT_BLOCK_SIZE = 0x00200000 // from the spec
export const FOOTER_COOKIE = 'conectix'
export const HEADER_COOKIE = 'cxsparse'
export const DISK_TYPE_FIXED = 2
export const DISK_TYPE_DYNAMIC = 3
export const DISK_TYPE_DIFFERENCING = 4
export const PARENT_LOCATOR_ENTRIES = 8
export const PLATFORM_NONE = 0
export const PLATFORM_WI2R = 0x57693272
export const PLATFORM_WI2K = 0x5769326b
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
export const FILE_FORMAT_VERSION = 1 << 16
export const HEADER_VERSION = 1 << 16
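// Note: the PLATFORM_* values above are the ASCII four-character codes from
// the VHD spec, e.g. 0x57693272 spells 'Wi2r' and 0x4d616320 spells 'Mac '.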


@@ -0,0 +1,56 @@
import { v4 as generateUuid } from 'uuid'
import { checksumStruct, fuFooter, fuHeader } from './_structs'
import {
CREATOR_APPLICATION,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_FIXED,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PLATFORM_WI2K,
} from './_constants'
export function createFooter (
size,
timestamp,
geometry,
dataOffset,
diskType = DISK_TYPE_FIXED
) {
const footer = fuFooter.pack({
cookie: FOOTER_COOKIE,
features: 2,
fileFormatVersion: FILE_FORMAT_VERSION,
dataOffset,
timestamp,
creatorApplication: CREATOR_APPLICATION,
creatorHostOs: PLATFORM_WI2K, // it looks like everybody is using Wi2k
originalSize: size,
currentSize: size,
diskGeometry: geometry,
diskType,
uuid: generateUuid(null, []),
})
checksumStruct(footer, fuFooter)
return footer
}
export function createHeader (
maxTableEntries,
tableOffset = HEADER_SIZE + FOOTER_SIZE,
blockSize = VHD_BLOCK_SIZE_BYTES
) {
const header = fuHeader.pack({
cookie: HEADER_COOKIE,
tableOffset,
headerVersion: HEADER_VERSION,
maxTableEntries,
blockSize,
})
checksumStruct(header, fuHeader)
return header
}
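// Usage sketch (illustrative; computeGeometryForSize is the sibling module
// shown above):
//
//   const geometry = computeGeometryForSize(8 * 1024 * 1024)
//   const footer = createFooter(
//     geometry.actualSize,
//     Math.floor(Date.now() / 1000),
//     geometry
//   ) // 512-byte Buffer, checksum already written
//   const header = createHeader(4) // 1024-byte Buffer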


@@ -0,0 +1,121 @@
import assert from 'assert'
import fu from 'struct-fu'
import { FOOTER_SIZE, HEADER_SIZE, PARENT_LOCATOR_ENTRIES } from './_constants'
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
fu.uint32(2),
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ => _[0] * SIZE_OF_32_BITS + _[1]
)
const uint64Undefinable = fu.derive(
fu.uint32(2),
number =>
number === undefined
? [0xffffffff, 0xffffffff]
: [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ =>
_[0] === 0xffffffff && _[1] === 0xffffffff
? undefined
: _[0] * SIZE_OF_32_BITS + _[1]
)
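// e.g. uint64 packs 2 ** 32 + 5 as the big-endian uint32 pair [1, 5] and
// unpacks it back to 4294967301; uint64Undefinable additionally maps
// undefined to the all-ones sentinel [0xffffffff, 0xffffffff] and back.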
export const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
uint64Undefinable('dataOffset'), // offset of the header
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
uint64('originalSize'),
uint64('currentSize'),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85 TODO: should probably be merged in reserved
fu.char('reserved', 426), // 86
])
assert.strictEqual(fuFooter.size, FOOTER_SIZE)
export const fuHeader = fu.struct([
fu.char('cookie', 8),
uint64Undefinable('dataOffset'),
uint64('tableOffset'),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
],
PARENT_LOCATOR_ENTRIES
),
fu.char('reserved2', 256),
])
assert.strictEqual(fuHeader.size, HEADER_SIZE)
export const packField = (field, value, buf) => {
const { offset } = field
field.pack(
value,
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
export const unpackField = (field, buf) => {
const { offset } = field
return field.unpack(
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
export function checksumStruct (buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumOffset = checksumField.offset
for (let i = 0, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = struct.size;
i < n;
++i
) {
sum += buf[i]
}
sum = ~sum >>> 0
// Write new sum.
packField(checksumField, sum, buf)
return sum
}
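// Verification sketch (illustrative): checksumStruct writes the fresh sum
// into the buffer it is given, so verify against a copy to keep the original
// bytes intact.
//
//   const stored = unpackField(fuFooter.fields.checksum, bufFooter)
//   const fresh = checksumStruct(Buffer.from(bufFooter), fuFooter)
//   assert.strictEqual(stored, fresh) // throws on a corrupted footer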


@@ -0,0 +1,37 @@
import { dirname, relative } from 'path'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING } from './_constants'
export default async function chain (
parentHandler,
parentPath,
childHandler,
childPath,
force = false
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
if (footer.diskType !== DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = DISK_TYPE_DIFFERENCING
}
await Promise.all([
childVhd.readBlockAllocationTable(),
parentVhd.readHeaderAndFooter(),
])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
await childVhd.setUniqueParentLocator(parentName)
await childVhd.writeHeader()
await childVhd.writeFooter()
}
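// Usage sketch (assumes a handler from @xen-orchestra/fs, as in the tests):
//
//   const handler = getHandler({ url: 'file:///tmp/vhds' })
//   await chain(handler, 'parent.vhd', handler, 'child.vhd')
//
// Afterwards child.vhd is a differencing disk whose header references
// parent.vhd through a relative path in its unique parent locator.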


@@ -0,0 +1,42 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter } from './_createFooterHeader'
export default asyncIteratorToStream(async function * (size, blockParser) {
const geometry = computeGeometryForSize(size)
const actualSize = geometry.actualSize
const footer = createFooter(
actualSize,
Math.floor(Date.now() / 1000),
geometry
)
let position = 0
function * filePadding (paddingLength) {
if (paddingLength > 0) {
const chunkSize = 1024 * 1024 // 1 MiB
for (
let paddingPosition = 0;
paddingPosition + chunkSize < paddingLength;
paddingPosition += chunkSize
) {
yield Buffer.alloc(chunkSize)
}
yield Buffer.alloc(paddingLength % chunkSize)
}
}
let next
while ((next = await blockParser.next()) !== null) {
const paddingLength = next.offsetBytes - position
if (paddingLength < 0) {
throw new Error('Received out of order blocks')
}
yield * filePadding(paddingLength)
yield next.data
position = next.offsetBytes + next.data.length
}
yield * filePadding(actualSize - position)
yield footer
})


@@ -0,0 +1,128 @@
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import computeGeometryForSize from './_computeGeometryForSize'
import { createFooter, createHeader } from './_createFooterHeader'
import {
BLOCK_UNUSED,
DEFAULT_BLOCK_SIZE as VHD_BLOCK_SIZE_BYTES,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { set as setBitmap } from './_bitmap'
const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
/**
* Fills `bat` in place, assigning an on-disk sector address to every VHD
* block referenced by `blockAddressList`.
*/
function createBAT (
firstBlockPosition,
blockAddressList,
ratio,
bat,
bitmapSize
) {
let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
blockAddressList.forEach(blockPosition => {
assert.strictEqual(blockPosition % SECTOR_SIZE, 0)
const vhdTableIndex = Math.floor(blockPosition / VHD_BLOCK_SIZE_BYTES)
if (bat.readUInt32BE(vhdTableIndex * 4) === BLOCK_UNUSED) {
bat.writeUInt32BE(currentVhdPositionSector, vhdTableIndex * 4)
currentVhdPositionSector +=
(bitmapSize + VHD_BLOCK_SIZE_BYTES) / SECTOR_SIZE
}
})
}
export default asyncIteratorToStream(async function * (
diskSize,
incomingBlockSize,
blockAddressList,
blockIterator
) {
const ratio = VHD_BLOCK_SIZE_BYTES / incomingBlockSize
if (ratio % 1 !== 0) {
throw new Error(
`Can't import file, grain size (${incomingBlockSize}) is not a divisor of VHD block size ${VHD_BLOCK_SIZE_BYTES}`
)
}
if (ratio > 53) {
throw new Error(
`Can't import file, grain size / block size ratio is > 53 (${ratio})`
)
}
const maxTableEntries = Math.ceil(diskSize / VHD_BLOCK_SIZE_BYTES) + 1
const tablePhysicalSizeBytes =
Math.ceil(maxTableEntries * 4 / SECTOR_SIZE) * SECTOR_SIZE
const batPosition = FOOTER_SIZE + HEADER_SIZE
const firstBlockPosition = batPosition + tablePhysicalSizeBytes
const geometry = computeGeometryForSize(diskSize)
const actualSize = geometry.actualSize
const footer = createFooter(
actualSize,
Math.floor(Date.now() / 1000),
geometry,
FOOTER_SIZE,
DISK_TYPE_DYNAMIC
)
const header = createHeader(
maxTableEntries,
batPosition,
VHD_BLOCK_SIZE_BYTES
)
const bitmapSize =
Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
createBAT(firstBlockPosition, blockAddressList, ratio, bat, bitmapSize)
let position = 0
function * yieldAndTrack (buffer, expectedPosition) {
if (expectedPosition !== undefined) {
assert.strictEqual(position, expectedPosition)
}
if (buffer.length > 0) {
yield buffer
position += buffer.length
}
}
async function * generateFileContent (blockIterator, bitmapSize, ratio) {
let currentBlock = -1
let currentVhdBlockIndex = -1
let currentBlockWithBitmap = Buffer.alloc(0)
for await (const next of blockIterator) {
currentBlock++
assert.strictEqual(blockAddressList[currentBlock], next.offsetBytes)
const batIndex = Math.floor(next.offsetBytes / VHD_BLOCK_SIZE_BYTES)
if (batIndex !== currentVhdBlockIndex) {
if (currentVhdBlockIndex >= 0) {
yield * yieldAndTrack(
currentBlockWithBitmap,
bat.readUInt32BE(currentVhdBlockIndex * 4) * SECTOR_SIZE
)
}
currentBlockWithBitmap = Buffer.alloc(bitmapSize + VHD_BLOCK_SIZE_BYTES)
currentVhdBlockIndex = batIndex
}
const blockOffset =
(next.offsetBytes / SECTOR_SIZE) % VHD_BLOCK_SIZE_SECTORS
for (let bitPos = 0; bitPos < VHD_BLOCK_SIZE_SECTORS / ratio; bitPos++) {
setBitmap(currentBlockWithBitmap, blockOffset + bitPos)
}
next.data.copy(
currentBlockWithBitmap,
bitmapSize + next.offsetBytes % VHD_BLOCK_SIZE_BYTES
)
}
yield * yieldAndTrack(currentBlockWithBitmap)
}
yield * yieldAndTrack(footer, 0)
yield * yieldAndTrack(header, FOOTER_SIZE)
yield * yieldAndTrack(bat, FOOTER_SIZE + HEADER_SIZE)
yield * generateFileContent(blockIterator, bitmapSize, ratio)
yield * yieldAndTrack(footer)
})


@@ -0,0 +1,153 @@
import asyncIteratorToStream from 'async-iterator-to-stream'
import { dirname, resolve } from 'path'
import Vhd from './vhd'
import {
BLOCK_UNUSED,
DISK_TYPE_DYNAMIC,
FOOTER_SIZE,
HEADER_SIZE,
SECTOR_SIZE,
} from './_constants'
import { fuFooter, fuHeader, checksumStruct } from './_structs'
import { test as mapTestBit } from './_bitmap'
const resolveRelativeFromFile = (file, path) =>
resolve('/', dirname(file), path).slice(1)
export default asyncIteratorToStream(function * (handler, path) {
const fds = []
try {
const vhds = []
while (true) {
const fd = yield handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
vhds.push(vhd)
yield vhd.readHeaderAndFooter()
yield vhd.readBlockAllocationTable()
if (vhd.footer.diskType === DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// this is the root VHD
const rootVhd = vhds[nVhds - 1]
// data of our synthetic VHD
// TODO: set parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: FOOTER_SIZE + HEADER_SIZE,
parentTimestamp: rootVhd.header.parentTimestamp,
parentUnicodeName: rootVhd.header.parentUnicodeName,
parentUuid: rootVhd.header.parentUuid,
}
const bat = Buffer.allocUnsafe(vhd.batSize)
let footer = {
...vhd.footer,
dataOffset: FOOTER_SIZE,
diskType: rootVhd.footer.diskType,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock = sectorsPerBlockData + vhd.bitmapSize / SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
for (
let iBlock = 0,
blockOffset = Math.ceil(
(header.tableOffset + bat.length) / SECTOR_SIZE
);
iBlock < nBlocks;
++iBlock
) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
// TODO: for generic usage the bitmap needs to be properly computed for each block
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function * (iVhd, i, n) {
const vhd = vhds[iVhd]
const isRootVhd = vhd === rootVhd
if (!vhd.containsBlock(iBlock)) {
if (isRootVhd) {
yield Buffer.alloc((n - i) * SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, i, n)
}
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (isRootVhd) {
yield data.slice(i * SECTOR_SIZE, n * SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * SECTOR_SIZE, i * SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield * emitBlockSectors(owner, 0, sectorsPerBlockData)
}
yield footer
} finally {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
console.warn('createSyntheticStream, closeFd', i, error)
})
}
}
})


@@ -0,0 +1,8 @@
export { default } from './vhd'
export { default as chainVhd } from './chain'
export { default as createReadableRawStream } from './createReadableRawStream'
export {
default as createReadableSparseStream,
} from './createReadableSparseStream'
export { default as createSyntheticStream } from './createSyntheticStream'
export { default as mergeVhd } from './merge'


@@ -2,25 +2,25 @@
import execa from 'execa'
import fs from 'fs-extra'
import getStream from 'get-stream'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { getHandler } from '@xen-orchestra/fs'
import { randomBytes } from 'crypto'
import { fromEvent } from 'promise-toolbox'
import { fromEvent, fromCallback as pFromCallback } from 'promise-toolbox'
import LocalHandler from './remote-handlers/local'
import vhdMerge, {
chainVhd,
createReadStream,
Vhd,
VHD_SECTOR_SIZE,
} from './vhd-merge'
import { pFromCallback, streamToBuffer, tmpDir } from './utils'
import chainVhd from './chain'
import createReadStream from './createSyntheticStream'
import Vhd from './vhd'
import vhdMerge from './merge'
import { SECTOR_SIZE } from './_constants'
const initialDir = process.cwd()
jest.setTimeout(10000)
jest.setTimeout(60000)
beforeEach(async () => {
const dir = await tmpDir()
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
@@ -57,11 +57,11 @@ test('blocks can be moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd._freeFirstBlockSpace(8000000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
@@ -70,20 +70,18 @@ test('blocks can be moved', async () => {
})
test('the BAT MSB is not used for sign', async () => {
const randomBuffer = await pFromCallback(cb =>
randomBytes(VHD_SECTOR_SIZE, cb)
)
const randomBuffer = await pFromCallback(cb => randomBytes(SECTOR_SIZE, cb))
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', '1.8T'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const vhd = new Vhd(handler, 'empty.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
await vhd.readBlockAllocationTable()
// we want the bit 31 to be on, to prove it's not been used for sign
const hugeWritePositionSectors = Math.pow(2, 31) + 200
await vhd.writeData(hugeWritePositionSectors, randomBuffer)
await checkFile('empty.vhd')
// here we are moving the first sector very far in the VHD to prove the BAT doesn't use signed int32
const hugePositionBytes = hugeWritePositionSectors * VHD_SECTOR_SIZE
const hugePositionBytes = hugeWritePositionSectors * SECTOR_SIZE
await vhd._freeFirstBlockSpace(hugePositionBytes)
// we recover the data manually for speed reasons.
@@ -93,7 +91,7 @@ test('the BAT MSB is not used for sign', async () => {
try {
const vhd2 = new Vhd(handler, 'empty.vhd')
await vhd2.readHeaderAndFooter()
await vhd2.readBlockTable()
await vhd2.readBlockAllocationTable()
for (let i = 0; i < vhd.header.maxTableEntries; i++) {
const entry = vhd._getBatEntry(i)
if (entry !== 0xffffffff) {
@@ -110,7 +108,7 @@ test('the BAT MSB is not used for sign', async () => {
} finally {
fs.close(recoveredFile)
}
const recovered = await streamToBuffer(
const recovered = await getStream.buffer(
await fs.createReadStream('recovered', {
start: hugePositionBytes,
end: hugePositionBytes + randomBuffer.length - 1,
@@ -124,11 +122,11 @@ test('writeData on empty file', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(0, randomData)
await recoverRawContent('empty.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(randomData)
@@ -139,11 +137,11 @@ test('writeData in 2 non-overlaping operations', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
const splitPointSectors = 2
await newVhd.writeData(0, randomData.slice(0, splitPointSectors * 512))
await newVhd.writeData(
@@ -159,11 +157,11 @@ test('writeData in 2 overlaping operations', async () => {
await createRandomFile('randomfile', mbOfRandom)
await execa('qemu-img', ['create', '-fvpc', 'empty.vhd', mbOfRandom + 'M'])
const randomData = await fs.readFile('randomfile')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'empty.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
const endFirstWrite = 3
const startSecondWrite = 2
await newVhd.writeData(0, randomData.slice(0, endFirstWrite * 512))
@@ -179,11 +177,11 @@ test('BAT can be extended and blocks moved', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler.getSize('randomfile')
const newVhd = new Vhd(handler, 'randomfile.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.ensureBatSize(2000)
await recoverRawContent('randomfile.vhd', 'recovered', originalSize)
expect(await fs.readFile('recovered')).toEqual(
@@ -203,7 +201,7 @@ test('coalesce works with empty parent files', async () => {
])
await checkFile('randomfile.vhd')
await checkFile('empty.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd', true)
await checkFile('randomfile.vhd')
@@ -226,11 +224,11 @@ test('coalesce works in normal cases', async () => {
mbOfRandom + 1 + 'M',
])
await convertFromRawToVhd('randomfile', 'child1.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
await execa('vhd-util', ['snapshot', '-n', 'child2.vhd', '-p', 'child1.vhd'])
const vhd = new Vhd(handler, 'child2.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
await vhd.readBlockAllocationTable()
vhd.footer.creatorApplication = 'xoa'
await vhd.writeFooter()
@@ -242,7 +240,7 @@ test('coalesce works in normal cases', async () => {
const smallRandom = await fs.readFile('small_randomfile')
const newVhd = new Vhd(handler, 'child2.vhd')
await newVhd.readHeaderAndFooter()
await newVhd.readBlockTable()
await newVhd.readBlockAllocationTable()
await newVhd.writeData(5, smallRandom)
await checkFile('child2.vhd')
await checkFile('child1.vhd')
@@ -261,7 +259,7 @@ test('coalesce works in normal cases', async () => {
await execa('cp', ['randomfile', 'randomfile2'])
const fd = await fs.open('randomfile2', 'r+')
try {
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * VHD_SECTOR_SIZE)
await fs.write(fd, smallRandom, 0, smallRandom.length, 5 * SECTOR_SIZE)
} finally {
await fs.close(fd)
}
@@ -270,15 +268,16 @@ test('coalesce works in normal cases', async () => {
)
})
test('createReadStream passes vhd-util check', async () => {
test('createSyntheticStream passes vhd-util check', async () => {
const initalSize = 4
await createRandomFile('randomfile', initalSize)
await convertFromRawToVhd('randomfile', 'randomfile.vhd')
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const handler = getHandler({ url: 'file://' + process.cwd() })
const stream = createReadStream(handler, 'randomfile.vhd')
await fromEvent(
stream.pipe(await fs.createWriteStream('recovered.vhd')),
'finish'
)
await checkFile('recovered.vhd')
await execa('qemu-img', ['compare', 'recovered.vhd', 'randomfile'])
})


@@ -0,0 +1,77 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import concurrency from 'limit-concurrency-decorator'
import Vhd from './vhd'
import { DISK_TYPE_DIFFERENCING, DISK_TYPE_DYNAMIC } from './_constants'
// Merge vhd child into vhd parent.
export default concurrency(2)(async function merge (
parentHandler,
parentPath,
childHandler,
childPath
) {
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter(),
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
const parentDiskType = parentVhd.footer.diskType
assert(
parentDiskType === DISK_TYPE_DIFFERENCING ||
parentDiskType === DISK_TYPE_DYNAMIC
)
assert.strictEqual(childVhd.footer.diskType, DISK_TYPE_DIFFERENCING)
// Read allocation table of child/parent.
await Promise.all([
parentVhd.readBlockAllocationTable(),
childVhd.readBlockAllocationTable(),
])
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
let mergedDataSize = 0
for (
let blockId = 0;
blockId < childVhd.header.maxTableEntries;
blockId++
) {
if (childVhd.containsBlock(blockId)) {
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
}
} finally {
await parentHandler.closeFile(parentFd)
}
})
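// Usage sketch (illustrative): merge a differencing child back into its
// parent, as backup coalescing does.
//
//   const mergedBytes = await merge(handler, 'parent.vhd', handler, 'child.vhd')
//
// The concurrency(2) decorator above caps simultaneous merges at two,
// whatever the number of callers.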


@@ -0,0 +1,134 @@
/* eslint-env jest */
import execa from 'execa'
import rimraf from 'rimraf'
import tmp from 'tmp'
import { createWriteStream, readFile } from 'fs-promise'
import { fromCallback as pFromCallback, fromEvent } from 'promise-toolbox'
import { createFooter } from './_createFooterHeader'
import createReadableRawVHDStream from './createReadableRawStream'
import createReadableSparseVHDStream from './createReadableSparseStream'
const initialDir = process.cwd()
beforeEach(async () => {
const dir = await pFromCallback(cb => tmp.dir(cb))
process.chdir(dir)
})
afterEach(async () => {
const tmpDir = process.cwd()
process.chdir(initialDir)
await pFromCallback(cb => rimraf(tmpDir, cb))
})
test('createFooter() does not crash', () => {
createFooter(104448, Math.floor(Date.now() / 1000), {
cylinders: 3,
heads: 4,
sectorsPerTrack: 17,
})
})
test('ReadableRawVHDStream does not crash', async () => {
const data = [
{
offsetBytes: 100,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 700,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
const fileSize = 1000
const stream = createReadableRawVHDStream(fileSize, mockParser)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
})
test('ReadableRawVHDStream detects when blocks are out of order', async () => {
const data = [
{
offsetBytes: 700,
data: Buffer.from('azerzaerazeraze', 'ascii'),
},
{
offsetBytes: 100,
data: Buffer.from('gdfslkdfguer', 'ascii'),
},
]
let index = 0
const mockParser = {
next: () => {
if (index < data.length) {
const result = data[index]
index++
return result
} else {
return null
}
},
}
return expect(
new Promise((resolve, reject) => {
const stream = createReadableRawVHDStream(100000, mockParser)
stream.on('error', reject)
const pipe = stream.pipe(createWriteStream('outputStream'))
pipe.on('finish', resolve)
pipe.on('error', reject)
})
).rejects.toThrow('Received out of order blocks')
})
test('ReadableSparseVHDStream can handle a sparse file', async () => {
const blockSize = Math.pow(2, 16)
const blocks = [
{
offsetBytes: blockSize * 3,
data: Buffer.alloc(blockSize, 'azerzaerazeraze', 'ascii'),
},
{
offsetBytes: blockSize * 100,
data: Buffer.alloc(blockSize, 'gdfslkdfguer', 'ascii'),
},
]
const fileSize = blockSize * 110
const stream = createReadableSparseVHDStream(
fileSize,
blockSize,
blocks.map(b => b.offsetBytes),
blocks
)
const pipe = stream.pipe(createWriteStream('output.vhd'))
await fromEvent(pipe, 'finish')
await execa('vhd-util', ['check', '-t', '-i', '-n', 'output.vhd'])
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-O',
'raw',
'output.vhd',
'out1.raw',
])
const out1 = await readFile('out1.raw')
const expected = Buffer.alloc(fileSize)
blocks.forEach(b => {
b.data.copy(expected, b.offsetBytes)
})
await expect(out1.slice(0, expected.length)).toEqual(expected)
})

packages/vhd-lib/src/vhd.js

@@ -0,0 +1,624 @@
import assert from 'assert'
import { fromEvent } from 'promise-toolbox'
import constantStream from './_constant-stream'
import { fuFooter, fuHeader, checksumStruct, unpackField } from './_structs'
import { set as mapSetBit, test as mapTestBit } from './_bitmap'
import {
BLOCK_UNUSED,
DISK_TYPE_DIFFERENCING,
DISK_TYPE_DYNAMIC,
FILE_FORMAT_VERSION,
FOOTER_COOKIE,
FOOTER_SIZE,
HEADER_COOKIE,
HEADER_SIZE,
HEADER_VERSION,
PARENT_LOCATOR_ENTRIES,
PLATFORM_NONE,
PLATFORM_W2KU,
SECTOR_SIZE,
} from './_constants'
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG
? str => console.log(`[vhd-merge]${str}`)
: () => null
// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================
const computeBatSize = entries =>
sectorsToBytes(sectorsRoundUpNoZero(entries * 4))
// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
if (actual !== expected) {
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
}
}
// BAT entry of an unused block, as a buffer containing a big-endian uint32
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(4)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
// Format:
//
// 1. Footer (512)
// 2. Header (1024)
// 3. Unordered entries
// - BAT (batSize @ header.tableOffset)
// - Blocks (@ blockOffset(i))
// - bitmap (blockBitmapSize)
// - data (header.blockSize)
// - Parent locators (parentLocatorSize(i) @ parentLocatorOffset(i))
// 4. Footer (512 @ vhdSize - 512)
//
// Variables:
//
// - batSize = max(1, ceil(header.maxTableEntries * 4 / sectorSize)) * sectorSize
// - blockBitmapSize = ceil(header.blockSize / sectorSize / 8 / sectorSize) * sectorSize
// - blockOffset(i) = bat[i] * sectorSize
// - nBlocks = ceil(footer.currentSize / header.blockSize)
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
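//
// Worked example (illustrative): a 10 GiB dynamic VHD with the default
// header.blockSize of 2 MiB gives
//
// - nBlocks = ceil(10 GiB / 2 MiB) = 5120
// - batSize = ceil(5120 * 4 / 512) * 512 = 20480 bytes (40 sectors)
// - blockBitmapSize = ceil(2 MiB / 512 / 8 / 512) * 512 = 512 bytes
// - each allocated block occupies blockBitmapSize + header.blockSize bytes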
export default class Vhd {
get batSize () {
return computeBatSize(this.header.maxTableEntries)
}
constructor (handler, path) {
this._handler = handler
this._path = path
}
// =================================================================
// Read functions.
// =================================================================
async _read (start, n) {
const { bytesRead, buffer } = await this._handler.read(
this._path,
Buffer.alloc(n),
start
)
assert.equal(bytesRead, n)
return buffer
}
containsBlock (id) {
return this._getBatEntry(id) !== BLOCK_UNUSED
}
// Returns the first address after metadata. (In bytes)
getEndOfHeaders () {
const { header } = this
let end = FOOTER_SIZE + HEADER_SIZE
// Max(end, block allocation table end)
end = Math.max(end, header.tableOffset + this.batSize)
for (let i = 0; i < PARENT_LOCATOR_ENTRIES; i++) {
const entry = header.parentLocatorEntry[i]
if (entry.platformCode !== PLATFORM_NONE) {
end = Math.max(
end,
entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
)
}
}
debug(`End of headers: ${end}.`)
return end
}
// Returns the first sector after data.
getEndOfData () {
let end = Math.ceil(this.getEndOfHeaders() / SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
for (let i = 0; i < maxTableEntries; i++) {
const blockAddr = this._getBatEntry(i)
if (blockAddr !== BLOCK_UNUSED) {
end = Math.max(end, blockAddr + fullBlockSize)
}
}
debug(`End of data: ${end}.`)
return sectorsToBytes(end)
}
// TODO: extract the checks into reusable functions:
// - better human reporting
// - auto repair if possible
async readHeaderAndFooter (checkSecondFooter = true) {
const buf = await this._read(0, FOOTER_SIZE + HEADER_SIZE)
const bufFooter = buf.slice(0, FOOTER_SIZE)
const bufHeader = buf.slice(FOOTER_SIZE)
assertChecksum('footer', bufFooter, fuFooter)
assertChecksum('header', bufHeader, fuHeader)
if (checkSecondFooter) {
const size = await this._handler.getSize(this._path)
assert(
bufFooter.equals(await this._read(size - FOOTER_SIZE, FOOTER_SIZE)),
'footer1 !== footer2'
)
}
const footer = (this.footer = fuFooter.unpack(bufFooter))
assert.strictEqual(footer.cookie, FOOTER_COOKIE, 'footer cookie')
assert.strictEqual(footer.dataOffset, FOOTER_SIZE)
assert.strictEqual(footer.fileFormatVersion, FILE_FORMAT_VERSION)
assert(footer.originalSize <= footer.currentSize)
assert(
footer.diskType === DISK_TYPE_DIFFERENCING ||
footer.diskType === DISK_TYPE_DYNAMIC
)
const header = (this.header = fuHeader.unpack(bufHeader))
assert.strictEqual(header.cookie, HEADER_COOKIE)
assert.strictEqual(header.dataOffset, undefined)
assert.strictEqual(header.headerVersion, HEADER_VERSION)
assert(header.maxTableEntries >= footer.currentSize / header.blockSize)
assert(Number.isInteger(Math.log2(header.blockSize / SECTOR_SIZE)))
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
const sectorsPerBlock = (this.sectorsPerBlock =
header.blockSize / SECTOR_SIZE)
// Compute bitmap size in sectors.
// Default: 1.
const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(
sectorsPerBlock >> 3
))
// Full block size => data block size + bitmap size.
this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)
// In bytes.
// Default: 512.
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
}
// Returns a buffer that contains the block allocation table of a vhd file.
async readBlockAllocationTable () {
const { header } = this
this.blockTable = await this._read(
header.tableOffset,
header.maxTableEntries * 4
)
}
// Return a block's BAT entry: the address, in sectors, of its first sector (the bitmap).
_getBatEntry (block) {
return this.blockTable.readUInt32BE(block * 4)
}
_readBlock (blockId, onlyBitmap = false) {
const blockAddr = this._getBatEntry(blockId)
if (blockAddr === BLOCK_UNUSED) {
throw new Error(`no such block ${blockId}`)
}
return this._read(
sectorsToBytes(blockAddr),
onlyBitmap ? this.bitmapSize : this.fullBlockSize
).then(
buf =>
onlyBitmap
? { id: blockId, bitmap: buf }
: {
id: blockId,
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize),
buffer: buf,
}
)
}
// get the identifiers and first sectors of the first and last block
// in the file
//
_getFirstAndLastBlocks () {
const n = this.header.maxTableEntries
const bat = this.blockTable
let i = 0
let j = 0
let first, firstSector, last, lastSector
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += 4
if (i === n) {
const error = new Error('no allocated block found')
error.noBlock = true
throw error
}
}
lastSector = firstSector
first = last = i
while (i < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += 4
}
return { first, firstSector, last, lastSector }
}
// =================================================================
// Write functions.
// =================================================================
// Write a buffer/stream at a given position in a vhd file.
async _write (data, offset) {
debug(
`_write offset=${offset} size=${
Buffer.isBuffer(data) ? data.length : '???'
}`
)
// TODO: could probably be merged in remote handlers.
const stream = await this._handler.createOutputStream(this._path, {
flags: 'r+',
start: offset,
})
return Buffer.isBuffer(data)
? new Promise((resolve, reject) => {
stream.on('error', reject)
stream.end(data, resolve)
})
: fromEvent(data.pipe(stream), 'finish')
}
async _freeFirstBlockSpace (spaceNeededBytes) {
try {
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(
(tableOffset + batSize + spaceNeededBytes) / SECTOR_SIZE
)
if (
tableOffset + batSize + spaceNeededBytes >=
sectorsToBytes(firstSector)
) {
const { fullBlockSize } = this
const newFirstSector = Math.max(
lastSector + fullBlockSize / SECTOR_SIZE,
newMinSector
)
debug(
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
)
// copy the first block at the end
const block = await this._read(
sectorsToBytes(firstSector),
fullBlockSize
)
await this._write(block, sectorsToBytes(newFirstSector))
await this._setBatEntry(first, newFirstSector)
await this.writeFooter(true)
spaceNeededBytes -= this.fullBlockSize
if (spaceNeededBytes > 0) {
return this._freeFirstBlockSpace(spaceNeededBytes)
}
}
} catch (e) {
if (!e.noBlock) {
throw e
}
}
}
async ensureBatSize (entries) {
const { header } = this
const prevMaxTableEntries = header.maxTableEntries
if (prevMaxTableEntries >= entries) {
return
}
const newBatSize = computeBatSize(entries)
await this._freeFirstBlockSpace(newBatSize - this.batSize)
const maxTableEntries = (header.maxTableEntries = entries)
const prevBat = this.blockTable
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
prevBat.copy(bat)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * 4)
debug(
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
)
await this._write(
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
header.tableOffset + prevBat.length
)
await this.writeHeader()
}
// set the first sector (bitmap) of a block
_setBatEntry (block, blockSector) {
const i = block * 4
const { blockTable } = this
blockTable.writeUInt32BE(blockSector, i)
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
}
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async createBlock (blockId) {
const blockAddr = Math.ceil(this.getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(
constantStream([0], this.fullBlockSize),
sectorsToBytes(blockAddr)
),
this._setBatEntry(blockId, blockAddr),
])
return blockAddr
}
// Write a bitmap at a block address.
async writeBlockBitmap (blockAddr, bitmap) {
const { bitmapSize } = this
if (bitmap.length !== bitmapSize) {
throw new Error(`Bitmap length is not correct! ${bitmap.length}`)
}
const offset = sectorsToBytes(blockAddr)
debug(
`Write bitmap at: ${offset}. (size=${bitmapSize}, data=${bitmap.toString(
'hex'
)})`
)
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async writeEntireBlock (block) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this.createBlock(block.id)
}
await this._write(block.buffer, sectorsToBytes(blockAddr))
}
async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this.createBlock(block.id)
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
} else if (parentBitmap === undefined) {
parentBitmap = (await this._readBlock(block.id, true)).bitmap
}
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
debug(
`writeBlockSectors at ${offset} block=${
block.id
}, sectors=${beginSectorId}...${endSectorId}`
)
for (let i = beginSectorId; i < endSectorId; ++i) {
mapSetBit(parentBitmap, i)
}
await this.writeBlockBitmap(blockAddr, parentBitmap)
await this._write(
block.data.slice(
sectorsToBytes(beginSectorId),
sectorsToBytes(endSectorId)
),
sectorsToBytes(offset)
)
}
async coalesceBlock (child, blockId) {
const block = await child._readBlock(blockId)
const { bitmap, data } = block
debug(`coalesceBlock block=${blockId}`)
// For each sector of block data...
const { sectorsPerBlock } = child
let parentBitmap = null
for (let i = 0; i < sectorsPerBlock; i++) {
// If no changes on one sector, skip.
if (!mapTestBit(bitmap, i)) {
continue
}
let endSector = i + 1
// Count changed sectors.
while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
++endSector
}
// Write n sectors into parent.
debug(`coalesceBlock: write sectors=${i}...${endSector}`)
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this._readBlock(blockId, true)).bitmap
}
await this.writeBlockSectors(block, i, endSector, parentBitmap)
}
i = endSector
}
// Return the merged data size
return data.length
}
// Write the in-memory footer: always at the end of the VHD file, and also at the beginning unless onlyEndFooter is set.
async writeFooter (onlyEndFooter = false) {
const { footer } = this
const rawFooter = fuFooter.pack(footer)
const eof = await this._handler.getSize(this._path)
// the file may be longer than expected; the footer must still be written at its very end
const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
footer.checksum = checksumStruct(rawFooter, fuFooter)
debug(
`Write footer at: ${offset} (checksum=${
footer.checksum
}). (data=${rawFooter.toString('hex')})`
)
if (!onlyEndFooter) {
await this._write(rawFooter, 0)
}
await this._write(rawFooter, offset)
}
writeHeader () {
const { header } = this
const rawHeader = fuHeader.pack(header)
header.checksum = checksumStruct(rawHeader, fuHeader)
const offset = FOOTER_SIZE
debug(
`Write header at: ${offset} (checksum=${
header.checksum
}). (data=${rawHeader.toString('hex')})`
)
return this._write(rawHeader, offset)
}
async writeData (offsetSectors, buffer) {
const bufferSizeSectors = Math.ceil(buffer.length / SECTOR_SIZE)
const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
const endBufferSectors = offsetSectors + bufferSizeSectors
const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
await this.ensureBatSize(lastBlock)
const blockSizeBytes = this.sectorsPerBlock * SECTOR_SIZE
const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock
for (
let currentBlock = startBlock;
currentBlock <= lastBlock;
currentBlock++
) {
const offsetInBlockSectors = Math.max(
0,
offsetSectors - currentBlock * this.sectorsPerBlock
)
const endInBlockSectors = Math.min(
endBufferSectors - currentBlock * this.sectorsPerBlock,
this.sectorsPerBlock
)
const startInBuffer = Math.max(
0,
(currentBlock * this.sectorsPerBlock - offsetSectors) * SECTOR_SIZE
)
const endInBuffer = Math.min(
((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
SECTOR_SIZE,
buffer.length
)
let inputBuffer
if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
inputBuffer = buffer.slice(startInBuffer, endInBuffer)
} else {
inputBuffer = Buffer.alloc(blockSizeBytes, 0)
buffer.copy(
inputBuffer,
offsetInBlockSectors * SECTOR_SIZE,
startInBuffer,
endInBuffer
)
}
await this.writeBlockSectors(
{ id: currentBlock, data: inputBuffer },
offsetInBlockSectors,
endInBlockSectors
)
}
await this.writeFooter()
}
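// Example (illustrative): with the default 2 MiB blocks (4096 sectors),
// writeData(4095, Buffer.alloc(1024)) spans sectors 4095..4096, i.e. the
// last sector of block 0 and the first sector of block 1, so the loop
// above issues two writeBlockSectors calls.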
async ensureSpaceForParentLocators (neededSectors) {
const firstLocatorOffset = FOOTER_SIZE + HEADER_SIZE
const currentSpace =
Math.floor(this.header.tableOffset / SECTOR_SIZE) -
firstLocatorOffset / SECTOR_SIZE
if (currentSpace < neededSectors) {
const deltaSectors = neededSectors - currentSpace
await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
this.header.tableOffset += sectorsToBytes(deltaSectors)
await this._write(this.blockTable, this.header.tableOffset)
}
return firstLocatorOffset
}
async setUniqueParentLocator (fileNameString) {
const { header } = this
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(fileNameString, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / SECTOR_SIZE)
const position = await this.ensureSpaceForParentLocators(dataSpaceSectors)
await this._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace =
dataSpaceSectors * SECTOR_SIZE
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = PLATFORM_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
}
}
}


@@ -4,7 +4,7 @@ process.env.DEBUG = '*'
const defer = require('golike-defer').default
const pump = require('pump')
const { fromCallback } = require('promise-toolbox')
const { CancelToken, fromCallback } = require('promise-toolbox')
const { createClient } = require('../')
@@ -30,8 +30,11 @@ defer(async ($defer, args) => {
await xapi.connect()
$defer(() => xapi.disconnect())
const { cancel, token } = CancelToken.source()
process.on('SIGINT', cancel)
// https://xapi-project.github.io/xen-api/snapshots.html#downloading-a-disk-or-snapshot
const exportStream = await xapi.getResource('/export_raw_vdi/', {
const exportStream = await xapi.getResource(token, '/export_raw_vdi/', {
query: {
format: raw ? 'raw' : 'vhd',
vdi: await resolveRef(xapi, 'VDI', args[1])


@@ -4,7 +4,7 @@ process.env.DEBUG = '*'
const defer = require('golike-defer').default
const pump = require('pump')
-const { fromCallback } = require('promise-toolbox')
+const { CancelToken, fromCallback } = require('promise-toolbox')
const { createClient } = require('../')
@@ -24,8 +24,11 @@ defer(async ($defer, args) => {
await xapi.connect()
$defer(() => xapi.disconnect())
+const { cancel, token } = CancelToken.source()
+process.on('SIGINT', cancel)
// https://xapi-project.github.io/xen-api/importexport.html
-const exportStream = await xapi.getResource('/export/', {
+const exportStream = await xapi.getResource(token, '/export/', {
query: {
ref: await resolveRef(xapi, 'VM', args[1]),
use_compression: 'true'

View File

@@ -3,6 +3,7 @@
process.env.DEBUG = '*'
const defer = require('golike-defer').default
+const { CancelToken } = require('promise-toolbox')
const { createClient } = require('../')
@@ -28,8 +29,11 @@ defer(async ($defer, args) => {
await xapi.connect()
$defer(() => xapi.disconnect())
+const { cancel, token } = CancelToken.source()
+process.on('SIGINT', cancel)
// https://xapi-project.github.io/xen-api/snapshots.html#uploading-a-disk-or-snapshot
-await xapi.putResource(createInputStream(args[2]), '/import_raw_vdi/', {
+await xapi.putResource(token, createInputStream(args[2]), '/import_raw_vdi/', {
query: {
format: raw ? 'raw' : 'vhd',
vdi: await resolveRef(xapi, 'VDI', args[1])

View File

@@ -3,6 +3,7 @@
process.env.DEBUG = '*'
const defer = require('golike-defer').default
+const { CancelToken } = require('promise-toolbox')
const { createClient } = require('../')
@@ -22,8 +23,11 @@ defer(async ($defer, args) => {
await xapi.connect()
$defer(() => xapi.disconnect())
+const { cancel, token } = CancelToken.source()
+process.on('SIGINT', cancel)
// https://xapi-project.github.io/xen-api/importexport.html
-await xapi.putResource(createInputStream(args[1]), '/import/', {
+await xapi.putResource(token, createInputStream(args[1]), '/import/', {
query: args[2] && { sr_id: await resolveRef(xapi, 'SR', args[2]) }
})
})(process.argv.slice(2)).catch(

View File

@@ -1,6 +1,6 @@
{
"dependencies": {
"golike-defer": "^0.1.0",
"pump": "^1.0.2"
"golike-defer": "^0.4.1",
"pump": "^3.0.0"
}
}

View File

@@ -0,0 +1,30 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
end-of-stream@^1.1.0:
version "1.4.1"
resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.1.tgz#ed29634d19baba463b6ce6b80a37213eab71ec43"
dependencies:
once "^1.4.0"
golike-defer@^0.4.1:
version "0.4.1"
resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"
once@^1.3.1, once@^1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
dependencies:
wrappy "1"
pump@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64"
dependencies:
end-of-stream "^1.1.0"
once "^1.3.1"
wrappy@1:
version "1.0.2"
resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f"

View File

@@ -1,6 +1,6 @@
{
"name": "xen-api",
"version": "0.16.8",
"version": "0.16.10",
"license": "ISC",
"description": "Connector to the Xen API",
"keywords": [

View File

@@ -182,20 +182,20 @@ const EMPTY_ARRAY = freezeObject([])
// -------------------------------------------------------------------
-const getTaskResult = (task, onSuccess, onFailure) => {
+const getTaskResult = task => {
const { status } = task
if (status === 'cancelled') {
-return [onFailure(new Cancel('task canceled'))]
+return Promise.reject(new Cancel('task canceled'))
}
if (status === 'failure') {
-return [onFailure(wrapError(task.error_info))]
+return Promise.reject(wrapError(task.error_info))
}
if (status === 'success') {
// the result might be:
// - empty string
// - an opaque reference
// - an XML-RPC value
-return [onSuccess(task.result)]
+return Promise.resolve(task.result)
}
}
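
After this change getTaskResult is self-contained: it returns a settled promise when the task has finished and undefined while it is still running. Callers consume it roughly like this (a sketch; the actual watcher registration appears further down in this diff):

const result = getTaskResult(task)
if (result !== undefined) {
  // already settled: resolves with task.result, or rejects with Cancel
  // or the wrapped XAPI error
  return result
}
// otherwise register a deferred in this._taskWatchers and wait for events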
@@ -246,7 +246,7 @@ export class Xapi extends EventEmitter {
objects.getKey = getKey
this._objectsByRefs = createObject(null)
-this._objectsByRefs['OpaqueRef:NULL'] = null
+this._objectsByRefs['OpaqueRef:NULL'] = undefined
this._taskWatchers = Object.create(null)
@@ -595,7 +595,10 @@ export class Xapi extends EventEmitter {
if (error != null && (response = error.response) != null) {
response.req.abort()
-const { headers: { location }, statusCode } = response
+const {
+  headers: { location },
+  statusCode,
+} = response
if (statusCode === 302 && location !== undefined) {
return doRequest(location)
}
@@ -642,11 +645,11 @@ export class Xapi extends EventEmitter {
let watcher = watchers[ref]
if (watcher === undefined) {
// sync check if the task is already settled
-const task = this.objects.all[ref]
+const task = this._objectsByRefs[ref]
if (task !== undefined) {
-const result = getTaskResult(task, Promise.resolve, Promise.reject)
-if (result) {
-  return result[0]
+const result = getTaskResult(task)
+if (result !== undefined) {
+  return result
}
}
@@ -777,15 +780,13 @@ export class Xapi extends EventEmitter {
this._pool = object
const eventWatchers = this._eventWatchers
-if (eventWatchers !== undefined) {
-  forEach(object.other_config, (_, key) => {
-    const eventWatcher = eventWatchers[key]
-    if (eventWatcher !== undefined) {
-      delete eventWatchers[key]
-      eventWatcher(object)
-    }
-  })
-}
+Object.keys(object.other_config).forEach(key => {
+  const eventWatcher = eventWatchers[key]
+  if (eventWatcher !== undefined) {
+    delete eventWatchers[key]
+    eventWatcher(object)
+  }
+})
} else if (type === 'task') {
if (prev === undefined) {
++this._nTasks
@@ -793,11 +794,12 @@ export class Xapi extends EventEmitter {
const taskWatchers = this._taskWatchers
const taskWatcher = taskWatchers[ref]
-if (
-  taskWatcher !== undefined &&
-  getTaskResult(object, taskWatcher.resolve, taskWatcher.reject)
-) {
-  delete taskWatchers[ref]
+if (taskWatcher !== undefined) {
+  const result = getTaskResult(object)
+  if (result !== undefined) {
+    taskWatcher.resolve(result)
+    delete taskWatchers[ref]
+  }
}
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-acl-resolver",
"version": "0.2.3",
"version": "0.2.4",
"license": "ISC",
"description": "Xen-Orchestra internal: do ACLs resolution",
"keywords": [],

View File

@@ -50,7 +50,9 @@ const checkAuthorizationByTypes = {
network: or(checkSelf, checkMember('$pool')),
-SR: or(checkSelf, checkMember('$pool')),
+PIF: checkMember('$host'),
+SR: or(checkSelf, checkMember('$container')),
+task: checkMember('$host'),
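
For context, or and checkMember are small predicate combinators defined earlier in this file; approximately (a sketch only, the exact signatures in xo-acl-resolver may differ):

// approximate behaviour, not the real implementation
const checkMember = memberName => (object, permission, check) =>
  check(object[memberName], permission) // recurse into e.g. the $container
const or = (...checks) => (...args) => checks.some(check => check(...args))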

View File

@@ -28,7 +28,7 @@
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "7.0.0-beta.44",
"@babel/polyfill": "7.0.0-beta.49",
"bluebird": "^3.5.1",
"chalk": "^2.2.0",
"event-to-promise": "^0.8.0",
@@ -49,10 +49,10 @@
"xo-lib": "^0.9.0"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -25,17 +25,16 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.18.0",
"@babel/runtime": "^7.0.0-beta.49",
"kindof": "^2.0.0",
"lodash": "^4.17.2",
"make-error": "^1.0.2"
},
"devDependencies": {
"babel-cli": "^6.24.1",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"@babel/cli": "^7.0.0-beta.49",
"@babel/core": "^7.0.0-beta.49",
"@babel/plugin-transform-runtime": "^7.0.0-beta.49",
"@babel/preset-env": "^7.0.0-beta.49",
"cross-env": "^5.1.3",
"event-to-promise": "^0.8.0",
"rimraf": "^2.6.1"
@@ -46,22 +45,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepublishOnly": "yarn run build"
},
"babel": {
"plugins": [
"lodash",
"transform-runtime"
],
"presets": [
[
"env",
{
"targets": {
"node": 4
}
}
],
"stage-3"
]
}
}

View File

@@ -0,0 +1,3 @@
module.exports = require('../../@xen-orchestra/babel-config')(
require('./package.json')
)

View File

@@ -27,10 +27,10 @@
"lodash": "^4.13.1"
},
"devDependencies": {
"babel-cli": "^6.24.1",
"@babel/cli": "^7.0.0-beta.49",
"@babel/core": "^7.0.0-beta.49",
"@babel/preset-env": "^7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"babel-preset-env": "^1.5.2",
"babel-preset-stage-3": "^6.24.1",
"cross-env": "^5.1.3",
"deep-freeze": "^0.0.1",
"rimraf": "^2.6.1"
@@ -41,22 +41,5 @@
"prebuild": "rimraf dist/",
"predev": "yarn run prebuild",
"prepare": "yarn run build"
},
"babel": {
"plugins": [
"lodash"
],
"presets": [
[
"env",
{
"targets": {
"browsers": "> 5%",
"node": 4
}
}
],
"stage-3"
]
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-auth-saml",
"version": "0.5.1",
"version": "0.5.2",
"license": "AGPL-3.0",
"description": "SAML authentication plugin for XO-Server",
"keywords": [

View File

@@ -52,6 +52,7 @@ class AuthSamlXoPlugin {
new Strategy(this._conf, async (profile, done) => {
const name = profile[this._usernameField]
if (!name) {
+console.warn('xo-server-auth-saml:', profile)
done('no name found for this user')
return
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-backup-reports",
"version": "0.10.0",
"version": "0.12.2",
"license": "AGPL-3.0",
"description": "Backup reports plugin for XO-Server",
"keywords": [
@@ -35,6 +35,7 @@
"node": ">=4"
},
"dependencies": {
"babel-runtime": "^6.26.0",
"human-format": "^0.10.0",
"lodash": "^4.13.1",
"moment-timezone": "^0.5.13"
@@ -42,6 +43,7 @@
"devDependencies": {
"babel-cli": "^6.24.1",
"babel-plugin-lodash": "^3.3.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-preset-env": "^1.5.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.1"
@@ -56,7 +58,8 @@
},
"babel": {
"plugins": [
"lodash"
"lodash",
"transform-runtime"
],
"presets": [
[

View File

@@ -1,7 +1,6 @@
import humanFormat from 'human-format'
import moment from 'moment-timezone'
-import { forEach, startCase } from 'lodash'
+import { forEach, get, startCase } from 'lodash'
import pkg from '../package'
export const configurationSchema = {
@@ -37,13 +36,19 @@ const ICON_FAILURE = '🚨'
const ICON_SKIPPED = '⏩'
const ICON_SUCCESS = '✔'
+const STATUS_ICON = {
+  skipped: ICON_SKIPPED,
+  success: ICON_SUCCESS,
+  failure: ICON_FAILURE,
+}
const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a'
const createDateFormater = timezone =>
timezone !== undefined
? timestamp =>
moment(timestamp)
  .tz(timezone)
  .format(DATE_FORMAT)
: timestamp => moment(timestamp).format(DATE_FORMAT)
const formatDuration = milliseconds => moment.duration(milliseconds).humanize()
@@ -57,15 +62,18 @@ const formatSize = bytes =>
})
const formatSpeed = (bytes, milliseconds) =>
-  humanFormat(bytes * 1e3 / milliseconds, {
-    scale: 'binary',
-    unit: 'B/s',
-  })
+  milliseconds > 0
+    ? humanFormat((bytes * 1e3) / milliseconds, {
+        scale: 'binary',
+        unit: 'B/s',
+      })
+    : 'N/A'
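
The new guard keeps a zero-millisecond duration from producing an 'Infinity B/s' string. Roughly (the exact humanFormat rendering may differ slightly):

formatSpeed(1024 * 1024, 2000) // ≈ '512 KiB/s' (1 MiB transferred in 2 s)
formatSpeed(1024 * 1024, 0)    // 'N/A'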
const logError = e => {
console.error('backup report error:', e)
}
+const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern'
const NO_SUCH_OBJECT_ERROR = 'no such object'
const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain'
const UNHEALTHY_VDI_CHAIN_MESSAGE =
@@ -94,14 +102,376 @@ class BackupReportsXoPlugin {
this._xo.removeListener('job:terminated', this._report)
}
-_wrapper (status) {
-  return new Promise(resolve => resolve(this._listener(status))).catch(
-    logError
-  )
+_wrapper (status, job, schedule, runJobId) {
+  return new Promise(resolve =>
+    resolve(
+      job.type === 'backup'
+        ? this._backupNgListener(status, job, schedule, runJobId)
+        : this._listener(status, job, schedule, runJobId)
+    )
+  ).catch(logError)
}
async _backupNgListener (_1, _2, { timezone }, runJobId) {
const xo = this._xo
const log = await xo.getBackupNgLogs(runJobId)
const { reportWhen, mode } = log.data || {}
if (
reportWhen === 'never' ||
(log.status === 'success' && reportWhen === 'failure')
) {
return
}
const jobName = (await xo.getJob(log.jobId, 'backup')).name
const formatDate = createDateFormater(timezone)
if (
(log.status === 'failure' || log.status === 'skipped') &&
log.result !== undefined
) {
let markdown = [
`## Global status: ${log.status}`,
'',
`- **mode**: ${mode}`,
`- **Start time**: ${formatDate(log.start)}`,
`- **End time**: ${formatDate(log.end)}`,
`- **Duration**: ${formatDuration(log.end - log.start)}`,
`- **Error**: ${log.result.message}`,
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${
log.status
} Backup report for ${jobName} ${STATUS_ICON[log.status]}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${
log.status
}] Backup report for ${jobName} - Error : ${log.result.message}`,
})
}
const failedVmsText = []
const skippedVmsText = []
const successfulVmsText = []
const nagiosText = []
let globalMergeSize = 0
let globalTransferSize = 0
let nFailures = 0
let nSkipped = 0
for (const taskLog of log.tasks) {
if (taskLog.status === 'success' && reportWhen === 'failure') {
return
}
const vmId = taskLog.data.id
let vm
try {
vm = xo.getObject(vmId)
} catch (e) {}
const text = [
`### ${vm !== undefined ? vm.name_label : 'VM not found'}`,
'',
`- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`,
`- **Start time**: ${formatDate(taskLog.start)}`,
`- **End time**: ${formatDate(taskLog.end)}`,
`- **Duration**: ${formatDuration(taskLog.end - taskLog.start)}`,
]
const failedSubTasks = []
const snapshotText = []
const srsText = []
const remotesText = []
for (const subTaskLog of taskLog.tasks || []) {
if (
subTaskLog.message !== 'export' &&
subTaskLog.message !== 'snapshot'
) {
continue
}
const icon = STATUS_ICON[subTaskLog.status]
const errorMessage = ` - **Error**: ${get(
subTaskLog.result,
'message'
)}`
if (subTaskLog.message === 'snapshot') {
snapshotText.push(
`- **Snapshot** ${icon}`,
` - **Start time**: ${formatDate(subTaskLog.start)}`,
` - **End time**: ${formatDate(subTaskLog.end)}`
)
} else if (subTaskLog.data.type === 'remote') {
const id = subTaskLog.data.id
const remote = await xo.getRemote(id).catch(() => {})
remotesText.push(
` - **${
remote !== undefined ? remote.name : `Remote Not found`
}** (${id}) ${icon}`,
` - **Start time**: ${formatDate(subTaskLog.start)}`,
` - **End time**: ${formatDate(subTaskLog.end)}`,
` - **Duration**: ${formatDuration(
subTaskLog.end - subTaskLog.start
)}`
)
if (subTaskLog.status === 'failure') {
failedSubTasks.push(remote !== undefined ? remote.name : id)
remotesText.push('', errorMessage)
}
} else {
const id = subTaskLog.data.id
let sr
try {
sr = xo.getObject(id)
} catch (e) {}
const [srName, srUuid] =
sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, id]
srsText.push(
` - **${srName}** (${srUuid}) ${icon}`,
` - **Start time**: ${formatDate(subTaskLog.start)}`,
` - **End time**: ${formatDate(subTaskLog.end)}`,
` - **Duration**: ${formatDuration(
subTaskLog.end - subTaskLog.start
)}`
)
if (subTaskLog.status === 'failure') {
failedSubTasks.push(sr !== undefined ? sr.name_label : id)
srsText.push('', errorMessage)
}
}
forEach(subTaskLog.tasks, operationLog => {
if (
operationLog.message !== 'merge' &&
operationLog.message !== 'transfer'
) {
return
}
const operationInfoText = []
if (operationLog.status === 'success') {
const size = operationLog.result.size
if (operationLog.message === 'merge') {
globalMergeSize += size
} else {
globalTransferSize += size
}
operationInfoText.push(
` - **Size**: ${formatSize(size)}`,
` - **Speed**: ${formatSpeed(
size,
operationLog.end - operationLog.start
)}`
)
} else {
operationInfoText.push(
` - **Error**: ${get(operationLog.result, 'message')}`
)
}
const operationText = [
` - **${operationLog.message}** ${
STATUS_ICON[operationLog.status]
}`,
` - **Start time**: ${formatDate(operationLog.start)}`,
` - **End time**: ${formatDate(operationLog.end)}`,
` - **Duration**: ${formatDuration(
operationLog.end - operationLog.start
)}`,
...operationInfoText,
].join('\n')
if (get(subTaskLog, 'data.type') === 'remote') {
remotesText.push(operationText)
remotesText.join('\n')
}
if (get(subTaskLog, 'data.type') === 'SR') {
srsText.push(operationText)
srsText.join('\n')
}
})
}
if (srsText.length !== 0) {
srsText.unshift(`- **SRs**`)
}
if (remotesText.length !== 0) {
remotesText.unshift(`- **Remotes**`)
}
const subText = [...snapshotText, '', ...srsText, '', ...remotesText]
if (taskLog.result !== undefined) {
if (taskLog.status === 'skipped') {
++nSkipped
skippedVmsText.push(
...text,
`- **Reason**: ${
taskLog.result.message === UNHEALTHY_VDI_CHAIN_ERROR
? UNHEALTHY_VDI_CHAIN_MESSAGE
: taskLog.result.message
}`,
''
)
nagiosText.push(
`[(Skipped) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
taskLog.result.message
} ]`
)
} else {
++nFailures
failedVmsText.push(
...text,
`- **Error**: ${taskLog.result.message}`,
''
)
nagiosText.push(
`[(Failed) ${vm !== undefined ? vm.name_label : 'undefined'} : ${
taskLog.result.message
} ]`
)
}
} else {
if (taskLog.status === 'failure') {
++nFailures
failedVmsText.push(...text, '', '', ...subText, '')
nagiosText.push(
`[${
vm !== undefined ? vm.name_label : 'undefined'
}: (failed)[${failedSubTasks.toString()}]]`
)
} else {
successfulVmsText.push(...text, '', '', ...subText, '')
}
}
}
const nVms = log.tasks.length
const nSuccesses = nVms - nFailures - nSkipped
let markdown = [
`## Global status: ${log.status}`,
'',
`- **mode**: ${mode}`,
`- **Start time**: ${formatDate(log.start)}`,
`- **End time**: ${formatDate(log.end)}`,
`- **Duration**: ${formatDuration(log.end - log.start)}`,
`- **Successes**: ${nSuccesses} / ${nVms}`,
]
if (globalTransferSize !== 0) {
markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`)
}
if (globalMergeSize !== 0) {
markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`)
}
markdown.push('')
if (nFailures !== 0) {
markdown.push(
'---',
'',
`## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`,
'',
...failedVmsText
)
}
if (nSkipped !== 0) {
markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText)
}
if (nSuccesses !== 0 && reportWhen !== 'failure') {
markdown.push(
'---',
'',
`## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`,
'',
...successfulVmsText
)
}
markdown.push('---', '', `*${pkg.name} v${pkg.version}*`)
markdown = markdown.join('\n')
return this._sendReport({
markdown,
subject: `[Xen Orchestra] ${log.status} Backup report for ${jobName} ${
STATUS_ICON[log.status]
}`,
nagiosStatus: log.status === 'success' ? 0 : 2,
nagiosMarkdown:
log.status === 'success'
? `[Xen Orchestra] [Success] Backup report for ${jobName}`
: `[Xen Orchestra] [${
nFailures !== 0 ? 'Failure' : 'Skipped'
}] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`,
})
}
_sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) {
const xo = this._xo
return Promise.all([
xo.sendEmail !== undefined &&
xo.sendEmail({
to: this._mailsReceivers,
subject,
markdown,
}),
xo.sendToXmppClient !== undefined &&
xo.sendToXmppClient({
to: this._xmppReceivers,
message: markdown,
}),
xo.sendSlackMessage !== undefined &&
xo.sendSlackMessage({
message: markdown,
}),
xo.sendPassiveCheck !== undefined &&
xo.sendPassiveCheck({
status: nagiosStatus,
message: nagiosMarkdown,
}),
])
}
_listener (status) {
-const { calls } = status
+const { calls, timezone, error } = status
+const formatDate = createDateFormater(timezone)
if (status.error !== undefined) {
const [globalStatus, icon] =
error.message === NO_VMS_MATCH_THIS_PATTERN
? ['Skipped', ICON_SKIPPED]
: ['Failure', ICON_FAILURE]
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **Start time**: ${formatDate(status.start)}`,
`- **End time**: ${formatDate(status.end)}`,
`- **Duration**: ${formatDuration(status.end - status.start)}`,
`- **Error**: ${error.message}`,
'---',
'',
`*${pkg.name} v${pkg.version}*`,
]
markdown = markdown.join('\n')
return this._sendReport({
subject: `[Xen Orchestra] ${globalStatus} ${icon}`,
markdown,
nagiosStatus: 2,
nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${
error.message
}`,
})
}
const callIds = Object.keys(calls)
const nCalls = callIds.length
@@ -139,8 +509,6 @@ class BackupReportsXoPlugin {
const skippedBackupsText = []
const successfulBackupText = []
-const formatDate = createDateFormater(status.timezone)
forEach(calls, call => {
const { id = call.params.vm } = call.params
@@ -226,20 +594,21 @@ class BackupReportsXoPlugin {
return
}
-const { end, start } = status
const { tag } = oneCall.params
-const duration = end - start
+const duration = status.end - status.start
const nSuccesses = nCalls - nFailures - nSkipped
const globalStatus = globalSuccess
? `Success`
-  : nFailures !== 0 ? `Failure` : `Skipped`
+  : nFailures !== 0
+    ? `Failure`
+    : `Skipped`
let markdown = [
`## Global status: ${globalStatus}`,
'',
`- **Type**: ${formatMethod(method)}`,
-`- **Start time**: ${formatDate(start)}`,
-`- **End time**: ${formatDate(end)}`,
+`- **Start time**: ${formatDate(status.start)}`,
+`- **End time**: ${formatDate(status.end)}`,
`- **Duration**: ${formatDuration(duration)}`,
`- **Successes**: ${nSuccesses} / ${nCalls}`,
]
@@ -285,37 +654,22 @@ class BackupReportsXoPlugin {
markdown = markdown.join('\n')
-const xo = this._xo
-return Promise.all([
-  xo.sendEmail !== undefined &&
-    xo.sendEmail({
-      to: this._mailsReceivers,
-      subject: `[Xen Orchestra] ${globalStatus} Backup report for ${tag} ${
-        globalSuccess
-          ? ICON_SUCCESS
-          : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED
-      }`,
-      markdown,
-    }),
-  xo.sendToXmppClient !== undefined &&
-    xo.sendToXmppClient({
-      to: this._xmppReceivers,
-      message: markdown,
-    }),
-  xo.sendSlackMessage !== undefined &&
-    xo.sendSlackMessage({
-      message: markdown,
-    }),
-  xo.sendPassiveCheck !== undefined &&
-    xo.sendPassiveCheck({
-      status: globalSuccess ? 0 : 2,
-      message: globalSuccess
-        ? `[Xen Orchestra] [Success] Backup report for ${tag}`
-        : `[Xen Orchestra] [${
-            nFailures !== 0 ? 'Failure' : 'Skipped'
-          }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
-    }),
-])
+return this._sendReport({
+  markdown,
+  subject: `[Xen Orchestra] ${globalStatus} Backup report for ${tag} ${
+    globalSuccess
+      ? ICON_SUCCESS
+      : nFailures !== 0
+        ? ICON_FAILURE
+        : ICON_SKIPPED
+  }`,
+  nagiosStatus: globalSuccess ? 0 : 2,
+  nagiosMarkdown: globalSuccess
+    ? `[Xen Orchestra] [Success] Backup report for ${tag}`
+    : `[Xen Orchestra] [${
+        nFailures !== 0 ? 'Failure' : 'Skipped'
+      }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`,
+})
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-perf-alert",
"version": "0.0.0",
"version": "0.1.0",
"license": "AGPL-3.0",
"description": "",
"keywords": [],
@@ -26,10 +26,10 @@
"lodash": "^4.17.4"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "^7.0.0-beta.44",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "^7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"rimraf": "^2.6.2"

View File

@@ -1,10 +1,11 @@
import JSON5 from 'json5'
import { createSchedule } from '@xen-orchestra/cron'
-import { forOwn, map, mean } from 'lodash'
+import { assign, forOwn, map, mean } from 'lodash'
import { utcParse } from 'd3-time-format'
const VM_FUNCTIONS = {
cpuUsage: {
+name: 'VM CPU usage',
description:
'Raises an alarm when the average usage of any CPU is higher than the threshold',
unit: '%',
@@ -31,6 +32,7 @@ const VM_FUNCTIONS = {
},
},
memoryUsage: {
+name: 'VM memory usage',
description:
'Raises an alarm when the used memory % is higher than the threshold',
unit: '% used',
@@ -60,6 +62,7 @@ const VM_FUNCTIONS = {
const HOST_FUNCTIONS = {
cpuUsage: {
+name: 'host CPU usage',
description:
'Raises an alarm when the average usage of any CPU is higher than the threshold',
unit: '%',
@@ -86,6 +89,7 @@ const HOST_FUNCTIONS = {
},
},
memoryUsage: {
+name: 'host memory usage',
description:
'Raises an alarm when the used memory % is higher than the threshold',
unit: '% used',
@@ -105,9 +109,25 @@ const HOST_FUNCTIONS = {
)
},
getDisplayableValue,
-shouldAlarm: () => {
-  return getDisplayableValue() > threshold
-},
+shouldAlarm: () => getDisplayableValue() > threshold,
}
},
},
}
+const SR_FUNCTIONS = {
+  storageUsage: {
+    name: 'SR storage usage',
+    description:
+      'Raises an alarm when the used disk space % is higher than the threshold',
+    unit: '% used',
+    comparator: '>',
+    createGetter: threshold => sr => {
+      const getDisplayableValue = () =>
+        sr.physical_utilisation * 100 / sr.physical_size
+      return {
+        getDisplayableValue,
+        shouldAlarm: () => getDisplayableValue() > threshold,
+      }
+    },
+  },
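
Unlike the RRD-backed VM and host counters, SR usage comes straight from XAPI fields, which is why this definition exposes createGetter. A minimal usage sketch (the sr object mimics the two XAPI fields used above; numbers invented):

const getter = SR_FUNCTIONS.storageUsage.createGetter(80) // threshold: 80 % used
const sr = { physical_utilisation: 900 * 1024 ** 3, physical_size: 1024 ** 4 }
const probe = getter(sr)
probe.getDisplayableValue() // ≈ 87.9 (% used)
probe.shouldAlarm() // true, since 87.9 > 80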
@@ -116,6 +136,7 @@ const HOST_FUNCTIONS = {
const TYPE_FUNCTION_MAP = {
vm: VM_FUNCTIONS,
host: HOST_FUNCTIONS,
+sr: SR_FUNCTIONS,
}
// list of currently ringing alarms, to avoid double notification
@@ -229,11 +250,52 @@ export const configurationSchema = {
required: ['uuids'],
},
},
+srMonitors: {
+  type: 'array',
+  title: 'SR Monitors',
+  description:
+    'Alarms checking all SRs on all pools. The selected performance counter is sampled regularly and averaged. ' +
+    'The Average is compared to the threshold and an alarm is raised upon crossing',
+  items: {
+    type: 'object',
+    properties: {
+      uuids: {
+        title: 'SRs',
+        type: 'array',
+        items: {
+          type: 'string',
+          $type: 'SR',
+        },
+      },
+      variableName: {
+        title: 'Alarm Type',
+        description: Object.keys(SR_FUNCTIONS)
+          .map(
+            k =>
+              ` * ${k} (${SR_FUNCTIONS[k].unit}): ${
+                SR_FUNCTIONS[k].description
+              }`
+          )
+          .join('\n'),
+        type: 'string',
+        default: Object.keys(SR_FUNCTIONS)[0],
+        enum: Object.keys(SR_FUNCTIONS),
+      },
+      alarmTriggerLevel: {
+        title: 'Threshold',
+        description:
+          'The direction of the crossing is given by the Alarm type',
+        type: 'number',
+        default: 80,
+      },
+    },
+    required: ['uuids'],
+  },
+},
toEmails: {
type: 'array',
title: 'Email addresses',
description: 'Email addresses of the alert recipients',
items: {
type: 'string',
},
@@ -259,13 +321,11 @@ const raiseOrLowerAlarm = (
currentAlarms[alarmId] = true
raiseCallback(alarmId)
}
-} else {
-  if (current) {
-    try {
-      lowerCallback(alarmId)
-    } finally {
-      delete currentAlarms[alarmId]
-    }
+} else if (current) {
+  try {
+    lowerCallback(alarmId)
+  } finally {
+    delete currentAlarms[alarmId]
+  }
}
}
@@ -297,24 +357,38 @@ class PerfAlertXoPlugin {
clearCurrentAlarms()
}
+load () {
+  this._job.start()
+}
+unload () {
+  this._job.stop()
+}
_generateUrl (type, object) {
-const map = {
-  vm: () => `${this._configuration.baseUrl}#/vms/${object.uuid}/stats`,
-  host: () => `${this._configuration.baseUrl}#/hosts/${object.uuid}/stats`,
-}
-return map[type]()
+const { baseUrl } = this._configuration
+const { uuid } = object
+switch (type) {
+  case 'vm':
+    return `${baseUrl}#/vms/${uuid}/stats`
+  case 'host':
+    return `${baseUrl}#/hosts/${uuid}/stats`
+  case 'sr':
+    return `${baseUrl}#/srs/${uuid}/general`
+  default:
+    return 'unknown type'
+}
}
async test () {
-const hostMonitorPart2 = await Promise.all(
-  map(this._getMonitors(), async m => {
-    const tableBody = (await m.snapshot()).map(entry => entry.tableItem)
-    return `
+const monitorBodies = await Promise.all(
+  map(
+    this._getMonitors(),
+    async m => `
## Monitor for ${m.title}
-${m.tableHeader}
-${tableBody.join('')}`
-  })
+${(await m.snapshot()).map(entry => entry.listItem).join('')}`
+  )
)
this._sendAlertEmail(
@@ -322,18 +396,10 @@ ${tableBody.join('')}`
`
# Performance Alert Test
Your alarms and their current status:
-${hostMonitorPart2.join('\n')}`
+${monitorBodies.join('\n')}`
)
}
-load () {
-  this._job.start()
-}
-unload () {
-  this._job.stop()
-}
_parseDefinition (definition) {
const alarmId = `${definition.objectType}|${definition.variableName}|${
definition.alarmTriggerLevel
@@ -384,63 +450,67 @@ ${hostMonitorPart2.join('\n')}`
definition.alarmTriggerPeriod !== undefined
? definition.alarmTriggerPeriod
: 60
-const typeText = definition.objectType === 'host' ? 'Host' : 'VM'
return {
...definition,
alarmId,
vmFunction: typeFunction,
-title: `${typeText} ${definition.variableName} ${
-  typeFunction.comparator
-} ${definition.alarmTriggerLevel}${typeFunction.unit}`,
-tableHeader: `${typeText} | Value | Alert\n--- | -----:| ---:`,
+title: `${typeFunction.name} ${typeFunction.comparator} ${
+  definition.alarmTriggerLevel
+}${typeFunction.unit}`,
snapshot: async () => {
return Promise.all(
map(definition.uuids, async uuid => {
try {
-const monitoredObject = this._xo.getXapi(uuid).getObject(uuid)
-const objectLink = `[${
-  monitoredObject.name_label
-}](${this._generateUrl(definition.objectType, monitoredObject)})`
-const rrd = await this.getRrd(monitoredObject, observationPeriod)
-const couldFindRRD = rrd !== null
const result = {
-object: monitoredObject,
-couldFindRRD,
-objectLink: objectLink,
-listItem: ` * ${typeText} ${objectLink} ${
-  definition.variableName
-}: **Can't read performance counters**\n`,
-tableItem: `${objectLink} | - | **Can't read performance counters**\n`,
+uuid,
+name: definition.name,
+object: this._xo.getXapi(uuid).getObject(uuid),
}
-if (!couldFindRRD) {
-  return result
+if (result.object === undefined) {
+  throw new Error('object not found')
}
-const data = parseData(rrd, monitoredObject.uuid)
-const textValue =
-  data.getDisplayableValue().toFixed(1) + typeFunction.unit
-const shouldAlarm = data.shouldAlarm()
-return {
-  ...result,
-  value: data.getDisplayableValue(),
-  shouldAlarm: shouldAlarm,
-  textValue: textValue,
-  listItem: ` * ${typeText} ${objectLink} ${
-    definition.variableName
-  }: ${textValue}\n`,
-  tableItem: `${objectLink} | ${textValue} | ${
-    shouldAlarm ? '**Alert Ongoing**' : 'no alert'
-  }\n`,
+result.objectLink = `[${
+  result.object.name_label
+}](${this._generateUrl(definition.objectType, result.object)})`
+if (typeFunction.createGetter === undefined) {
+  // Stats via RRD
+  result.rrd = await this.getRrd(result.object, observationPeriod)
+  if (result.rrd !== null) {
+    const data = parseData(result.rrd, result.object.uuid)
+    assign(result, {
+      data,
+      value: data.getDisplayableValue(),
+      shouldAlarm: data.shouldAlarm(),
+    })
+  }
+} else {
+  // Stats via XAPI
+  const getter = typeFunction.createGetter(
+    definition.alarmTriggerLevel
+  )
+  const data = getter(result.object)
+  assign(result, {
+    value: data.getDisplayableValue(),
+    shouldAlarm: data.shouldAlarm(),
+  })
+}
+result.listItem = ` * ${result.objectLink}: ${
+  result.value === undefined
+    ? "**Can't read performance counters**"
+    : result.value.toFixed(1) + typeFunction.unit
+}\n`
return result
} catch (_) {
return {
uuid,
-object: null,
-couldFindRRD: false,
-objectLink: `cannot find object ${uuid}`,
-listItem: ` * ${typeText} ${uuid} ${
-  definition.variableName
-}: **Can't read performance counters**\n`,
-tableItem: `object ${uuid} | - | **Can't read performance counters**\n`,
+listItem: ` * ${uuid}: **Can't read performance counters**\n`,
}
}
})
@@ -452,11 +522,17 @@ ${hostMonitorPart2.join('\n')}`
_getMonitors () {
return map(this._configuration.hostMonitors, def =>
this._parseDefinition({ ...def, objectType: 'host' })
-).concat(
-  map(this._configuration.vmMonitors, def =>
-    this._parseDefinition({ ...def, objectType: 'vm' })
-  )
-)
+)
+  .concat(
+    map(this._configuration.vmMonitors, def =>
+      this._parseDefinition({ ...def, objectType: 'vm' })
+    )
+  )
+  .concat(
+    map(this._configuration.srMonitors, def =>
+      this._parseDefinition({ ...def, objectType: 'sr' })
+    )
+  )
}
async _checkMonitors () {
@@ -466,7 +542,7 @@ ${hostMonitorPart2.join('\n')}`
for (const entry of snapshot) {
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}|RRD`,
-!entry.couldFindRRD,
+entry.value === undefined,
() => {
this._sendAlertEmail(
'Secondary Issue',
@@ -477,9 +553,11 @@ ${entry.listItem}`
},
() => {}
)
-if (!entry.couldFindRRD) {
+if (entry.value === undefined) {
continue
}
const raiseAlarm = alarmId => {
// sample XenCenter message:
// value: 1.242087 config: <variable> <name value="mem_usage"/> </variable>
@@ -500,23 +578,24 @@ ${entry.listItem}`
this._sendAlertEmail(
'',
`
-## ALERT ${monitor.title}
+## ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
)
}
const lowerAlarm = alarmId => {
console.log('lowering Alarm', alarmId)
this._sendAlertEmail(
'END OF ALERT',
`
-## END OF ALERT ${monitor.title}
+## END OF ALERT: ${monitor.title}
${entry.listItem}
### Description
${monitor.vmFunction.description}`
)
}
raiseOrLowerAlarm(
`${monitor.alarmId}|${entry.uuid}`,
entry.shouldAlarm,

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server-usage-report",
"version": "0.4.0",
"version": "0.5.0",
"license": "AGPL-3.0",
"description": "",
"keywords": [

View File

@@ -159,30 +159,30 @@
</tr>
<tr>
<td>CPU:</td>
-<td>{{global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
+<td>{{normaliseValue global.vms.cpu}} % {{normaliseEvolution global.vmsEvolution.cpu}}</td>
<tr>
<tr>
<td>RAM:</td>
-<td>{{global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
+<td>{{normaliseValue global.vms.ram}} GiB {{normaliseEvolution global.vmsEvolution.ram}}</td>
<tr>
<tr>
<td>Disk read:</td>
-<td>{{global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
+<td>{{normaliseValue global.vms.diskRead}} MiB {{normaliseEvolution global.vmsEvolution.diskRead}}
</td>
<tr>
<tr>
<td>Disk write:</td>
-<td>{{global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
+<td>{{normaliseValue global.vms.diskWrite}} MiB {{normaliseEvolution global.vmsEvolution.diskWrite}}
</td>
<tr>
<tr>
<td>Network RX:</td>
-<td>{{global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
+<td>{{normaliseValue global.vms.netReception}} KiB {{normaliseEvolution global.vmsEvolution.netReception}}
</td>
<tr>
<tr>
<td>Network TX:</td>
-<td>{{global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
+<td>{{normaliseValue global.vms.netTransmission}} KiB {{normaliseEvolution global.vmsEvolution.netTransmission}}
</td>
<tr>
</table>
@@ -205,7 +205,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} %</td>
+<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
@@ -216,7 +216,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} GiB</td>
+<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -226,7 +226,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} MiB</td>
+<td>{{normaliseValue this.value}} MiB</td>
</tr>
{{/each}}
<tr>
@@ -236,7 +236,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} MiB</td>
+<td>{{normaliseValue this.value}} MiB</td>
</tr>
{{/each}}
<tr>
@@ -246,7 +246,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} KiB</td>
+<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
@@ -256,7 +256,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} KiB</td>
+<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -275,28 +275,28 @@
</tr>
<tr>
<td>CPU:</td>
-<td>{{global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
+<td>{{normaliseValue global.hosts.cpu}} % {{normaliseEvolution global.hostsEvolution.cpu}}
</td>
<tr>
<tr>
<td>RAM:</td>
-<td>{{global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
+<td>{{normaliseValue global.hosts.ram}} GiB {{normaliseEvolution global.hostsEvolution.ram}}
</td>
</td>
<tr>
<tr>
<td>Load average:</td>
-<td>{{global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
+<td>{{normaliseValue global.hosts.load}} {{normaliseEvolution global.hostsEvolution.load}}
</td>
<tr>
<tr>
<td>Network RX:</td>
-<td>{{global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
+<td>{{normaliseValue global.hosts.netReception}} KiB {{normaliseEvolution global.hostsEvolution.netReception}}
</td>
<tr>
<tr>
<td>Network TX:</td>
-<td>{{global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
+<td>{{normaliseValue global.hosts.netTransmission}} KiB {{normaliseEvolution global.hostsEvolution.netTransmission}}
</td>
<tr>
</table>
@@ -318,7 +318,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} %</td>
+<td>{{normaliseValue this.value}} %</td>
</tr>
{{/each}}
<tr>
@@ -328,7 +328,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} GiB</td>
+<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
<tr>
@@ -338,7 +338,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} </td>
+<td>{{normaliseValue this.value}} </td>
</tr>
{{/each}}
<tr>
@@ -348,7 +348,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} KiB</td>
+<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
<tr>
@@ -358,7 +358,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} KiB</td>
+<td>{{normaliseValue this.value}} KiB</td>
</tr>
{{/each}}
</table>
@@ -378,7 +378,7 @@
<tr>
<td>{{shortUUID this.uuid}}</td>
<td>{{this.name}}</td>
-<td>{{this.value}} GiB</td>
+<td>{{normaliseValue this.value}} GiB</td>
</tr>
{{/each}}
</table>

View File

@@ -8,6 +8,7 @@ import {
filter,
find,
forEach,
+get,
isFinite,
map,
orderBy,
@@ -137,9 +138,9 @@ Handlebars.registerHelper(
value =>
new Handlebars.SafeString(
isFinite(+value) && +value !== 0
-? value > 0
-  ? `(<b style="color: green;">▲ ${value}</b>)`
-  : `(<b style="color: red;">▼ ${String(value).slice(1)}</b>)`
+? (value = round(value, 2)) > 0
+  ? `(<b style="color: green;">▲ ${value}%</b>)`
+  : `(<b style="color: red;">▼ ${String(value).slice(1)}%</b>)`
: ''
)
)
@@ -164,7 +165,7 @@ const computeDoubleMean = val => computeMean(map(val, computeMean))
function computeMeans (objects, options) {
return zipObject(
options,
-map(options, opt => round(computeMean(map(objects, opt)), 2))
+map(options, opt => computeMean(map(objects, opt)), 2)
)
}
@@ -185,7 +186,7 @@ function getTop (objects, options) {
obj => ({
uuid: obj.uuid,
name: obj.name,
-value: round(obj[opt], 2),
+value: obj[opt],
})
)
)
@@ -200,7 +201,7 @@ function computePercentage (curr, prev, options) {
opt =>
prev[opt] === 0 || prev[opt] === null
? 'NONE'
-: `${round((curr[opt] - prev[opt]) * 100 / prev[opt], 2)}`
+: `${(curr[opt] - prev[opt]) * 100 / prev[opt]}`
)
)
}
@@ -228,10 +229,14 @@ async function getVmsStats ({ runningVms, xo }) {
name: vm.name_label,
cpu: computeDoubleMean(vmStats.stats.cpus),
ram: computeMean(getMemoryUsedMetric(vmStats.stats)) / gibPower,
-diskRead: computeDoubleMean(values(vmStats.stats.xvds.r)) / mibPower,
-diskWrite: computeDoubleMean(values(vmStats.stats.xvds.w)) / mibPower,
-netReception: computeDoubleMean(vmStats.stats.vifs.rx) / kibPower,
-netTransmission: computeDoubleMean(vmStats.stats.vifs.tx) / kibPower,
+diskRead:
+  computeDoubleMean(values(get(vmStats.stats.xvds, 'r'))) / mibPower,
+diskWrite:
+  computeDoubleMean(values(get(vmStats.stats.xvds, 'w'))) / mibPower,
+netReception:
+  computeDoubleMean(get(vmStats.stats.vifs, 'rx')) / kibPower,
+netTransmission:
+  computeDoubleMean(get(vmStats.stats.vifs, 'tx')) / kibPower,
}
})
),
@@ -251,9 +256,10 @@ async function getHostsStats ({ runningHosts, xo }) {
cpu: computeDoubleMean(hostStats.stats.cpus),
ram: computeMean(getMemoryUsedMetric(hostStats.stats)) / gibPower,
load: computeMean(hostStats.stats.load),
-netReception: computeDoubleMean(hostStats.stats.pifs.rx) / kibPower,
+netReception:
+  computeDoubleMean(get(hostStats.stats.pifs, 'rx')) / kibPower,
netTransmission:
-  computeDoubleMean(hostStats.stats.pifs.tx) / kibPower,
+  computeDoubleMean(get(hostStats.stats.pifs, 'tx')) / kibPower,
}
})
),
@@ -264,12 +270,16 @@ async function getHostsStats ({ runningHosts, xo }) {
function getSrsStats (xoObjects) {
return orderBy(
-map(filter(xoObjects, { type: 'SR' }), sr => {
+map(filter(xoObjects, obj => obj.type === 'SR' && obj.size > 0), sr => {
const total = sr.size / gibPower
const used = sr.physical_usage / gibPower
+let name = sr.name_label
+if (!sr.shared) {
+  name += ` (${find(xoObjects, { id: sr.$container }).name_label})`
+}
return {
uuid: sr.uuid,
-name: sr.name_label,
+name,
total,
used,
free: total - used,

View File

@@ -1,6 +1,6 @@
{
"name": "xo-server",
"version": "5.18.0",
"version": "5.20.1",
"license": "AGPL-3.0",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -31,14 +31,15 @@
"node": ">=6"
},
"dependencies": {
"@babel/polyfill": "7.0.0-beta.44",
"@babel/polyfill": "7.0.0-beta.49",
"@marsaud/smb2-promise": "^0.2.1",
"@xen-orchestra/cron": "^1.0.3",
"@xen-orchestra/fs": "^0.1.0",
"ajv": "^6.1.1",
"app-conf": "^0.5.0",
"archiver": "^2.1.0",
"async-iterator-to-stream": "^1.0.1",
"base64url": "^2.0.0",
"base64url": "^3.0.0",
"bind-property-descriptor": "^1.0.0",
"blocked": "^1.2.1",
"bluebird": "^3.5.1",
@@ -58,9 +59,10 @@
"express-session": "^1.15.6",
"fatfs": "^0.10.4",
"from2": "^2.3.0",
"fs-extra": "^5.0.0",
"fs-extra": "^6.0.1",
"get-stream": "^3.0.0",
"golike-defer": "^0.4.1",
"hashy": "^0.6.2",
"hashy": "^0.7.1",
"helmet": "^3.9.0",
"highland": "^2.11.1",
"http-proxy": "^1.16.2",
@@ -68,14 +70,14 @@
"http-server-plus": "^0.10.0",
"human-format": "^0.10.0",
"is-redirect": "^1.0.0",
"jest-worker": "^22.4.3",
"jest-worker": "^23.0.0",
"js-yaml": "^3.10.0",
"json-rpc-peer": "^0.15.3",
"json5": "^1.0.0",
"julien-f-source-map-support": "0.1.0",
"julien-f-unzip": "^0.2.1",
"kindof": "^2.0.0",
"level": "^3.0.0",
"level": "^4.0.0",
"level-party": "^3.0.4",
"level-sublevel": "^6.6.1",
"limit-concurrency-decorator": "^0.4.0",
@@ -91,16 +93,16 @@
"partial-stream": "0.0.0",
"passport": "^0.4.0",
"passport-local": "^1.0.0",
"pretty-format": "^22.0.3",
"pretty-format": "^23.0.0",
"promise-toolbox": "^0.9.5",
"proxy-agent": "^2.1.0",
"proxy-agent": "^3.0.0",
"pug": "^2.0.0-rc.4",
"pw": "^0.0.4",
"redis": "^2.8.0",
"schema-inspector": "^1.6.8",
"semver": "^5.4.1",
"serve-static": "^1.13.1",
"split-lines": "^1.1.0",
"split-lines": "^2.0.0",
"stack-chain": "^2.0.0",
"stoppable": "^1.0.5",
"struct-fu": "^1.2.0",
@@ -109,27 +111,29 @@
"tmp": "^0.0.33",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.1.2",
"ws": "^5.0.0",
"xen-api": "^0.16.8",
"xen-api": "^0.16.10",
"xml2js": "^0.4.19",
"xo-acl-resolver": "^0.2.3",
"xo-acl-resolver": "^0.2.4",
"xo-collection": "^0.4.1",
"xo-common": "^0.1.1",
"xo-remote-parser": "^0.3",
"xo-vmdk-to-vhd": "0.0.12"
"xo-vmdk-to-vhd": "^0.1.3",
"yazl": "^2.4.3"
},
"devDependencies": {
"@babel/cli": "7.0.0-beta.44",
"@babel/core": "7.0.0-beta.44",
"@babel/plugin-proposal-decorators": "7.0.0-beta.44",
"@babel/plugin-proposal-export-default-from": "7.0.0-beta.44",
"@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.44",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.44",
"@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.44",
"@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.44",
"@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.44",
"@babel/preset-env": "7.0.0-beta.44",
"@babel/preset-flow": "7.0.0-beta.44",
"@babel/cli": "7.0.0-beta.49",
"@babel/core": "7.0.0-beta.49",
"@babel/plugin-proposal-decorators": "7.0.0-beta.49",
"@babel/plugin-proposal-export-default-from": "7.0.0-beta.49",
"@babel/plugin-proposal-export-namespace-from": "7.0.0-beta.49",
"@babel/plugin-proposal-function-bind": "7.0.0-beta.49",
"@babel/plugin-proposal-optional-chaining": "^7.0.0-beta.49",
"@babel/plugin-proposal-pipeline-operator": "^7.0.0-beta.49",
"@babel/plugin-proposal-throw-expressions": "^7.0.0-beta.49",
"@babel/preset-env": "7.0.0-beta.49",
"@babel/preset-flow": "7.0.0-beta.49",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^5.1.3",
"index-modules": "^0.3.0",

View File

@@ -1,3 +1,8 @@
+import { basename } from 'path'
+import { isEmpty, pickBy } from 'lodash'
+import { safeDateFormat } from '../utils'
export function createJob ({ schedules, ...job }) {
job.userId = this.user.id
return this.createBackupNgJob(job, schedules)
@@ -27,6 +32,10 @@ createJob.params = {
settings: {
type: 'object',
},
+srs: {
+  type: 'object',
+  optional: true,
+},
vms: {
type: 'object',
},
@@ -81,6 +90,10 @@ editJob.params = {
type: 'object',
optional: true,
},
+srs: {
+  type: 'object',
+  optional: true,
+},
vms: {
type: 'object',
optional: true,
@@ -105,8 +118,8 @@ getJob.params = {
},
}
-export async function runJob ({ id, schedule }) {
-  return this.runJobSequence([id], await this.getSchedule(schedule))
+export async function runJob ({ id, schedule, vm }) {
+  return this.runJobSequence([id], await this.getSchedule(schedule), vm)
}
runJob.permission = 'admin'
@@ -118,10 +131,23 @@ runJob.params = {
schedule: {
type: 'string',
},
+vm: {
+  type: 'string',
+  optional: true,
+},
}
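
The optional vm parameter is what lets xo-web retry a single failed or interrupted VM of a Backup NG job (see the xo-web log fix in this range). A hypothetical JSON-RPC invocation, assuming the usual backupNg API namespace (identifiers invented):

// retry one VM of a Backup NG job, as xo-web's log view does
await xo.call('backupNg.runJob', {
  id: '<job-id>',
  schedule: '<schedule-id>',
  vm: '<vm-uuid>',
})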
+// -----------------------------------------------------------------------------
+export async function getAllLogs (filter) {
+  const logs = await this.getBackupNgLogs()
+  return isEmpty(filter) ? logs : pickBy(logs, filter)
+}
+getAllLogs.permission = 'admin'
// -----------------------------------------------------------------------------
export function deleteVmBackup ({ id }) {
return this.deleteVmBackupNg(id)
}
@@ -163,3 +189,88 @@ importVmBackup.params = {
type: 'string',
},
}
// -----------------------------------------------------------------------------
export function listPartitions ({ remote, disk }) {
return this.listBackupNgDiskPartitions(remote, disk)
}
listPartitions.permission = 'admin'
listPartitions.params = {
disk: {
type: 'string',
},
remote: {
type: 'string',
},
}
export function listFiles ({ remote, disk, partition, path }) {
return this.listBackupNgPartitionFiles(remote, disk, partition, path)
}
listFiles.permission = 'admin'
listFiles.params = {
disk: {
type: 'string',
},
partition: {
type: 'string',
optional: true,
},
path: {
type: 'string',
},
remote: {
type: 'string',
},
}
async function handleFetchFiles (req, res, { remote, disk, partition, paths }) {
const zipStream = await this.fetchBackupNgPartitionFiles(
remote,
disk,
partition,
paths
)
res.setHeader('content-disposition', 'attachment')
res.setHeader('content-type', 'application/octet-stream')
return zipStream
}
export async function fetchFiles (params) {
const { paths } = params
let filename = `restore_${safeDateFormat(new Date())}`
if (paths.length === 1) {
filename += `_${basename(paths[0])}`
}
filename += '.zip'
return this.registerHttpRequest(handleFetchFiles, params, {
suffix: encodeURI(`/${filename}`),
}).then(url => ({ $getFrom: url }))
}
fetchFiles.permission = 'admin'
fetchFiles.params = {
disk: {
type: 'string',
},
partition: {
optional: true,
type: 'string',
},
paths: {
items: { type: 'string' },
minLength: 1,
type: 'array',
},
remote: {
type: 'string',
},
}
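
Together these new methods implement file-level restore from a backup: enumerate partitions on a backed-up disk, browse a partition, then stream selected files back as a zip. A hedged sketch of the client-side flow (method namespace and partition object shape assumed):

const partitions = await xo.call('backupNg.listPartitions', { remote, disk })
const files = await xo.call('backupNg.listFiles', {
  remote,
  disk,
  partition: partitions[0].id, // shape assumed
  path: '/',
})
const { $getFrom: url } = await xo.call('backupNg.fetchFiles', {
  remote,
  disk,
  partition: partitions[0].id,
  paths: ['/etc/fstab'],
})
// a GET on <url> then downloads restore_<date>_fstab.zip as an attachment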

View File

@@ -76,6 +76,21 @@ export { restartAgent as restart_agent } // eslint-disable-line camelcase
// -------------------------------------------------------------------
+export function setRemoteSyslogHost ({ host, syslogDestination }) {
+  return this.getXapi(host).setRemoteSyslogHost(host._xapiId, syslogDestination)
+}
+setRemoteSyslogHost.params = {
+  id: { type: 'string' },
+  syslogDestination: { type: 'string' },
+}
+setRemoteSyslogHost.resolve = {
+  host: ['id', 'host', 'administrate'],
+}
// -------------------------------------------------------------------
export function start ({ host }) {
return this.getXapi(host).powerOnHost(host._xapiId)
}

View File

@@ -1,5 +1,12 @@
// FIXME so far, no acls for jobs
+export function cancel ({ runId }) {
+  return this.cancelJobRun(runId)
+}
+cancel.permission = 'admin'
+cancel.description = 'Cancel a current run'
export async function getAll () {
return /* await */ this.getAllJobs('call')
}

View File

@@ -1,19 +1,5 @@
-export async function get ({ namespace }) {
-  const logger = await this.getLogger(namespace)
-  return new Promise((resolve, reject) => {
-    const logs = {}
-    logger
-      .createReadStream()
-      .on('data', data => {
-        logs[data.key] = data.value
-      })
-      .on('end', () => {
-        resolve(logs)
-      })
-      .on('error', reject)
-  })
+export function get ({ namespace }) {
+  return this.getLogs(namespace)
}
get.description = 'returns logs list for one namespace'

View File

@@ -204,8 +204,8 @@ export async function createNfs ({
}
// if NFS options given
-if (nfsVersion) {
-  deviceConfig.options = nfsVersion
+if (nfsOptions) {
+  deviceConfig.options = nfsOptions
}
const srRef = await xapi.call(
@@ -838,3 +838,23 @@ getUnhealthyVdiChainsLength.params = {
getUnhealthyVdiChainsLength.resolve = {
sr: ['id', 'SR', 'operate'],
}
+// -------------------------------------------------------------------
+export function stats ({ sr, granularity }) {
+  return this.getXapiSrStats(sr._xapiId, granularity)
+}
+stats.description = 'returns statistic of the sr'
+stats.params = {
+  id: { type: 'string' },
+  granularity: {
+    type: 'string',
+    optional: true,
+  },
+}
+stats.resolve = {
+  sr: ['id', 'SR', 'view'],
+}
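
A hedged usage sketch for the new method (the granularity strings are assumed to match the RRD granularities used by the existing VM and host stats endpoints):

// hypothetical JSON-RPC call; the id is resolved to the SR via stats.resolve
const srStats = await xo.call('sr.stats', {
  id: '<sr-uuid>',
  granularity: 'hours',
})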

View File

@@ -12,6 +12,10 @@ import { forEach, map, mapFilter, parseSize } from '../utils'
// ===================================================================
+export function getHaValues () {
+  return ['best-effort', 'restart', '']
+}
function checkPermissionOnSrs (vm, permission = 'operate') {
const permissions = []
forEach(vm.$VBDs, vbdId => {
@@ -21,8 +25,10 @@ function checkPermissionOnSrs (vm, permission = 'operate') {
if (vbd.is_cd_drive || !vdiId) {
return
}
-return permissions.push([this.getObject(vdiId, 'VDI').$SR, permission])
+return permissions.push([
+  this.getObject(vdiId, ['VDI', 'VDI-snapshot']).$SR,
+  permission,
+])
})
return this.hasPermissions(this.session.get('user_id'), permissions).then(
@@ -46,11 +52,16 @@ const extract = (obj, prop) => {
export async function create (params) {
const { user } = this
const resourceSet = extract(params, 'resourceSet')
-if (resourceSet === undefined && user.permission !== 'admin') {
+const template = extract(params, 'template')
+if (
+  resourceSet === undefined &&
+  !(await this.hasPermissions(this.user.id, [
+    [template.$pool, 'administrate'],
+  ]))
+) {
throw unauthorized()
}
-const template = extract(params, 'template')
params.template = template._xapiId
const xapi = this.getXapi(template)
@@ -151,10 +162,10 @@ export async function create (params) {
await Promise.all([
params.share
? Promise.all(
map((await this.getResourceSet(resourceSet)).subjects, subjectId =>
  this.addAcl(subjectId, vm.id, 'admin')
)
)
: this.addAcl(user.id, vm.id, 'admin'),
xapi.xo.setData(xapiVm.$id, 'resourceSet', resourceSet),
])
@@ -463,7 +474,7 @@ export async function migrate ({
})
}
-if (!await this.hasPermissions(this.session.get('user_id'), permissions)) {
+if (!(await this.hasPermissions(this.session.get('user_id'), permissions))) {
throw unauthorized()
}
@@ -556,11 +567,11 @@ set.params = {
name_description: { type: 'string', optional: true },
// TODO: provides better filtering of values for HA possible values: "best-
// effort" meaning "try to restart this VM if possible but don't consider the
// Pool to be overcommitted if this is not possible"; "restart" meaning "this
// VM should be restarted"; "" meaning "do not try to restart this VM"
-high_availability: { type: 'boolean', optional: true },
+high_availability: {
+  optional: true,
+  pattern: new RegExp(`^(${getHaValues().join('|')})$`),
+  type: 'string',
+},
// Number of virtual CPUs to allocate.
CPUs: { type: 'integer', optional: true },
@@ -597,6 +608,9 @@ set.params = {
coresPerSocket: { type: ['string', 'number', 'null'], optional: true },
// Emulate HVM C000 PCI device for Windows Update to fetch or update PV drivers
hasVendorDevice: { type: 'boolean', optional: true },
+// Move the vm In to/Out of Self Service
+resourceSet: { type: ['string', 'null'], optional: true },
@@ -649,8 +663,7 @@ clone.params = {
}
clone.resolve = {
-// TODO: is it necessary for snapshots?
-vm: ['id', 'VM', 'administrate'],
+vm: ['id', ['VM', 'VM-snapshot'], 'administrate'],
}
// -------------------------------------------------------------------
@@ -700,9 +713,9 @@ copy.resolve = {
export async function convertToTemplate ({ vm }) {
// Convert to a template requires pool admin permission.
if (
-!await this.hasPermissions(this.session.get('user_id'), [
+!(await this.hasPermissions(this.session.get('user_id'), [
  [vm.$pool, 'administrate'],
-])
+]))
) {
throw unauthorized()
}
@@ -1005,13 +1018,12 @@ export async function stop ({ vm, force }) {
// Hard shutdown
if (force) {
-await xapi.call('VM.hard_shutdown', vm._xapiRef)
-return
+return xapi.shutdownVm(vm._xapiRef, { hard: true })
}
// Clean shutdown
try {
-await xapi.call('VM.clean_shutdown', vm._xapiRef)
+await xapi.shutdownVm(vm._xapiRef)
} catch (error) {
const { code } = error
if (
@@ -1262,7 +1274,9 @@ export async function createInterface ({
await this.checkResourceSetConstraints(resourceSet, this.user.id, [
network.id,
])
-} else if (!await this.hasPermissions(this.user.id, [[network.id, 'view']])) {
+} else if (
+  !(await this.hasPermissions(this.user.id, [[network.id, 'view']]))
+) {
throw unauthorized()
}

View File

@@ -1,7 +1,6 @@
+import getStream from 'get-stream'
import { forEach } from 'lodash'
-import { streamToBuffer } from '../utils'
// ===================================================================
export function clean () {
@@ -61,7 +60,7 @@ getAllObjects.params = {
export async function importConfig () {
return {
$sendTo: await this.registerHttpRequest(async (req, res) => {
-await this.importConfig(JSON.parse(await streamToBuffer(req)))
+await this.importConfig(JSON.parse(await getStream.buffer(req)))
res.end('config successfully imported')
}),

View File

@@ -7,6 +7,7 @@ import has from 'lodash/has'
import helmet from 'helmet'
import includes from 'lodash/includes'
import proxyConsole from './proxy-console'
+import pw from 'pw'
import serveStatic from 'serve-static'
import startsWith from 'lodash/startsWith'
import stoppable from 'stoppable'
@@ -227,12 +228,12 @@ async function registerPlugin (pluginPath, pluginName) {
// instance.
const instance = isFunction(factory)
? factory({
xo: this,
getDataDir: () => {
  const dir = `${this._config.datadir}/${pluginName}`
  return ensureDir(dir).then(() => dir)
},
})
: factory
await this.registerPlugin(
@@ -311,6 +312,13 @@ async function makeWebServerListen (
) {
if (cert && key) {
;[opts.cert, opts.key] = await Promise.all([readFile(cert), readFile(key)])
+if (opts.key.includes('ENCRYPTED')) {
+  opts.passphrase = await new Promise(resolve => {
+    console.log('Encrypted key %s', key)
+    process.stdout.write(`Enter pass phrase: `)
+    pw(resolve)
+  })
+}
}
try {
const niceAddress = await webServer.listen(opts)
@@ -499,7 +507,7 @@ const setUpConsoleProxy = (webServer, xo) => {
const { token } = parseCookies(req.headers.cookie)
const user = await xo.authenticateUser({ token })
-if (!await xo.hasPermissions(user.id, [[id, 'operate']])) {
+if (!(await xo.hasPermissions(user.id, [[id, 'operate']]))) {
throw invalidCredentials()
}
@@ -543,7 +551,7 @@ export default async function main (args) {
debug('blocked for %sms', ms | 0)
},
{
-threshold: 50,
+threshold: 500,
}
)
}

View File

@@ -1,16 +1,15 @@
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser } from 'parse-pairs'
-import { isArray, map } from 'lodash'
// ===================================================================
const parse = createParser({
keyTransform: key => key.slice(5).toLowerCase(),
})
-const makeFunction = command => (fields, ...args) =>
-  execa
-    .stdout(command, [
+const makeFunction = command => async (fields, ...args) => {
+  return splitLines(
+    await execa.stdout(command, [
'--noheading',
'--nosuffix',
'--nameprefixes',
@@ -21,17 +20,8 @@ const makeFunction = command => (fields, ...args) =>
String(fields),
...args,
])
-.then(stdout =>
-  map(
-    splitLines(stdout),
-    isArray(fields)
-      ? parse
-      : line => {
-          const data = parse(line)
-          return data[fields]
-        }
-  )
-)
+).map(Array.isArray(fields) ? parse : line => parse(line)[fields])
+}
export const lvs = makeFunction('lvs')
export const pvs = makeFunction('pvs')
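
Given the --nameprefixes output these wrappers parse (keys like LVM2_LV_NAME become lv_name after the keyTransform above), usage looks roughly like this (volume group name and values hypothetical):

// an array of fields yields one parsed object per output line
const lvols = await lvs(['lv_name', 'lv_size'], 'myVolumeGroup')
// → [{ lv_name: 'lv0', lv_size: '10737418240' }, ...]

// a single field yields a flat array of values
const pvNames = await pvs('pv_name')
// → ['/dev/sda3', ...]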

View File

@@ -13,9 +13,18 @@ export default {
type: 'string',
description: 'identifier of this job',
},
+scheduleId: {
+  type: 'string',
+  description: 'identifier of the schedule which ran the job',
+},
key: {
type: 'string',
},
+type: {
+  default: 'call',
+  enum: ['backup', 'call'],
+},
+data: {},
},
-required: ['event', 'userId', 'jobId', 'key'],
+required: ['event', 'userId', 'jobId'],
}

View File

@@ -0,0 +1,18 @@
export default {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
event: {
enum: ['task.end'],
},
taskId: {
type: 'string',
description: 'identifier of this task',
},
status: {
enum: ['canceled', 'failure', 'success'],
},
result: {},
},
required: ['event', 'taskId', 'status'],
}

View File

@@ -0,0 +1,15 @@
export default {
$schema: 'http://json-schema.org/draft-04/schema#',
type: 'object',
properties: {
event: {
enum: ['task.start'],
},
parentId: {
type: 'string',
description: 'identifier of the parent task or job',
},
data: {},
},
required: ['event'],
}
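
Together with the job-schema change above, these two new schemas describe the per-task records that the Backup NG log methods stitch into a tree (a run spawns tasks, tasks spawn sub-tasks via parentId). A hypothetical pair of consecutive log entries conforming to them (values invented):

{ "event": "task.start", "parentId": "<runJobId>", "data": { "type": "VM", "id": "<vm-uuid>" } }
{ "event": "task.end", "taskId": "<taskId>", "status": "success", "result": { "size": 1073741824 } }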

View File

@@ -1,28 +0,0 @@
const streamToNewBuffer = stream =>
new Promise((resolve, reject) => {
const chunks = []
let length = 0
const onData = chunk => {
chunks.push(chunk)
length += chunk.length
}
stream.on('data', onData)
const clean = () => {
stream.removeListener('data', onData)
stream.removeListener('end', onEnd)
stream.removeListener('error', onError)
}
const onEnd = () => {
resolve(Buffer.concat(chunks, length))
clean()
}
stream.on('end', onEnd)
const onError = error => {
reject(error)
clean()
}
stream.on('error', onError)
})
export { streamToNewBuffer as default }

View File

@@ -55,10 +55,6 @@ export const asyncMap = (collection, iteratee) => {
// -------------------------------------------------------------------
export streamToBuffer from './stream-to-new-buffer'
// -------------------------------------------------------------------
export function camelToSnakeCase (string) {
return string.replace(
/([a-z0-9])([A-Z])/g,

View File

@@ -1,990 +0,0 @@
// TODO: remove once completely merged in vhd.js
import assert from 'assert'
import asyncIteratorToStream from 'async-iterator-to-stream'
import concurrency from 'limit-concurrency-decorator'
import fu from 'struct-fu'
import { dirname, relative } from 'path'
import { fromEvent } from 'promise-toolbox'
import type RemoteHandler from './remote-handlers/abstract'
import constantStream from './constant-stream'
import { noop, resolveRelativeFromFile, streamToBuffer } from './utils'
const VHD_UTIL_DEBUG = 0
const debug = VHD_UTIL_DEBUG ? str => console.log(`[vhd-merge]${str}`) : noop
// ===================================================================
//
// Spec:
// https://www.microsoft.com/en-us/download/details.aspx?id=23850
//
// C implementation:
// https://github.com/rubiojr/vhd-util-convert
//
// ===================================================================
// Sizes in bytes.
const VHD_FOOTER_SIZE = 512
const VHD_HEADER_SIZE = 1024
export const VHD_SECTOR_SIZE = 512
// Block allocation table entry size. (Block addr)
const VHD_ENTRY_SIZE = 4
const VHD_PARENT_LOCATOR_ENTRIES = 8
const VHD_PLATFORM_CODE_NONE = 0
// Disk types handled. Others are not supported.
export const HARD_DISK_TYPE_DYNAMIC = 3 // Full backup.
export const HARD_DISK_TYPE_DIFFERENCING = 4 // Delta backup.
export const PLATFORM_NONE = 0
export const PLATFORM_W2RU = 0x57327275
export const PLATFORM_W2KU = 0x57326b75
export const PLATFORM_MAC = 0x4d616320
export const PLATFORM_MACX = 0x4d616358
// Other.
const BLOCK_UNUSED = 0xffffffff
const BIT_MASK = 0x80
// unused block as buffer containing a uint32BE
const BUF_BLOCK_UNUSED = Buffer.allocUnsafe(VHD_ENTRY_SIZE)
BUF_BLOCK_UNUSED.writeUInt32BE(BLOCK_UNUSED, 0)
// ===================================================================
const SIZE_OF_32_BITS = Math.pow(2, 32)
const uint64 = fu.derive(
fu.uint32(2),
number => [Math.floor(number / SIZE_OF_32_BITS), number % SIZE_OF_32_BITS],
_ => _[0] * SIZE_OF_32_BITS + _[1]
)
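// Editor's example (not in the original file): uint64 packs a JS number into
// two big-endian uint32s, so 0x100000002 packs to [1, 2], and unpacking
// [1, 2] gives 1 * 2 ** 32 + 2 === 4294967298.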
const fuFooter = fu.struct([
fu.char('cookie', 8), // 0
fu.uint32('features'), // 8
fu.uint32('fileFormatVersion'), // 12
uint64('dataOffset'), // offset of the header, should always be 512
fu.uint32('timestamp'), // 24
fu.char('creatorApplication', 4), // 28
fu.uint32('creatorVersion'), // 32
fu.uint32('creatorHostOs'), // 36
uint64('originalSize'),
uint64('currentSize'),
fu.struct('diskGeometry', [
fu.uint16('cylinders'), // 56
fu.uint8('heads'), // 58
fu.uint8('sectorsPerTrackCylinder'), // 59
]),
fu.uint32('diskType'), // 60 Disk type, must be equal to HARD_DISK_TYPE_DYNAMIC/HARD_DISK_TYPE_DIFFERENCING.
fu.uint32('checksum'), // 64
fu.uint8('uuid', 16), // 68
fu.char('saved'), // 84
fu.char('hidden'), // 85
fu.char('reserved', 426), // 86
])
const fuHeader = fu.struct([
fu.char('cookie', 8),
fu.uint8('dataOffsetUnused', 8),
uint64('tableOffset'),
fu.uint32('headerVersion'),
fu.uint32('maxTableEntries'), // Max entries in the Block Allocation Table.
fu.uint32('blockSize'), // Block size in bytes. Default (2097152 => 2MB)
fu.uint32('checksum'),
fu.uint8('parentUuid', 16),
fu.uint32('parentTimestamp'),
fu.uint32('reserved1'),
fu.char16be('parentUnicodeName', 512),
fu.struct(
'parentLocatorEntry',
[
fu.uint32('platformCode'),
fu.uint32('platformDataSpace'),
fu.uint32('platformDataLength'),
fu.uint32('reserved'),
uint64('platformDataOffset'), // Absolute byte offset of the locator data.
],
VHD_PARENT_LOCATOR_ENTRIES
),
fu.char('reserved2', 256),
])
// ===================================================================
// Helpers
// ===================================================================
const computeBatSize = entries =>
sectorsToBytes(sectorsRoundUpNoZero(entries * VHD_ENTRY_SIZE))
// Returns a 32-bit integer corresponding to a Vhd version.
const getVhdVersion = (major, minor) => (major << 16) | (minor & 0x0000ffff)
// Sectors conversions.
const sectorsRoundUpNoZero = bytes => Math.ceil(bytes / VHD_SECTOR_SIZE) || 1
const sectorsToBytes = sectors => sectors * VHD_SECTOR_SIZE
// Check/Set a bit on a vhd map.
const mapTestBit = (map, bit) => ((map[bit >> 3] << (bit & 7)) & BIT_MASK) !== 0
const mapSetBit = (map, bit) => {
map[bit >> 3] |= BIT_MASK >> (bit & 7)
}
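// Editor's example (not in the original file): the bitmap is MSB-first.
// For bit 11: byte = 11 >> 3 = 1 and offset = 11 & 7 = 3, so mapSetBit ORs
// byte 1 with 0x80 >> 3 === 0x10, and mapTestBit then checks
// ((0x10 << 3) & 0x80) !== 0 → true.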
const packField = (field, value, buf) => {
const { offset } = field
field.pack(
value,
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
const unpackField = (field, buf) => {
const { offset } = field
return field.unpack(
buf,
typeof offset !== 'object' ? { bytes: offset, bits: 0 } : offset
)
}
// ===================================================================
// Returns the checksum of a raw struct.
// The raw struct (footer or header) is altered with the new sum.
function checksumStruct (buf, struct) {
const checksumField = struct.fields.checksum
let sum = 0
// Do not use the stored checksum to compute the new checksum.
const checksumOffset = checksumField.offset
for (let i = 0, n = checksumOffset; i < n; ++i) {
sum += buf[i]
}
for (
let i = checksumOffset + checksumField.size, n = struct.size;
i < n;
++i
) {
sum += buf[i]
}
sum = ~sum >>> 0
// Write new sum.
packField(checksumField, sum, buf)
return sum
}
const assertChecksum = (name, buf, struct) => {
const actual = unpackField(struct.fields.checksum, buf)
const expected = checksumStruct(buf, struct)
if (actual !== expected) {
throw new Error(`invalid ${name} checksum ${actual}, expected ${expected}`)
}
}
// ===================================================================
// Format:
//
// 1. Footer (512)
// 2. Header (1024)
// 3. Unordered entries
// - BAT (batSize @ header.tableOffset)
// - Blocks (@ blockOffset(i))
// - bitmap (blockBitmapSize)
// - data (header.blockSize)
// - Parent locators (parentLocatorSize(i) @ parentLocatorOffset(i))
// 4. Footer (512 @ vhdSize - 512)
//
// Variables:
//
// - batSize = max(1, ceil(header.maxTableEntries * 4 / sectorSize)) * sectorSize
// - blockBitmapSize = ceil(header.blockSize / sectorSize / 8 / sectorSize) * sectorSize
// - blockOffset(i) = bat[i] * sectorSize
// - nBlocks = ceil(footer.currentSize / header.blockSize)
// - parentLocatorOffset(i) = header.parentLocatorEntry[i].platformDataOffset
// - parentLocatorSize(i) = header.parentLocatorEntry[i].platformDataSpace * sectorSize
// - sectorSize = 512
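// Editor's example (not in the original file), using the defaults above:
// blockSize = 2 MiB → sectorsPerBlock = 2097152 / 512 = 4096; the block
// bitmap needs 4096 bits = 512 bytes = 1 sector, so a full block occupies
// 4097 sectors. A 10 GiB disk has nBlocks = ceil(10 GiB / 2 MiB) = 5120,
// hence batSize = ceil(5120 * 4 / 512) * 512 = 20480 bytes.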
export class Vhd {
get batSize () {
return computeBatSize(this.header.maxTableEntries)
}
constructor (handler, path) {
this._handler = handler
this._path = path
}
// =================================================================
// Read functions.
// =================================================================
_readStream (start, n) {
return this._handler.createReadStream(this._path, {
start,
end: start + n - 1, // end is inclusive
})
}
_read (start, n) {
return this._readStream(start, n).then(streamToBuffer)
}
containsBlock (id) {
return this._getBatEntry(id) !== BLOCK_UNUSED
}
// Returns the first address after metadata. (In bytes)
getEndOfHeaders () {
const { header } = this
let end = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
// Max(end, block allocation table end)
end = Math.max(end, header.tableOffset + this.batSize)
for (let i = 0; i < VHD_PARENT_LOCATOR_ENTRIES; i++) {
const entry = header.parentLocatorEntry[i]
if (entry.platformCode !== VHD_PLATFORM_CODE_NONE) {
end = Math.max(
end,
entry.platformDataOffset + sectorsToBytes(entry.platformDataSpace)
)
}
}
debug(`End of headers: ${end}.`)
return end
}
// Returns the first sector after data.
getEndOfData () {
let end = Math.ceil(this.getEndOfHeaders() / VHD_SECTOR_SIZE)
const fullBlockSize = this.sectorsOfBitmap + this.sectorsPerBlock
const { maxTableEntries } = this.header
for (let i = 0; i < maxTableEntries; i++) {
const blockAddr = this._getBatEntry(i)
if (blockAddr !== BLOCK_UNUSED) {
end = Math.max(end, blockAddr + fullBlockSize)
}
}
debug(`End of data: ${end}.`)
return sectorsToBytes(end)
}
// Get the beginning (footer + header) of a vhd file.
async readHeaderAndFooter () {
const buf = await this._read(0, VHD_FOOTER_SIZE + VHD_HEADER_SIZE)
const bufFooter = buf.slice(0, VHD_FOOTER_SIZE)
const bufHeader = buf.slice(VHD_FOOTER_SIZE)
assertChecksum('footer', bufFooter, fuFooter)
assertChecksum('header', bufHeader, fuHeader)
const footer = (this.footer = fuFooter.unpack(bufFooter))
assert.strictEqual(footer.dataOffset, VHD_FOOTER_SIZE)
const header = (this.header = fuHeader.unpack(bufHeader))
// Compute the number of sectors in one block.
// Default: One block contains 4096 sectors of 512 bytes.
const sectorsPerBlock = (this.sectorsPerBlock = Math.floor(
header.blockSize / VHD_SECTOR_SIZE
))
// Compute bitmap size in sectors.
// Default: 1.
const sectorsOfBitmap = (this.sectorsOfBitmap = sectorsRoundUpNoZero(
sectorsPerBlock >> 3
))
// Full block size => data block size + bitmap size.
this.fullBlockSize = sectorsToBytes(sectorsPerBlock + sectorsOfBitmap)
// In bytes.
// Default: 512.
this.bitmapSize = sectorsToBytes(sectorsOfBitmap)
}
// Check if a vhd object has a block allocation table map.
hasBlockAllocationTableMap () {
return this.footer.fileFormatVersion > getVhdVersion(1, 0)
}
// Returns a buffer that contains the block allocation table of a vhd file.
async readBlockTable () {
const { header } = this
this.blockTable = await this._read(
header.tableOffset,
header.maxTableEntries * VHD_ENTRY_SIZE
)
}
// return the first sector (bitmap) of a block
_getBatEntry (block) {
return this.blockTable.readUInt32BE(block * VHD_ENTRY_SIZE)
}
_readBlock (blockId, onlyBitmap = false) {
const blockAddr = this._getBatEntry(blockId)
if (blockAddr === BLOCK_UNUSED) {
throw new Error(`no such block ${blockId}`)
}
return this._read(
sectorsToBytes(blockAddr),
onlyBitmap ? this.bitmapSize : this.fullBlockSize
).then(
buf =>
onlyBitmap
? { id: blockId, bitmap: buf }
: {
id: blockId,
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize),
buffer: buf,
}
)
}
// get the identifiers and first sectors of the first and last block
// in the file
//
_getFirstAndLastBlocks () {
const n = this.header.maxTableEntries
const bat = this.blockTable
let i = 0
let j = 0
let first, firstSector, last, lastSector
// get first allocated block for initialization
while ((firstSector = bat.readUInt32BE(j)) === BLOCK_UNUSED) {
i += 1
j += VHD_ENTRY_SIZE
if (i === n) {
const error = new Error('no allocated block found')
error.noBlock = true
throw error
}
}
lastSector = firstSector
first = last = i
while (i < n) {
const sector = bat.readUInt32BE(j)
if (sector !== BLOCK_UNUSED) {
if (sector < firstSector) {
first = i
firstSector = sector
} else if (sector > lastSector) {
last = i
lastSector = sector
}
}
i += 1
j += VHD_ENTRY_SIZE
}
return { first, firstSector, last, lastSector }
}
// =================================================================
// Write functions.
// =================================================================
// Write a buffer/stream at a given position in a vhd file.
async _write (data, offset) {
debug(
`_write offset=${offset} size=${
Buffer.isBuffer(data) ? data.length : '???'
}`
)
// TODO: could probably be merged in remote handlers.
const stream = await this._handler.createOutputStream(this._path, {
flags: 'r+',
start: offset,
})
return Buffer.isBuffer(data)
? new Promise((resolve, reject) => {
stream.on('error', reject)
stream.end(data, resolve)
})
: fromEvent(data.pipe(stream), 'finish')
}
async _freeFirstBlockSpace (spaceNeededBytes) {
try {
const { first, firstSector, lastSector } = this._getFirstAndLastBlocks()
const tableOffset = this.header.tableOffset
const { batSize } = this
const newMinSector = Math.ceil(
(tableOffset + batSize + spaceNeededBytes) / VHD_SECTOR_SIZE
)
if (
tableOffset + batSize + spaceNeededBytes >=
sectorsToBytes(firstSector)
) {
const { fullBlockSize } = this
const newFirstSector = Math.max(
lastSector + fullBlockSize / VHD_SECTOR_SIZE,
newMinSector
)
debug(
`freeFirstBlockSpace: move first block ${firstSector} -> ${newFirstSector}`
)
// copy the first block at the end
const stream = await this._readStream(
sectorsToBytes(firstSector),
fullBlockSize
)
await this._write(stream, sectorsToBytes(newFirstSector))
await this._setBatEntry(first, newFirstSector)
await this.writeFooter(true)
spaceNeededBytes -= this.fullBlockSize
if (spaceNeededBytes > 0) {
return this._freeFirstBlockSpace(spaceNeededBytes)
}
}
} catch (e) {
if (!e.noBlock) {
throw e
}
}
}
async ensureBatSize (entries) {
const { header } = this
const prevMaxTableEntries = header.maxTableEntries
if (prevMaxTableEntries >= entries) {
return
}
const newBatSize = computeBatSize(entries)
await this._freeFirstBlockSpace(newBatSize - this.batSize)
const maxTableEntries = (header.maxTableEntries = entries)
const prevBat = this.blockTable
const bat = (this.blockTable = Buffer.allocUnsafe(newBatSize))
prevBat.copy(bat)
bat.fill(BUF_BLOCK_UNUSED, prevMaxTableEntries * VHD_ENTRY_SIZE)
debug(
`ensureBatSize: extend BAT ${prevMaxTableEntries} -> ${maxTableEntries}`
)
await this._write(
constantStream(BUF_BLOCK_UNUSED, maxTableEntries - prevMaxTableEntries),
header.tableOffset + prevBat.length
)
await this.writeHeader()
}
// set the first sector (bitmap) of a block
_setBatEntry (block, blockSector) {
const i = block * VHD_ENTRY_SIZE
const { blockTable } = this
blockTable.writeUInt32BE(blockSector, i)
return this._write(
blockTable.slice(i, i + VHD_ENTRY_SIZE),
this.header.tableOffset + i
)
}
// Make a new empty block at vhd end.
// Update block allocation table in context and in file.
async createBlock (blockId) {
const blockAddr = Math.ceil(this.getEndOfData() / VHD_SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(
constantStream([0], this.fullBlockSize),
sectorsToBytes(blockAddr)
),
this._setBatEntry(blockId, blockAddr),
])
return blockAddr
}
// Write a bitmap at a block address.
async writeBlockBitmap (blockAddr, bitmap) {
const { bitmapSize } = this
if (bitmap.length !== bitmapSize) {
throw new Error(`Bitmap length is not correct ! ${bitmap.length}`)
}
const offset = sectorsToBytes(blockAddr)
debug(
`Write bitmap at: ${offset}. (size=${bitmapSize}, data=${bitmap.toString(
'hex'
)})`
)
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async writeEntireBlock (block) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this.createBlock(block.id)
}
await this._write(block.buffer, sectorsToBytes(blockAddr))
}
async writeBlockSectors (block, beginSectorId, endSectorId, parentBitmap) {
let blockAddr = this._getBatEntry(block.id)
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this.createBlock(block.id)
parentBitmap = Buffer.alloc(this.bitmapSize, 0)
} else if (parentBitmap === undefined) {
parentBitmap = (await this._readBlock(block.id, true)).bitmap
}
const offset = blockAddr + this.sectorsOfBitmap + beginSectorId
debug(
`writeBlockSectors at ${offset} block=${
block.id
}, sectors=${beginSectorId}...${endSectorId}`
)
for (let i = beginSectorId; i < endSectorId; ++i) {
mapSetBit(parentBitmap, i)
}
await this.writeBlockBitmap(blockAddr, parentBitmap)
await this._write(
block.data.slice(
sectorsToBytes(beginSectorId),
sectorsToBytes(endSectorId)
),
sectorsToBytes(offset)
)
}
async coalesceBlock (child, blockId) {
const block = await child._readBlock(blockId)
const { bitmap, data } = block
debug(`coalesceBlock block=${blockId}`)
// For each sector of block data...
const { sectorsPerBlock } = child
for (let i = 0; i < sectorsPerBlock; i++) {
// If no changes on one sector, skip.
if (!mapTestBit(bitmap, i)) {
continue
}
let parentBitmap = null
let endSector = i + 1
// Count changed sectors.
while (endSector < sectorsPerBlock && mapTestBit(bitmap, endSector)) {
++endSector
}
// Write n sectors into parent.
debug(`coalesceBlock: write sectors=${i}...${endSector}`)
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this.writeEntireBlock(block)
} else {
if (parentBitmap === null) {
parentBitmap = (await this._readBlock(blockId, true)).bitmap
}
await this.writeBlockSectors(block, i, endSector, parentBitmap)
}
i = endSector
}
// Return the merged data size
return data.length
}
// Write a context footer. (At the end and beginning of a vhd file.)
async writeFooter (onlyEndFooter = false) {
const { footer } = this
const rawFooter = fuFooter.pack(footer)
const eof = await this._handler.getSize(this._path)
// sometimes the file is longer than anticipated; the footer still needs to be written at the very end
const offset = Math.max(this.getEndOfData(), eof - rawFooter.length)
footer.checksum = checksumStruct(rawFooter, fuFooter)
debug(
`Write footer at: ${offset} (checksum=${
footer.checksum
}). (data=${rawFooter.toString('hex')})`
)
if (!onlyEndFooter) {
await this._write(rawFooter, 0)
}
await this._write(rawFooter, offset)
}
writeHeader () {
const { header } = this
const rawHeader = fuHeader.pack(header)
header.checksum = checksumStruct(rawHeader, fuHeader)
const offset = VHD_FOOTER_SIZE
debug(
`Write header at: ${offset} (checksum=${
header.checksum
}). (data=${rawHeader.toString('hex')})`
)
return this._write(rawHeader, offset)
}
async writeData (offsetSectors, buffer) {
const bufferSizeSectors = Math.ceil(buffer.length / VHD_SECTOR_SIZE)
const startBlock = Math.floor(offsetSectors / this.sectorsPerBlock)
const endBufferSectors = offsetSectors + bufferSizeSectors
const lastBlock = Math.ceil(endBufferSectors / this.sectorsPerBlock) - 1
await this.ensureBatSize(lastBlock)
const blockSizeBytes = this.sectorsPerBlock * VHD_SECTOR_SIZE
const coversWholeBlock = (offsetInBlockSectors, endInBlockSectors) =>
offsetInBlockSectors === 0 && endInBlockSectors === this.sectorsPerBlock
for (
let currentBlock = startBlock;
currentBlock <= lastBlock;
currentBlock++
) {
const offsetInBlockSectors = Math.max(
0,
offsetSectors - currentBlock * this.sectorsPerBlock
)
const endInBlockSectors = Math.min(
endBufferSectors - currentBlock * this.sectorsPerBlock,
this.sectorsPerBlock
)
const startInBuffer = Math.max(
0,
(currentBlock * this.sectorsPerBlock - offsetSectors) * VHD_SECTOR_SIZE
)
const endInBuffer = Math.min(
((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
VHD_SECTOR_SIZE,
buffer.length
)
let inputBuffer
if (coversWholeBlock(offsetInBlockSectors, endInBlockSectors)) {
inputBuffer = buffer.slice(startInBuffer, endInBuffer)
} else {
inputBuffer = Buffer.alloc(blockSizeBytes, 0)
buffer.copy(
inputBuffer,
offsetInBlockSectors * VHD_SECTOR_SIZE,
startInBuffer,
endInBuffer
)
}
await this.writeBlockSectors(
{ id: currentBlock, data: inputBuffer },
offsetInBlockSectors,
endInBlockSectors
)
}
await this.writeFooter()
}
async ensureSpaceForParentLocators (neededSectors) {
const firstLocatorOffset = VHD_FOOTER_SIZE + VHD_HEADER_SIZE
const currentSpace =
Math.floor(this.header.tableOffset / VHD_SECTOR_SIZE) -
firstLocatorOffset / VHD_SECTOR_SIZE
if (currentSpace < neededSectors) {
const deltaSectors = neededSectors - currentSpace
await this._freeFirstBlockSpace(sectorsToBytes(deltaSectors))
this.header.tableOffset += sectorsToBytes(deltaSectors)
await this._write(this.blockTable, this.header.tableOffset)
}
return firstLocatorOffset
}
}
// Merge vhd child into vhd parent.
//
// Child must be a delta backup!
// Parent must be a full backup!
//
// TODO: update the identifier of the parent VHD.
export default concurrency(2)(async function vhdMerge (
parentHandler,
parentPath,
childHandler,
childPath
) {
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
const childFd = await childHandler.openFile(childPath, 'r')
try {
const childVhd = new Vhd(childHandler, childFd)
// Reading footer and header.
await Promise.all([
parentVhd.readHeaderAndFooter(),
childVhd.readHeaderAndFooter(),
])
assert(childVhd.header.blockSize === parentVhd.header.blockSize)
// Child must be a delta.
if (childVhd.footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
throw new Error('Unable to merge, child is not a delta backup.')
}
// Allocation table map is not yet implemented.
if (
parentVhd.hasBlockAllocationTableMap() ||
childVhd.hasBlockAllocationTableMap()
) {
throw new Error('Unsupported allocation table map.')
}
// Read allocation table of child/parent.
await Promise.all([parentVhd.readBlockTable(), childVhd.readBlockTable()])
await parentVhd.ensureBatSize(childVhd.header.maxTableEntries)
let mergedDataSize = 0
for (
let blockId = 0;
blockId < childVhd.header.maxTableEntries;
blockId++
) {
if (childVhd.containsBlock(blockId)) {
mergedDataSize += await parentVhd.coalesceBlock(childVhd, blockId)
}
}
const cFooter = childVhd.footer
const pFooter = parentVhd.footer
pFooter.currentSize = cFooter.currentSize
pFooter.diskGeometry = { ...cFooter.diskGeometry }
pFooter.originalSize = cFooter.originalSize
pFooter.timestamp = cFooter.timestamp
pFooter.uuid = cFooter.uuid
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
}
} finally {
await parentHandler.closeFile(parentFd)
}
})
// returns true if the child was actually modified
export async function chainVhd (
parentHandler,
parentPath,
childHandler,
childPath,
force = false
) {
const parentVhd = new Vhd(parentHandler, parentPath)
const childVhd = new Vhd(childHandler, childPath)
await childVhd.readHeaderAndFooter()
const { header, footer } = childVhd
if (footer.diskType !== HARD_DISK_TYPE_DIFFERENCING) {
if (!force) {
throw new Error('cannot chain disk of type ' + footer.diskType)
}
footer.diskType = HARD_DISK_TYPE_DIFFERENCING
}
await Promise.all([
childVhd.readBlockTable(),
parentVhd.readHeaderAndFooter(),
])
const parentName = relative(dirname(childPath), parentPath)
header.parentUuid = parentVhd.footer.uuid
header.parentUnicodeName = parentName
header.parentLocatorEntry[0].platformCode = PLATFORM_W2KU
const encodedFilename = Buffer.from(parentName, 'utf16le')
const dataSpaceSectors = Math.ceil(encodedFilename.length / VHD_SECTOR_SIZE)
const position = await childVhd.ensureSpaceForParentLocators(dataSpaceSectors)
await childVhd._write(encodedFilename, position)
header.parentLocatorEntry[0].platformDataSpace = sectorsToBytes(
dataSpaceSectors
)
header.parentLocatorEntry[0].platformDataLength = encodedFilename.length
header.parentLocatorEntry[0].platformDataOffset = position
for (let i = 1; i < 8; i++) {
header.parentLocatorEntry[i].platformCode = VHD_PLATFORM_CODE_NONE
header.parentLocatorEntry[i].platformDataSpace = 0
header.parentLocatorEntry[i].platformDataLength = 0
header.parentLocatorEntry[i].platformDataOffset = 0
}
await childVhd.writeHeader()
await childVhd.writeFooter()
return true
}
export const createReadStream = asyncIteratorToStream(function * (handler, path) {
const fds = []
try {
const vhds = []
while (true) {
const fd = yield handler.openFile(path, 'r')
fds.push(fd)
const vhd = new Vhd(handler, fd)
vhds.push(vhd)
yield vhd.readHeaderAndFooter()
yield vhd.readBlockTable()
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
break
}
path = resolveRelativeFromFile(path, vhd.header.parentUnicodeName)
}
const nVhds = vhds.length
// this is the VHD we want to synthesize
const vhd = vhds[0]
// data of our synthetic VHD
// TODO: empty parentUuid and parentLocatorEntry-s in header
let header = {
...vhd.header,
tableOffset: 512 + 1024,
parentUnicodeName: '',
}
const bat = Buffer.allocUnsafe(
Math.ceil(4 * header.maxTableEntries / VHD_SECTOR_SIZE) * VHD_SECTOR_SIZE
)
let footer = {
...vhd.footer,
diskType: HARD_DISK_TYPE_DYNAMIC,
}
const sectorsPerBlockData = vhd.sectorsPerBlock
const sectorsPerBlock =
sectorsPerBlockData + vhd.bitmapSize / VHD_SECTOR_SIZE
const nBlocks = Math.ceil(footer.currentSize / header.blockSize)
const blocksOwner = new Array(nBlocks)
for (
let iBlock = 0,
blockOffset = Math.ceil((512 + 1024 + bat.length) / VHD_SECTOR_SIZE);
iBlock < nBlocks;
++iBlock
) {
let blockSector = BLOCK_UNUSED
for (let i = 0; i < nVhds; ++i) {
if (vhds[i].containsBlock(iBlock)) {
blocksOwner[iBlock] = i
blockSector = blockOffset
blockOffset += sectorsPerBlock
break
}
}
bat.writeUInt32BE(blockSector, iBlock * 4)
}
footer = fuFooter.pack(footer)
checksumStruct(footer, fuFooter)
yield footer
header = fuHeader.pack(header)
checksumStruct(header, fuHeader)
yield header
yield bat
const bitmap = Buffer.alloc(vhd.bitmapSize, 0xff)
for (let iBlock = 0; iBlock < nBlocks; ++iBlock) {
const owner = blocksOwner[iBlock]
if (owner === undefined) {
continue
}
yield bitmap
const blocksByVhd = new Map()
const emitBlockSectors = function * (iVhd, i, n) {
const vhd = vhds[iVhd]
if (!vhd.containsBlock(iBlock)) {
yield * emitBlockSectors(iVhd + 1, i, n)
return
}
let block = blocksByVhd.get(vhd)
if (block === undefined) {
block = yield vhd._readBlock(iBlock)
blocksByVhd.set(vhd, block)
}
const { bitmap, data } = block
if (vhd.footer.diskType === HARD_DISK_TYPE_DYNAMIC) {
yield data.slice(i * VHD_SECTOR_SIZE, n * VHD_SECTOR_SIZE)
return
}
while (i < n) {
const hasData = mapTestBit(bitmap, i)
const start = i
do {
++i
} while (i < n && mapTestBit(bitmap, i) === hasData)
if (hasData) {
yield data.slice(start * VHD_SECTOR_SIZE, i * VHD_SECTOR_SIZE)
} else {
yield * emitBlockSectors(iVhd + 1, start, i)
}
}
}
yield * emitBlockSectors(owner, 0, sectorsPerBlock)
}
yield footer
} finally {
for (let i = 0, n = fds.length; i < n; ++i) {
handler.closeFile(fds[i]).catch(error => {
console.warn('createReadStream, closeFd', i, error)
})
}
}
})
export async function readVhdMetadata (handler: RemoteHandler, path: string) {
const vhd = new Vhd(handler, path)
await vhd.readHeaderAndFooter()
return {
footer: vhd.footer,
header: vhd.header,
}
}
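
Editor's note: this module is deleted because the VHD code now lives in the standalone vhd-lib package — the backup diff further down imports chainVhd, createSyntheticStream and mergeVhd from 'vhd-lib'. For reference, a sketch of how the removed entry points fit together (handler and paths are placeholders), mirroring the test script below:

import vhdMerge, { chainVhd } from './vhd-merge'

async function foldDeltaIntoFull (handler) {
  // relink the delta onto the full backup…
  await chainVhd(handler, 'full.vhd', handler, 'delta.vhd')
  // …then coalesce its blocks into the parent, returning the merged data size
  return vhdMerge(handler, 'full.vhd', handler, 'delta.vhd')
}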

View File

@@ -1,72 +0,0 @@
import execa from 'execa'
import vhdMerge, { chainVhd, Vhd } from './vhd-merge'
import LocalHandler from './remote-handlers/local.js'
async function testVhdMerge () {
console.log('before merge')
const moOfRandom = 4
await execa('bash', [
'-c',
`head -c ${moOfRandom}M < /dev/urandom >randomfile`,
])
await execa('bash', [
'-c',
`head -c ${moOfRandom / 2}M < /dev/urandom >small_randomfile`,
])
await execa('qemu-img', [
'convert',
'-f',
'raw',
'-Ovpc',
'randomfile',
'randomfile.vhd',
])
await execa('vhd-util', ['check', '-t', '-n', 'randomfile.vhd'])
await execa('vhd-util', ['create', '-s', moOfRandom, '-n', 'empty.vhd'])
// await execa('vhd-util', ['snapshot', '-n', 'randomfile_delta.vhd', '-p', 'randomfile.vhd'])
const handler = new LocalHandler({ url: 'file://' + process.cwd() })
const originalSize = await handler._getSize('randomfile')
await chainVhd(handler, 'empty.vhd', handler, 'randomfile.vhd')
const childVhd = new Vhd(handler, 'randomfile.vhd')
console.log('changing type')
await childVhd.readHeaderAndFooter()
console.log('child vhd', childVhd.footer.currentSize, originalSize)
await childVhd.readBlockTable()
childVhd.footer.diskType = 4 // Delta backup.
await childVhd.writeFooter()
console.log('chained')
await vhdMerge(handler, 'empty.vhd', handler, 'randomfile.vhd')
console.log('merged')
const parentVhd = new Vhd(handler, 'empty.vhd')
await parentVhd.readHeaderAndFooter()
console.log('parent vhd', parentVhd.footer.currentSize)
await execa('qemu-img', [
'convert',
'-f',
'vpc',
'-Oraw',
'empty.vhd',
'recovered',
])
await execa('truncate', ['-s', originalSize, 'recovered'])
console.log('ls', (await execa('ls', ['-lt'])).stdout)
console.log(
'diff',
(await execa('diff', ['-q', 'randomfile', 'recovered'])).stdout
)
/* const vhd = new Vhd(handler, 'randomfile_delta.vhd')
await vhd.readHeaderAndFooter()
await vhd.readBlockTable()
console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
await vhd.ensureBatSize(300)
console.log('vhd.header.maxTableEntries', vhd.header.maxTableEntries)
*/
console.log(await handler.list())
console.log('lol')
}
export { testVhdMerge as default }

View File

@@ -146,6 +146,7 @@ const TRANSFORMS = {
license_params: obj.license_params,
license_server: obj.license_server,
license_expiry: toTimestamp(obj.license_params.expiry),
logging: obj.logging,
name_description: obj.name_description,
name_label: obj.name_label,
memory: (function () {
@@ -186,9 +187,14 @@ const TRANSFORMS = {
}
}),
agentStartTime: toTimestamp(otherConfig.agent_start_time),
-rebootRequired: !isEmpty(obj.updates_requiring_reboot),
+rebootRequired:
+  softwareVersion.product_brand === 'XCP-ng'
+    ? toTimestamp(otherConfig.boot_time) <
+      +otherConfig.rpm_patch_installation_time
+    : !isEmpty(obj.updates_requiring_reboot),
tags: obj.tags,
version: softwareVersion.product_version,
productBrand: softwareVersion.product_brand,
// TODO: dedupe.
PIFs: link(obj, 'PIFs'),
@@ -227,15 +233,20 @@ const TRANSFORMS = {
return
}
-if (!guestMetrics) {
+if (guestMetrics === undefined) {
return false
}
const { major, minor } = guestMetrics.PV_drivers_version
if (major === undefined || minor === undefined) {
return false
}
return {
-major,
-minor,
+major: +major,
+minor: +minor,
+version: +`${major}.${minor}`,
}
})()
@@ -292,8 +303,7 @@ const TRANSFORMS = {
}
})(),
// TODO: there are two possible values: "best-effort" and "restart"
-high_availability: Boolean(obj.ha_restart_priority),
+high_availability: obj.ha_restart_priority,
memory: (function () {
const dynamicMin = +obj.memory_dynamic_min
@@ -327,6 +337,7 @@ const TRANSFORMS = {
other: otherConfig,
os_version: (guestMetrics && guestMetrics.os_version) || null,
power_state: obj.power_state,
hasVendorDevice: obj.has_vendor_device,
resourceSet,
snapshots: link(obj, 'snapshots'),
startTime: metrics && toTimestamp(metrics.start_time),
@@ -510,9 +521,7 @@ const TRANSFORMS = {
vdi.type += '-snapshot'
vdi.snapshot_time = toTimestamp(obj.snapshot_time)
vdi.$snapshot_of = link(obj, 'snapshot_of')
-}
-if (!obj.managed) {
+} else if (!obj.managed) {
vdi.type += '-unmanaged'
}
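
Editor's note: the rebootRequired change above makes the flag time-based on XCP-ng — the updater plugin (see the patching diff further down) stamps other_config.rpm_patch_installation_time, and a host needs a reboot when it booted before that stamp. Condensed, with the inputs as plain variables:

// both timestamps are epoch seconds
const rebootRequired =
  productBrand === 'XCP-ng'
    ? bootTime < +rpmPatchInstallationTime // patches installed since last boot
    : updatesRequiringReboot.length > 0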

View File

@@ -1,7 +1,19 @@
import JSON5 from 'json5'
import limitConcurrency from 'limit-concurrency-decorator'
import { BaseError } from 'make-error'
import { endsWith, findKey, forEach, get, identity, map } from 'lodash'
import {
endsWith,
findKey,
forEach,
get,
identity,
map,
mapValues,
mean,
sum,
uniq,
zipWith,
} from 'lodash'
import { parseDateTime } from './xapi'
@@ -62,6 +74,9 @@ const computeValues = (dataRow, legendIndex, transformValue = identity) =>
transformValue(convertNanToNull(values[legendIndex]))
)
const combineStats = (stats, path, combineValues) =>
zipWith(...map(stats, path), (...values) => combineValues(values))
// It browses the object in depth and initialises its properties
// The targetPath can be a string or an array describing the depth
// targetPath: [a, b, c] => a.b.c
@@ -141,6 +156,45 @@ const STATS = {
getPath: matches => ['pifs', 'tx', matches[1]],
},
},
iops: {
r: {
test: /^iops_read_(\w+)$/,
getPath: matches => ['iops', 'r', matches[1]],
},
w: {
test: /^iops_write_(\w+)$/,
getPath: matches => ['iops', 'w', matches[1]],
},
},
ioThroughput: {
r: {
test: /^io_throughput_read_(\w+)$/,
getPath: matches => ['ioThroughput', 'r', matches[1]],
transformValue: value => value * 2 ** 20,
},
w: {
test: /^io_throughput_write_(\w+)$/,
getPath: matches => ['ioThroughput', 'w', matches[1]],
transformValue: value => value * 2 ** 20,
},
},
latency: {
r: {
test: /^read_latency_(\w+)$/,
getPath: matches => ['latency', 'r', matches[1]],
transformValue: value => value / 1e3,
},
w: {
test: /^write_latency_(\w+)$/,
getPath: matches => ['latency', 'w', matches[1]],
transformValue: value => value / 1e3,
},
},
iowait: {
test: /^iowait_(\w+)$/,
getPath: matches => ['iowait', matches[1]],
transformValue: value => value * 1e2,
},
},
vm: {
memoryFree: {
@@ -361,4 +415,47 @@ export default class XapiStats {
granularity,
})
}
async getSrStats (xapi, srId, granularity) {
const sr = xapi.getObject(srId)
const hostsStats = {}
await Promise.all(
map(uniq(map(sr.$PBDs, 'host')), hostId =>
this.getHostStats(xapi, hostId, granularity).then(stats => {
hostsStats[xapi.getObject(hostId).name_label] = stats
})
)
)
const srShortUUID = sr.uuid.slice(0, 8)
return {
interval: hostsStats[Object.keys(hostsStats)[0]].interval,
endTimestamp: Math.max(...map(hostsStats, 'endTimestamp')),
localTimestamp: Math.min(...map(hostsStats, 'localTimestamp')),
stats: {
iops: {
r: combineStats(hostsStats, `stats.iops.r[${srShortUUID}]`, sum),
w: combineStats(hostsStats, `stats.iops.w[${srShortUUID}]`, sum),
},
ioThroughput: {
r: combineStats(
hostsStats,
`stats.ioThroughput.r[${srShortUUID}]`,
sum
),
w: combineStats(
hostsStats,
`stats.ioThroughput.w[${srShortUUID}]`,
sum
),
},
latency: {
r: combineStats(hostsStats, `stats.latency.r[${srShortUUID}]`, mean),
w: combineStats(hostsStats, `stats.latency.w[${srShortUUID}]`, mean),
},
iowait: mapValues(hostsStats, `stats.iowait[${srShortUUID}]`),
},
}
}
}
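
Editor's note: combineStats zips one metric across all hosts and reduces each time slot, which is how the per-host series above become a single SR-level series (sum for IOPS and throughput, mean for latency). A toy example with made-up host names and a shortened SR UUID:

import { map, sum, zipWith } from 'lodash'

const hostsStats = {
  host1: { stats: { iops: { r: { abcd1234: [1, 2, 3] } } } },
  host2: { stats: { iops: { r: { abcd1234: [4, 5, 6] } } } },
}
const combined = zipWith(
  ...map(hostsStats, 'stats.iops.r.abcd1234'),
  (...values) => sum(values)
)
// → [5, 7, 9]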

View File

@@ -70,7 +70,7 @@ import {
// ===================================================================
const TAG_BASE_DELTA = 'xo:base_delta'
-const TAG_COPY_SRC = 'xo:copy_of'
+export const TAG_COPY_SRC = 'xo:copy_of'
// ===================================================================
@@ -426,6 +426,14 @@ export default class Xapi extends XapiBase {
await this.call('host.restart_agent', this.getObject(hostId).$ref)
}
async setRemoteSyslogHost (hostId, syslogDestination) {
const host = this.getObject(hostId)
await this.call('host.set_logging', host.$ref, {
syslog_destination: syslogDestination,
})
await this.call('host.syslog_reconfigure', host.$ref)
}
async shutdownHost (hostId, force = false) {
const host = this.getObject(hostId)
@@ -656,7 +664,7 @@ export default class Xapi extends XapiBase {
}
// ensure the vm record is up-to-date
-vm = await this.barrier('VM', $ref)
+vm = await this.barrier($ref)
return Promise.all([
forceDeleteDefaultTemplate &&
@@ -816,12 +824,14 @@ export default class Xapi extends XapiBase {
} = {}
): Promise<DeltaVmExport> {
let vm = this.getObject(vmId)
-if (!bypassVdiChainsCheck) {
-  this._assertHealthyVdiChains(vm)
-}
// do not use the snapshot name in the delta export
const exportedNameLabel = vm.name_label
if (!vm.is_a_snapshot) {
+  if (!bypassVdiChainsCheck) {
+    this._assertHealthyVdiChains(vm)
+  }
vm = await this._snapshotVm($cancelToken, vm, snapshotNameLabel)
$defer.onFailure(() => this._deleteVm(vm))
}
@@ -958,7 +968,9 @@ export default class Xapi extends XapiBase {
)
if (!baseVm) {
-throw new Error('could not find the base VM')
+throw new Error(
+  `could not find the base VM (copy of ${remoteBaseVmUuid})`
+)
}
}
}
@@ -994,11 +1006,29 @@ export default class Xapi extends XapiBase {
// 2. Delete all VBDs which may have been created by the import.
await asyncMap(vm.$VBDs, vbd => this._deleteVbd(vbd))::ignoreErrors()
-// 3. Create VDIs.
-const newVdis = await map(delta.vdis, async vdi => {
+// 3. Create VDIs & VBDs.
+const vbds = groupBy(delta.vbds, 'VDI')
+const newVdis = await map(delta.vdis, async (vdi, vdiId) => {
+  let newVdi
const remoteBaseVdiUuid = detectBase && vdi.other_config[TAG_BASE_DELTA]
-if (!remoteBaseVdiUuid) {
-  const newVdi = await this.createVdi({
+if (remoteBaseVdiUuid) {
+  const baseVdi = find(
+    baseVdis,
+    vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
+  )
+  if (!baseVdi) {
+    throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
+  }
+  newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
+  $defer.onFailure(() => this._deleteVdi(newVdi))
+  await this._updateObjectMapProperty(newVdi, 'other_config', {
+    [TAG_COPY_SRC]: vdi.uuid,
+  })
+} else {
+  newVdi = await this.createVdi({
...vdi,
other_config: {
...vdi.other_config,
@@ -1008,24 +1038,15 @@ export default class Xapi extends XapiBase {
sr: mapVdisSrs[vdi.uuid] || srId,
})
$defer.onFailure(() => this._deleteVdi(newVdi))
-  return newVdi
-}
-const baseVdi = find(
-  baseVdis,
-  vdi => vdi.other_config[TAG_COPY_SRC] === remoteBaseVdiUuid
+await asyncMap(vbds[vdiId], vbd =>
+  this.createVbd({
+    ...vbd,
+    vdi: newVdi,
+    vm,
+  })
+)
-if (!baseVdi) {
-  throw new Error(`missing base VDI (copy of ${remoteBaseVdiUuid})`)
-}
-const newVdi = await this._getOrWaitObject(await this._cloneVdi(baseVdi))
-$defer.onFailure(() => this._deleteVdi(newVdi))
-await this._updateObjectMapProperty(newVdi, 'other_config', {
-  [TAG_COPY_SRC]: vdi.uuid,
-})
return newVdi
})::pAll()
@@ -1051,15 +1072,6 @@ export default class Xapi extends XapiBase {
let transferSize = 0
await Promise.all([
// Create VBDs.
asyncMap(delta.vbds, vbd =>
this.createVbd({
...vbd,
vdi: newVdis[vbd.VDI],
vm,
})
),
// Import VDI contents.
asyncMap(newVdis, async (vdi, id) => {
for (let stream of ensureArray(streams[`${id}.vhd`])) {
@@ -1071,7 +1083,7 @@ export default class Xapi extends XapiBase {
.once('finish', () => {
transferSize += sizeStream.size
})
-stream.task = sizeStream.task
+sizeStream.task = stream.task
await this._importVdiContent(vdi, sizeStream, VDI_FORMAT_VHD)
}
}),
@@ -1142,7 +1154,9 @@ export default class Xapi extends XapiBase {
vdis[vdi.$ref] =
mapVdisSrs && mapVdisSrs[vdi.$id]
? hostXapi.getObject(mapVdisSrs[vdi.$id]).$ref
-: sr !== undefined ? hostXapi.getObject(sr).$ref : defaultSr.$ref // Will error if there is no default SR.
+: sr !== undefined
+  ? hostXapi.getObject(sr).$ref
+  : defaultSr.$ref // Will error if there is no default SR.
}
}
@@ -1314,7 +1328,7 @@ export default class Xapi extends XapiBase {
async _importOvaVm (
$defer,
stream,
-{ descriptionLabel, disks, memory, nameLabel, networks, nCpus },
+{ descriptionLabel, disks, memory, nameLabel, networks, nCpus, tables },
sr
) {
// 1. Create VM.
@@ -1387,8 +1401,9 @@ export default class Xapi extends XapiBase {
return
}
-const vhdStream = await vmdkToVhd(stream)
-await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_RAW)
+const table = tables[entry.name]
+const vhdStream = await vmdkToVhd(stream, table)
+await this._importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
// See: https://github.com/mafintosh/tar-stream#extracting
// No import parallelization.
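
Editor's note: the import rework above indexes the delta's VBDs by VDI reference so each disk's VBDs are created as soon as the disk exists, instead of in a separate pass afterwards. A toy illustration of the groupBy step, with invented refs:

import { groupBy } from 'lodash'

const vbds = groupBy(
  [
    { VDI: 'OpaqueRef:a', device: '0' },
    { VDI: 'OpaqueRef:b', device: '1' },
    { VDI: 'OpaqueRef:b', device: '2' },
  ],
  'VDI'
)
// vbds['OpaqueRef:b'] → the two VBDs to recreate for that VDI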

View File

@@ -35,9 +35,24 @@ declare class XapiObject {
}
type Id = string | XapiObject
declare export class Vbd extends XapiObject {
type: string;
VDI: string;
}
declare export class Vdi extends XapiObject {
$snapshot_of: Vdi;
uuid: string;
}
declare export class Vm extends XapiObject {
$snapshots: Vm[];
$VBDs: Vbd[];
is_a_snapshot: boolean;
is_a_template: boolean;
name_label: string;
power_state: 'Running' | 'Halted' | 'Paused' | 'Suspended';
other_config: $Dict<string>;
snapshot_time: number;
uuid: string;
@@ -56,7 +71,7 @@ declare export class Xapi {
_updateObjectMapProperty(
object: XapiObject,
property: string,
-entries: $Dict<string>
+entries: $Dict<null | string>
): Promise<void>;
_setObjectProperties(
object: XapiObject,
@@ -65,21 +80,24 @@ declare export class Xapi {
_snapshotVm(cancelToken: mixed, vm: Vm, nameLabel?: string): Promise<Vm>;
addTag(object: Id, tag: string): Promise<void>;
-barrier(): void;
-barrier(ref: string): XapiObject;
+barrier(): Promise<void>;
+barrier(ref: string): Promise<XapiObject>;
deleteVm(vm: Id): Promise<void>;
editVm(vm: Id, $Dict<mixed>): Promise<void>;
-exportDeltaVm(
-  cancelToken: mixed,
-  snapshot: Id,
-  baseSnapshot ?: Id
-): Promise<DeltaVmExport>;
-exportVm(
-  cancelToken: mixed,
-  vm: Vm,
-  options ?: Object
-): Promise<AugmentedReadable>;
-getObject(object: Id): XapiObject;
-importDeltaVm(data: DeltaVmImport, options: Object): Promise<{ vm: Vm }>;
-importVm(stream: AugmentedReadable, options: Object): Promise<Vm>;
+exportDeltaVm(
+  cancelToken: mixed,
+  snapshot: Id,
+  baseSnapshot ?: Id,
+  opts?: { fullVdisRequired?: string[] }
+): Promise<DeltaVmExport>;
+exportVm(
+  cancelToken: mixed,
+  vm: Vm,
+  options ?: Object
+): Promise<AugmentedReadable>;
+getObject(object: Id): XapiObject;
+importDeltaVm(data: DeltaVmImport, options: Object): Promise<{ vm: Vm }>;
+importVm(stream: AugmentedReadable, options: Object): Promise<Vm>;
shutdownVm(object: Id): Promise<void>;
startVm(object: Id): Promise<void>;
}

View File

@@ -1,9 +1,10 @@
import deferrable from 'golike-defer'
import every from 'lodash/every'
-import find from 'lodash/find'
import filter from 'lodash/filter'
+import find from 'lodash/find'
import includes from 'lodash/includes'
import isObject from 'lodash/isObject'
import pickBy from 'lodash/pickBy'
import some from 'lodash/some'
import sortBy from 'lodash/sortBy'
import assign from 'lodash/assign'
@@ -11,6 +12,7 @@ import unzip from 'julien-f-unzip'
import { debounce } from '../../decorators'
import {
asyncMap,
ensureArray,
forEach,
mapFilter,
@@ -149,9 +151,12 @@ export default {
},
async listMissingPoolPatchesOnHost (hostId) {
+const host = this.getObject(hostId)
+// Returns an array so as not to break compatibility.
return mapToArray(
-  await this._listMissingPoolPatchesOnHost(this.getObject(hostId))
+  await (host.software_version.product_brand === 'XCP-ng'
+    ? this._xcpListHostUpdates(host)
+    : this._listMissingPoolPatchesOnHost(host))
)
},
@@ -440,15 +445,21 @@ export default {
},
async installAllPoolPatchesOnHost (hostId) {
-let host = this.getObject(hostId)
+const host = this.getObject(hostId)
if (host.software_version.product_brand === 'XCP-ng') {
return this._xcpInstallHostUpdates(host)
}
return this._installAllPoolPatchesOnHost(host)
},
async _installAllPoolPatchesOnHost (host) {
const installableByUuid =
host.license_params.sku_type !== 'free'
? await this._listMissingPoolPatchesOnHost(host)
-: filter(await this._listMissingPoolPatchesOnHost(host), {
-    paid: false,
-    upgrade: false,
-  })
+: pickBy(await this._listMissingPoolPatchesOnHost(host), {
+    paid: false,
+    upgrade: false,
+  })
// List of all installable patches sorted from the newest to the
// oldest.
@@ -479,6 +490,13 @@ export default {
},
async installAllPoolPatchesOnAllHosts () {
if (this.pool.$master.software_version.product_brand === 'XCP-ng') {
return this._xcpInstallAllPoolUpdatesOnHost()
}
return this._installAllPoolPatchesOnAllHosts()
},
async _installAllPoolPatchesOnAllHosts () {
const installableByUuid = assign(
{},
...(await Promise.all(
@@ -488,7 +506,7 @@ export default {
patches =>
host.license_params.sku_type !== 'free'
? patches
-: filter(patches, { paid: false, upgrade: false })
+: pickBy(patches, { paid: false, upgrade: false })
)
}
})
@@ -518,4 +536,47 @@ export default {
})
}
},
// ----------------------------------
// XCP-ng dedicated zone for patching
// ----------------------------------
// list all yum updates available for a XCP-ng host
async _xcpListHostUpdates (host) {
return JSON.parse(
await this.call(
'host.call_plugin',
host.$ref,
'updater.py',
'check_update',
{}
)
)
},
// install all yum updates for a XCP-ng host
async _xcpInstallHostUpdates (host) {
const update = await this.call(
'host.call_plugin',
host.$ref,
'updater.py',
'update',
{}
)
if (JSON.parse(update).exit !== 0) {
throw new Error('Update install failed')
} else {
await this._updateObjectMapProperty(host, 'other_config', {
rpm_patch_installation_time: String(Date.now() / 1000),
})
}
},
// install all yum updates for all XCP-ng hosts in a given pool
async _xcpInstallAllPoolUpdatesOnHost () {
await asyncMap(filter(this.objects.all, { $type: 'host' }), host =>
this._xcpInstallHostUpdates(host)
)
},
}
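
Editor's note: both XCP-ng helpers above treat the updater.py plugin's return value as a JSON string; the diff only shows that check_update returns the list of available updates and that update reports an exit field, so any other fields are assumptions. The calling convention, reduced to its core:

// assumes a connected xen-api client `xapi` and a host record `host`
async function installXcpUpdates (xapi, host) {
  const raw = await xapi.call('host.call_plugin', host.$ref, 'updater.py', 'update', {})
  // the plugin answers with a JSON document whose exit field is the status code
  if (JSON.parse(raw).exit !== 0) {
    throw new Error('Update install failed')
  }
}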

View File

@@ -1,6 +1,6 @@
import deferrable from 'golike-defer'
import { catchPlus as pCatch, ignoreErrors } from 'promise-toolbox'
-import { find, gte, includes, isEmpty, lte } from 'lodash'
+import { find, gte, includes, isEmpty, lte, noop } from 'lodash'
import { forEach, mapToArray, parseSize } from '../../utils'
@@ -204,7 +204,7 @@ export default {
if (cloudConfig != null) {
// Refresh the record.
-await this.barrier('VM', vm.$ref)
+await this.barrier(vm.$ref)
vm = this.getObjectByRef(vm.$ref)
// Find the SR of the first VDI.
@@ -224,7 +224,7 @@ export default {
}
// wait for the record with all the VBDs and VIFs
-return this.barrier('VM', vm.$ref)
+return this.barrier(vm.$ref)
},
// High level method to edit a VM.
@@ -310,11 +310,7 @@ export default {
highAvailability: {
set (ha, vm) {
-return this.call(
-  'VM.set_ha_restart_priority',
-  vm.$ref,
-  ha ? 'restart' : ''
-)
+return this.call('VM.set_ha_restart_priority', vm.$ref, ha)
},
},
@@ -384,6 +380,8 @@ export default {
tags: true,
hasVendorDevice: true,
vga: {
set (vga, vm) {
if (!includes(XEN_VGA_VALUES, vga)) {
@@ -431,4 +429,11 @@ export default {
// the force parameter is always true
return this.call('VM.resume', this.getObject(vmId).$ref, false, true)
},
shutdownVm (vmId, { hard = false } = {}) {
return this.call(
`VM.${hard ? 'hard' : 'clean'}_shutdown`,
this.getObject(vmId).$ref
).then(noop)
},
}

View File

@@ -0,0 +1,138 @@
import { forEach } from 'lodash'
const isSkippedError = error =>
error.message === 'no disks found' ||
error.message === 'no such object' ||
error.message === 'no VMs match this pattern' ||
error.message === 'unhealthy VDI chain'
const getStatus = (
error,
status = error === undefined ? 'success' : 'failure'
) => (status === 'failure' && isSkippedError(error) ? 'skipped' : status)
const computeStatusAndSortTasks = (status, tasks) => {
if (status === 'failure' || tasks === undefined) {
return status
}
for (let i = 0, n = tasks.length; i < n; ++i) {
const taskStatus = tasks[i].status
if (taskStatus === 'failure') {
return taskStatus
}
if (taskStatus === 'skipped') {
status = taskStatus
}
}
tasks.sort(taskTimeComparator)
return status
}
const taskTimeComparator = ({ start: s1, end: e1 }, { start: s2, end: e2 }) => {
if (e1 !== undefined) {
if (e2 !== undefined) {
// finished tasks are ordered by their end times
return e1 - e2
}
// finished task before unfinished tasks
return -1
} else if (e2 === undefined) {
// unfinished tasks are ordered by their start times
return s1 - s2
}
// unfinished task after finished tasks
return 1
}
export default {
async getBackupNgLogs (runId?: string) {
const { runningJobs } = this
const consolidated = {}
const started = {}
forEach(await this.getLogs('jobs'), ({ data, time, message }, id) => {
const { event } = data
if (event === 'job.start') {
if (
(data.type === 'backup' || data.key === undefined) &&
(runId === undefined || runId === id)
) {
const { scheduleId, jobId } = data
consolidated[id] = started[id] = {
data: data.data,
id,
jobId,
scheduleId,
start: time,
status: runningJobs[jobId] === id ? 'pending' : 'interrupted',
}
}
} else if (event === 'job.end') {
const { runJobId } = data
const log = started[runJobId]
if (log !== undefined) {
delete started[runJobId]
log.end = time
log.status = computeStatusAndSortTasks(
getStatus((log.result = data.error)),
log.tasks
)
}
} else if (event === 'task.start') {
const parent = started[data.parentId]
if (parent !== undefined) {
;(parent.tasks || (parent.tasks = [])).push(
(started[id] = {
data: data.data,
id,
message,
start: time,
status: parent.status,
})
)
}
} else if (event === 'task.end') {
const { taskId } = data
const log = started[taskId]
if (log !== undefined) {
// TODO: merge/transfer work-around
delete started[taskId]
log.end = time
log.status = computeStatusAndSortTasks(
getStatus((log.result = data.result), data.status),
log.tasks
)
}
} else if (event === 'jobCall.start') {
const parent = started[data.runJobId]
if (parent !== undefined) {
;(parent.tasks || (parent.tasks = [])).push(
(started[id] = {
data: {
type: 'VM',
id: data.params.id,
},
id,
start: time,
status: parent.status,
})
)
}
} else if (event === 'jobCall.end') {
const { runCallId } = data
const log = started[runCallId]
if (log !== undefined) {
delete started[runCallId]
log.end = time
log.status = computeStatusAndSortTasks(
getStatus((log.result = data.error)),
log.tasks
)
}
}
})
return runId === undefined ? consolidated : consolidated[runId]
},
}
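
Editor's note: taskTimeComparator sorts finished tasks first, ordered by end time, then unfinished ones ordered by start time. If the comparator were exported, this is the ordering it produces:

const tasks = [
  { start: 5 },         // pending
  { start: 0, end: 9 }, // finished last
  { start: 1, end: 2 }, // finished first
  { start: 3 },         // pending, started earlier
]
tasks.sort(taskTimeComparator)
// → [{ start: 1, end: 2 }, { start: 0, end: 9 }, { start: 3 }, { start: 5 }]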

File diff suppressed because it is too large

View File

@@ -12,7 +12,6 @@ import {
endsWith,
filter,
find,
-findIndex,
includes,
once,
range,
@@ -20,9 +19,13 @@ import {
startsWith,
trim,
} from 'lodash'
+import {
+  chainVhd,
+  createSyntheticStream as createVhdReadStream,
+  mergeVhd,
+} from 'vhd-lib'
import createSizeStream from '../size-stream'
-import vhdMerge, { chainVhd } from '../vhd-merge'
import xapiObjectToXo from '../xapi-object-to-xo'
import { lvs, pvs } from '../lvm'
import {
@@ -138,7 +141,9 @@ const listPartitions = (() => {
valueTransform: (value, key) =>
key === 'start' || key === 'size'
? +value
-: key === 'type' ? TYPES[+value] || value : value,
+: key === 'type'
+  ? TYPES[+value] || value
+  : value,
})
return device =>
@@ -551,7 +556,7 @@ export default class {
const backup = `${dir}/${backups[j]}`
try {
-mergedDataSize += await vhdMerge(handler, parent, handler, backup)
+mergedDataSize += await mergeVhd(handler, parent, handler, backup)
} catch (e) {
console.error('Unable to use vhd-util.', e)
throw e
@@ -566,33 +571,6 @@ export default class {
return mergedDataSize
}
async _listDeltaVdiDependencies (handler, filePath) {
const dir = dirname(filePath)
const filename = basename(filePath)
const backups = await this._listVdiBackups(handler, dir)
// Search file. (delta or full backup)
const i = findIndex(
backups,
backup => getVdiTimestamp(backup) === getVdiTimestamp(filename)
)
if (i === -1) {
throw new Error('VDI to import not found in this remote.')
}
// Search full backup.
let j
for (j = i; j >= 0 && isDeltaVdiBackup(backups[j]); j--);
if (j === -1) {
throw new Error(`Unable to find full vdi backup of: ${filePath}`)
}
return backups.slice(j, i + 1)
}
// -----------------------------------------------------------------
async _listDeltaVmBackups (handler, dir) {
@@ -840,17 +818,17 @@ export default class {
await Promise.all(
mapToArray(delta.vdis, async (vdi, id) => {
-const vdisFolder = `${basePath}/${dirname(vdi.xoPath)}`
-const backups = await this._listDeltaVdiDependencies(
-  handler,
-  `${basePath}/${vdi.xoPath}`
-)
+let path = `${basePath}/${vdi.xoPath}`
+try {
+  await handler.getSize(path)
+} catch (error) {
+  if (error == null || error.code !== 'ENOENT') {
+    throw error
+  }
-streams[`${id}.vhd`] = await Promise.all(
-  mapToArray(backups, async backup =>
-    handler.createReadStream(`${vdisFolder}/${backup}`)
-  )
-)
+  path = path.replace(/_delta\.vhd$/, '_full.vhd')
+}
+streams[`${id}.vhd`] = await createVhdReadStream(handler, path)
})
)
@@ -927,6 +905,8 @@ export default class {
const xapi = this._xo.getXapi(vm)
vm = xapi.getObject(vm._xapiId)
xapi._assertHealthyVdiChains(vm)
const reg = new RegExp(
'^rollingSnapshot_[^_]+_' + escapeStringRegexp(tag) + '_'
)
@@ -1038,13 +1018,13 @@ export default class {
// VHD path may need to be fixed.
return endsWith(vhdPath, '_delta.vhd')
? pFromCallback(cb => stat(vhdPath, cb)).then(
-    () => vhdPath,
-    error => {
-      if (error && error.code === 'ENOENT') {
-        return `${vhdPath.slice(0, -10)}_full.vhd`
+      () => vhdPath,
+      error => {
+        if (error && error.code === 'ENOENT') {
+          return `${vhdPath.slice(0, -10)}_full.vhd`
}
}
}
)
)
: vhdPath
})
.then(vhdPath => execa('vhdimount', [vhdPath, mountDir]))

View File

@@ -0,0 +1,332 @@
import defer from 'golike-defer'
import execa from 'execa'
import splitLines from 'split-lines'
import { createParser as createPairsParser } from 'parse-pairs'
import { normalize } from 'path'
import { readdir, rmdir, stat } from 'fs-extra'
import { ZipFile } from 'yazl'
import { lvs, pvs } from '../lvm'
import { resolveSubpath, tmpDir } from '../utils'
const IGNORED_PARTITION_TYPES = {
// https://github.com/jhermsmeier/node-mbr/blob/master/lib/partition.js#L38
0x05: true,
0x0f: true,
0x15: true,
0x5e: true,
0x5f: true,
0x85: true,
0x91: true,
0x9b: true,
0xc5: true,
0xcf: true,
0xd5: true,
0x82: true, // swap
}
const PARTITION_TYPE_NAMES = {
0x07: 'NTFS',
0x0c: 'FAT',
0x83: 'linux',
}
const RE_VHDI = /^vhdi(\d+)$/
const parsePartxLine = createPairsParser({
keyTransform: key => (key === 'UUID' ? 'id' : key.toLowerCase()),
valueTransform: (value, key) =>
key === 'start' || key === 'size'
? +value
: key === 'type' ? PARTITION_TYPE_NAMES[+value] || value : value,
})
const listLvmLogicalVolumes = defer(
async ($defer, devicePath, partition, results = []) => {
const pv = await mountLvmPhysicalVolume(devicePath, partition)
$defer(pv.unmount)
const lvs = await pvs(['lv_name', 'lv_path', 'lv_size', 'vg_name'], pv.path)
const partitionId = partition !== undefined ? partition.id : ''
lvs.forEach((lv, i) => {
const name = lv.lv_name
if (name !== '') {
results.push({
id: `${partitionId}/${lv.vg_name}/${name}`,
name,
size: lv.lv_size,
})
}
})
return results
}
)
async function mountLvmPhysicalVolume (devicePath, partition) {
const args = []
if (partition !== undefined) {
args.push('-o', partition.start * 512)
}
args.push('--show', '-f', devicePath)
const path = (await execa.stdout('losetup', args)).trim()
await execa('pvscan', ['--cache', path])
return {
path,
unmount: async () => {
try {
const vgNames = await pvs('vg_name', path)
await execa('vgchange', ['-an', ...vgNames])
} finally {
await execa('losetup', ['-d', path])
}
},
}
}
const mountPartition = defer(async ($defer, devicePath, partition) => {
const options = ['loop', 'ro']
if (partition !== undefined) {
const { start } = partition
if (start !== undefined) {
options.push(`offset=${start * 512}`)
}
}
const path = await tmpDir()
$defer.onFailure(rmdir, path)
const mount = options =>
execa('mount', [
`--options=${options.join(',')}`,
`--source=${devicePath}`,
`--target=${path}`,
])
// the `norecovery` option is used for ext3/ext4/xfs; if mounting fails it
// might be another fs, so try again without it
try {
await mount([...options, 'norecovery'])
} catch (error) {
await mount(options)
}
const unmount = async () => {
await execa('umount', ['--lazy', path])
return rmdir(path)
}
$defer.onFailure(unmount)
return { path, unmount }
})
// - [x] list partitions
// - [x] list files in a partition
// - [x] list files in a bare partition
// - [x] list LVM partitions
//
// - [ ] partitions with unmount debounce
// - [ ] handle directory restore
// - [ ] handle multiple entries restore (both dirs and files)
// - [ ] by default use common path as root
// - [ ] handle LVM partitions on multiple disks
// - [ ] find mounted disks/partitions on start (in case of interruptions)
//
// - [ ] manual mount/unmount (of disk) for advance file restore
// - could it stay mounted during the backup process?
// - [ ] mountDisk (VHD)
// - [ ] unmountDisk (only for manual mount)
// - [ ] getMountedDisks
// - [ ] mountPartition (optional)
// - [ ] getMountedPartitions
// - [ ] unmountPartition
export default class BackupNgFileRestore {
constructor (app) {
this._app = app
this._mounts = { __proto__: null }
}
@defer
async fetchBackupNgPartitionFiles (
$defer,
remoteId,
diskId,
partitionId,
paths
) {
const disk = await this._mountDisk(remoteId, diskId)
$defer.onFailure(disk.unmount)
const partition = await this._mountPartition(disk.path, partitionId)
$defer.onFailure(partition.unmount)
const zip = new ZipFile()
paths.forEach(file => {
zip.addFile(resolveSubpath(partition.path, file), normalize('./' + file))
})
zip.end()
return zip.outputStream.on('end', () =>
partition.unmount().then(disk.unmount)
)
}
@defer
async listBackupNgDiskPartitions ($defer, remoteId, diskId) {
const disk = await this._mountDisk(remoteId, diskId)
$defer(disk.unmount)
return this._listPartitions(disk.path)
}
@defer
async listBackupNgPartitionFiles (
$defer,
remoteId,
diskId,
partitionId,
path
) {
const disk = await this._mountDisk(remoteId, diskId)
$defer(disk.unmount)
const partition = await this._mountPartition(disk.path, partitionId)
$defer(partition.unmount)
path = resolveSubpath(partition.path, path)
const entriesMap = {}
await Promise.all(
(await readdir(path)).map(async name => {
try {
const stats = await stat(`${path}/${name}`)
entriesMap[stats.isDirectory() ? `${name}/` : name] = {}
} catch (error) {
if (error == null || error.code !== 'ENOENT') {
throw error
}
}
})
)
return entriesMap
}
async _findPartition (devicePath, partitionId) {
const partitions = await this._listPartitions(devicePath, false)
const partition = partitions.find(_ => _.id === partitionId)
if (partition === undefined) {
throw new Error(`partition ${partitionId} not found`)
}
return partition
}
async _listPartitions (devicePath, inspectLvmPv = true) {
const stdout = await execa.stdout('partx', [
'--bytes',
'--output=NR,START,SIZE,NAME,UUID,TYPE',
'--pairs',
devicePath,
])
const promises = []
const partitions = []
splitLines(stdout).forEach(line => {
const partition = parsePartxLine(line)
let { type } = partition
if (type == null || (type = +type) in IGNORED_PARTITION_TYPES) {
return
}
if (inspectLvmPv && type === 0x8e) {
promises.push(listLvmLogicalVolumes(devicePath, partition, partitions))
return
}
partitions.push(partition)
})
await Promise.all(promises)
return partitions
}
@defer
async _mountDisk ($defer, remoteId, diskId) {
const handler = await this._app.getRemoteHandler(remoteId)
if (handler._getFilePath === undefined) {
throw new Error(`this remote is not supported`)
}
const diskPath = handler._getFilePath(diskId)
const mountDir = await tmpDir()
$defer.onFailure(rmdir, mountDir)
await execa('vhdimount', [diskPath, mountDir])
const unmount = async () => {
await execa('fusermount', ['-uz', mountDir])
return rmdir(mountDir)
}
$defer.onFailure(unmount)
let max = 0
let maxEntry
const entries = await readdir(mountDir)
entries.forEach(entry => {
const matches = RE_VHDI.exec(entry)
if (matches !== null) {
const value = +matches[1]
if (value > max) {
max = value
maxEntry = entry
}
}
})
if (max === 0) {
throw new Error('no disks found')
}
return {
path: `${mountDir}/${maxEntry}`,
unmount,
}
}
@defer
async _mountPartition ($defer, devicePath, partitionId) {
if (partitionId === undefined) {
return mountPartition(devicePath)
}
if (partitionId.includes('/')) {
const [pvId, vgName, lvName] = partitionId.split('/')
const lvmPartition =
pvId !== '' ? await this._findPartition(devicePath, pvId) : undefined
const pv = await mountLvmPhysicalVolume(devicePath, lvmPartition)
const unmountQueue = [pv.unmount]
const unmount = async () => {
let fn
while ((fn = unmountQueue.pop()) !== undefined) {
await fn()
}
}
$defer.onFailure(unmount)
await execa('vgchange', ['-ay', vgName])
unmountQueue.push(() => execa('vgchange', ['-an', vgName]))
const partition = await mountPartition(
(await lvs(['lv_name', 'lv_path'], vgName)).find(
_ => _.lv_name === lvName
).lv_path
)
unmountQueue.push(partition.unmount)
return { ...partition, unmount }
}
return mountPartition(
devicePath,
await this._findPartition(devicePath, partitionId)
)
}
}
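
Editor's note: partition identifiers double as an addressing scheme here — a plain partition is identified by its partx UUID, while an LVM logical volume is encoded as pvId/vgName/lvName, with an empty pvId when the physical volume spans the whole disk. A sketch of the convention _mountPartition relies on, with an invented id:

const partitionId = '3fa85f64/VG_XenStorage/lv0' // hypothetical LVM volume id
const [pvId, vgName, lvName] = partitionId.split('/')
// partitionId.includes('/') selects the LVM branch; pvId === '' means a whole-disk PV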

Some files were not shown because too many files have changed in this diff